id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
43025 | <reponame>emerginganalytics/ualr-cyber-gym
import base64
import onetimepass
import os
from flask import abort, Flask, redirect
from flask_bootstrap import Bootstrap
from flask_login import LoginManager, UserMixin
from flask_sqlalchemy import SQLAlchemy
from globals import ds_client
from werkzeug.security import check_password_hash, generate_password_hash
# App Blueprint imports
from arena_snake.routes import arena_snake_bp
from inspect_workout.routes import inspect_bp
from sql_injection.routes import sql_injection_bp
from twofactorauth.routes import twofactorauth_bp
from wireshark.routes import wireshark_bp
from xss.routes import xss_bp
# Application instance
app = Flask(__name__)
# NOTE(review): a random secret key invalidates all user sessions on every
# restart; consider loading a stable key from the environment in production.
app.secret_key = os.urandom(12)
bootstrap = Bootstrap(app)
# db Config -- apply the configuration *before* instantiating SQLAlchemy(app);
# the original set SQLALCHEMY_DATABASE_URI after the extension was created,
# which only worked because Flask-SQLAlchemy 2.x binds its engine lazily.
app.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get('DATABASE_URL', 'sqlite:///db.sqlite')
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
lm = LoginManager(app)
class User(UserMixin, db.Model):
    """Application user with password hashing and optional TOTP 2FA."""

    __tablename__ = 'users'

    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(64), index=True)
    password_hash = db.Column(db.String(128))
    otp_secret = db.Column(db.String(16))

    def __init__(self, **kwargs):
        super(User, self).__init__(**kwargs)
        # Provision a random base32 TOTP secret for accounts created
        # without one.
        if self.otp_secret is None:
            self.otp_secret = base64.b32encode(os.urandom(10)).decode('utf-8')

    @property
    def password(self):
        # Only the hash is stored; the plaintext can never be read back.
        raise AttributeError('password is not a readable attribute')

    @password.setter
    def password(self, password):
        self.password_hash = generate_password_hash(password)

    def verify_password(self, password):
        """Check a plaintext password against the stored hash."""
        return check_password_hash(self.password_hash, password)

    def get_totp_uri(self):
        """Provisioning URI for authenticator apps (QR-code payload)."""
        return f'otpauth://totp/CyberGym:{self.username}?secret={self.otp_secret}&issuer=CyberGym2FA'

    def verify_totp(self, token):
        """Validate a one-time password against this user's secret."""
        return onetimepass.valid_totp(token, self.otp_secret)
@lm.user_loader
def load_user(user_id):
    """Flask-Login callback: resolve a session's stored id to a User row."""
    numeric_id = int(user_id)
    return User.query.get(numeric_id)
# Register app blueprints
# Each workout type lives in its own blueprint package; the loader route
# below redirects a workout id to the matching blueprint URL.
app.register_blueprint(arena_snake_bp)
app.register_blueprint(inspect_bp)
app.register_blueprint(sql_injection_bp)
app.register_blueprint(twofactorauth_bp)
app.register_blueprint(wireshark_bp)
app.register_blueprint(xss_bp)
@app.route('/<workout_id>')
def loader(workout_id):
    """Entry point for a workout id: look up its Datastore record and
    redirect to the blueprint that implements the workout's type.

    Returns 404 both for an unknown workout id and for a workout whose
    type is not recognised.  (The original fell through and returned
    None for a known workout with an unknown type, which Flask surfaces
    as a 500 error.)
    """
    key = ds_client.key('cybergym-workout', workout_id)
    workout = ds_client.get(key)
    # Map each supported workout type to its blueprint URL.
    valid_types = {
        'wireshark': '/%s' % workout_id,
        'xss': '/xss/xss_d/%s' % workout_id,
        '2fa': '/tfh/%s' % workout_id,
        'inspect': '/inspect/%s' % workout_id,
        'sql_injection': '/sql_injection/%s' % workout_id,
        'arena_snake': '/arena_snake/%s' % workout_id
    }
    if workout:
        workout_type = workout['type']
        if workout_type in valid_types:
            # Any route specific logic is handled at the individual blueprint
            # level. Return redirect to specific blueprint
            return redirect(valid_types[workout_type])
    return abort(404)
# Create database if none exist
# NOTE(review): Flask-SQLAlchemy 3.x requires an application context here
# (``with app.app_context(): db.create_all()``); this call assumes 2.x.
db.create_all()

if __name__ == "__main__":
    # app.run(debug=True, host='0.0.0.0', port=4000)
    # Debug server for local development only; never expose in production.
    app.run(debug=True)
| StarcoderdataPython |
# -*- coding: utf-8 -*-
# Copyright 2016, 2017 Openworx
# License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl.html).
# Odoo module manifest for the backend theme.
{
    "name": "Material/United Backend Theme",
    "summary": "Odoo 11.0 community backend theme",
    "version": "11.0.1.0.4",
    "category": "Themes/Backend",
    "website": "http://www.openworx.nl",
    "description": """
Backend theme for Odoo 11.0 community edition.
""",
    'images':[
        'images/screen.png'
    ],
    "author": "Openworx",
    "license": "LGPL-3",
    "installable": True,
    # Modules that must be installed before this one.
    "depends": [
        'web_responsive',
    ],
    # XML data files loaded on install/upgrade (disabled entries kept for
    # reference).
    "data": [
        'views/assets.xml',
        'views/res_company_view.xml',
        #'views/users.xml',
        'views/sidebar.xml',
        # 'views/web.xml',
    ],
}
| StarcoderdataPython |
6426649 | <reponame>Mr-TelegramBot/python-tdlib<filename>py_tdlib/constructors/authentication_code_type_call.py
from ..factory import Type
class authenticationCodeTypeCall(Type):
    """TDLib ``authenticationCodeTypeCall`` constructor: the auth code is
    delivered via a phone call that speaks the code aloud."""
    length = None  # type: "int32"
| StarcoderdataPython |
126700 | <filename>forms/admin.py<gh_stars>0
"""Form Admin"""
from django.contrib import admin
from .models import Form, Question
admin.site.register(Form)
admin.site.register(Question)
| StarcoderdataPython |
3281467 | <filename>datahub/interaction/migrations/0071_add_large_capital_opportunity_and_update_themes.py<gh_stars>1-10
# Generated by Django 3.1.5 on 2021-02-10 11:57
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add the large_capital_opportunity FK to Interaction and extend the
    theme choices with 'large_capital_opportunity'."""

    dependencies = [
        ('interaction', '0070_add_interaction_export_countries'),
        ('opportunity', '0001_initial'),
    ]

    operations = [
        # Extend the closed set of interaction themes.
        migrations.AlterField(
            model_name='interaction',
            name='theme',
            field=models.CharField(blank=True, choices=[(None, 'Not set'), ('export', 'Export'), ('investment', 'Investment'), ('large_capital_opportunity', 'Large capital opportunity'), ('other', 'Something else')], max_length=255, null=True),
        ),
        # Optional link from an interaction to a large capital opportunity.
        migrations.AddField(
            model_name='interaction',
            name='large_capital_opportunity',
            field=models.ForeignKey(blank=True, help_text='For interactions only.', null=True,
                                    on_delete=django.db.models.deletion.CASCADE, related_name='interactions',
                                    to='opportunity.largecapitalopportunity'),
        ),
    ]
| StarcoderdataPython |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 27 15:57:09 2020
@author: AzureD
package module test

Tiny module used to exercise package importing / reloading.
"""

variable = "test 22"  # module level variable shown by run()


def run():
    """Print a confirmation message including the module-level variable."""
    print(f"Success! Reloaded! {variable}")
# Canned PokeAPI /pokemon-species payloads, trimmed to the fields consumers
# read: flavor_text_entries, habitat, is_legendary, name.
# NOTE(review): is_legendary holds the *strings* 'true'/'false' here rather
# than booleans (the live API returns booleans); kept as-is because tests
# may depend on it -- confirm before changing.
# (A dataset artifact fused into the first line previously made this module
# unparseable; it has been removed.)
POKEMON_SPECIES_MEWTWO_JSON_RESPONSE = {
    "flavor_text_entries": [
        {
            "flavor_text": "It was created by\na scientist after\nyears "
                           "of horrific\fgene splicing and"
                           "\nDNA engineering\nexperiments.",
            "language": {
                "name": "en"
            }
        },
        {
            "flavor_text": "It was created by\na scientist after\nyears "
                           "of horrific\fgene splicing and\nDNA "
                           "engineering\nexperiments.",
            "language": {
                "name": "en"
            }
        },
        {
            "flavor_text": "Mewtwo è stato creato grazie a una manipolazione "
                           "genetica.\nTuttavia, sebbene la scienza sia "
                           "riuscita a creare un corpo\ndi Pokémon, "
                           "ha fallito nell’intento di dare a Mewtwo"
                           "\nun animo generoso.",
            "language": {
                "name": "it"
            },
        }
    ],
    "habitat": {
        "name": "rare"
    },
    "is_legendary": 'true',
    "name": "mewtwo"
}

POKEMON_SPECIES_PIKACHU_JSON_RESPONSE = {
    "flavor_text_entries": [
        {
            "flavor_text": "Solleva la coda per esaminare "
                           "l’ambiente circostante.\nA volte "
                           "la coda è colpita da un fulmine "
                           "quando è in\nquesta posizione.",
            "language": {
                "name": "it"
            },
        },
        {
            "flavor_text": "When several of\nthese POKéMON"
                           "\ngather, their\felectricity "
                           "could\nbuild and cause"
                           "\nlightning storms.",
            "language": {
                "name": "en"
            }
        },
        {
            "flavor_text": "When several of\nthese POKéMON"
                           "\ngather, their\felectricity "
                           "could\nbuild and cause"
                           "\nlightning storms.",
            "language": {
                "name": "en"
            }
        },
    ],
    "habitat": {
        "name": "forest"
    },
    "is_legendary": 'false',
    "name": "pikachu"
}

POKEMON_SPECIES_ZUBAT_JSON_RESPONSE = {
    "flavor_text_entries": [
        {
            "flavor_text": "Forms colonies in\nperpetually "
                           "dark\nplaces. Uses\fultrasonic waves"
                           "\nto identify and\napproach targets.",
            "language": {
                "name": "en"
            }
        },
        {
            "flavor_text": "Forms colonies in\nperpetually "
                           "dark\nplaces. Uses\fultrasonic waves\nto "
                           "identify and\napproach targets.",
            "language": {
                "name": "en"
            }
        },
        {
            "flavor_text": "Non ha occhi, ma si sposta agilmente "
                           "nella notte\nemettendo ultrasuoni la cui "
                           "eco lo avverte degli\nostacoli.",
            "language": {
                "name": "it"
            },
        }
    ],
    "habitat": {
        "name": "cave"
    },
    "is_legendary": 'false',
    "name": "zubat"
}
| StarcoderdataPython |
from urllib.request import urlopen
from bs4 import BeautifulSoup
import sys
import datetime
import pandas as pd
import tabula
import re
from janome.tokenizer import Tokenizer
import numpy as np
def getLastMeetingNo():
    """Return the meeting number of the last row stored in the CSV.

    Returns 0 when the CSV has no data rows, so the caller treats any
    scraped meeting as new.
    """
    df = pd.read_csv('data/monitoring_comments.csv')
    if len(df.index) > 0:
        # ``int(df.tail(1)['meeting_no'])`` converted a whole Series to int,
        # which is deprecated and removed in pandas 2.x; take the scalar of
        # the last row explicitly instead.
        return int(df['meeting_no'].iloc[-1])
    else:
        return 0
def getLatestMeetingInfo():
    """Scrape the Tokyo bousai site index for the newest meeting entry.

    Returns (meeting number, ISO date string, absolute meeting-page URL)
    taken from the first <li> of the page's 'listlink' list.
    """
    html = urlopen('https://www.bousai.metro.tokyo.lg.jp/taisaku/saigai/1013388/index.html')
    bsObj = BeautifulSoup(html, 'html.parser')
    ul = bsObj.select_one('ul.listlink')
    li = ul.select_one('li')
    # Links are relative ('../../../...'); rebuild an absolute URL.
    url = 'https://www.bousai.metro.tokyo.lg.jp/' + li.select_one('a').get('href').replace('../../../', '')
    title = li.select_one('a').text
    # Title is of the form '第NN回 ... 令和Y年M月D日'.
    no = int(re.search(r'第(\d+?)回', title).group(1))
    date = re.search(r'令和(\d+?)年(\d+?)月(\d+?)日', title)
    # Convert the Reiwa era year to Gregorian (Reiwa 1 == 2019).
    y = 2018 + int(date.group(1))
    m = int(date.group(2))
    d = int(date.group(3))
    date = '{0:%Y-%m-%d}'.format(datetime.date(y,m,d))
    return no, date, url
def getCommentPdfUrl(meeting_url):
    """Find the expert monitoring-comment PDF link on a meeting page.

    Returns the absolute PDF URL, or implicitly None when no matching
    link exists (the caller currently does not handle the None case).
    """
    html = urlopen(meeting_url)
    bsObj = BeautifulSoup(html, 'html.parser')
    ul = bsObj.select_one('ul.objectlink')
    lis = ul.select('li')
    for li in lis:
        a = li.select_one('a')
        title = a.text
        # Match the link whose title mentions expert monitoring comments.
        if '専門家によるモニタリングコメント・意見' in title:
            url = 'https://www.bousai.metro.tokyo.lg.jp/' + a.get('href').replace('../../../', '')
            return url
def getPdfDataFrame(pdf_url):
    """Extract the comment table from the PDF into a tidy DataFrame.

    Returns columns [monitoring_index, monitoring_comment], one row per
    index, with cell fragments spanning multiple table rows re-joined.
    """
    df = tabula.read_pdf(pdf_url, lattice=True, pages='all')
    df = pd.concat(df, ignore_index=True)
    df.columns = ['monitoring_index', 'graph', 'monitoring_comment']
    # Some rows carry their text in the 'graph' column; merge it in.
    df['monitoring_comment']=df['monitoring_comment'].fillna(df['graph'])
    df = df.drop('graph', axis=1)
    # Strip intra-cell line breaks introduced by the PDF layout.
    df['monitoring_index']=df['monitoring_index'].replace(r'[\r\n]','', regex=True)
    df['monitoring_comment']=df['monitoring_comment'].replace(r'[\r\n]','', regex=True)
    # Forward-fill the index for continuation rows; keep only its first char.
    df['monitoring_index']=df['monitoring_index'].fillna(method='ffill')
    df['monitoring_index']=df['monitoring_index'].apply(lambda x: str(x)[0])
    # 'n' marks rows whose original index was NaN (str(nan)[0] == 'n').
    df = df[df['monitoring_index'] != 'n']
    df = df.groupby('monitoring_index')['monitoring_comment'].apply(''.join).reset_index()
    return df
def splitComment(df):
    """Explode each monitoring comment into one row per 。-terminated sentence.

    Expects columns [meeting_no, meeting_date, monitoring_index,
    monitoring_comment]; returns one row per sentence with a 1-based
    line_number.  Rows are accumulated in a plain list and the DataFrame is
    built once, avoiding the O(n^2) cost of appending via ``df.loc`` in a
    loop.
    """
    rows = []
    for row in df.itertuples(name=None):
        # row[0] is the index; the four data columns follow.
        meeting_no, meeting_date, monitoring_index, monitoring_comment = row[1:5]
        sentences = monitoring_comment.split('。')
        # The fragment after the final 。 is empty (or an unterminated tail)
        # and is discarded, matching the original behaviour.
        sentences.pop()
        for i, sentence in enumerate(sentences, start=1):
            rows.append([meeting_no, meeting_date, monitoring_index, i, sentence + '。'])
    return pd.DataFrame(
        rows,
        columns=['meeting_no', 'meeting_date', 'monitoring_index', 'line_number', 'monitoring_comment'])
def generateToken(df):
    """Tokenise each sentence with Janome and keep content nouns only.

    Emits one row per kept token carrying the meeting/sentence keys, the
    token surface form, and its part-of-speech breakdown.
    """
    df_token = pd.DataFrame(index=[], columns=['meeting_no', 'meeting_date', 'monitoring_index', 'line_number', 'token', 'part_of_speech', 'part_of_speech2', 'part_of_speech3', 'part_of_speech4', 'infl_type', 'base_form'])
    for row in df.itertuples(name=None):
        meeting_no = row[1]
        meeting_date = row[2]
        monitoring_index = row[3]
        line_number = row[4]
        # NOTE(review): constructing a Tokenizer per row is expensive; it
        # could be hoisted out of the loop without changing the output.
        t = Tokenizer()
        tokens = t.tokenize(row[5])
        for token in tokens:
            # Skip punctuation/symbols (incl. full-width variants) and
            # Japanese list-bullet katakana (ア, イ, ウ, ...).
            if not re.search(r'[、。I,%%~~##※\\\(\)\.\-\/]', token.surface) and token.surface not in ['ア', 'イ', 'ウ', 'エ', 'オ', 'カ', 'キ']:
                word_category = token.part_of_speech.split(',')[0]
                word_type = token.part_of_speech.split(',')[1]
                # Keep content nouns only: drop numerals, pronouns,
                # dependent nouns, and suffixes.
                if word_category == '名詞' and word_type != '数'and word_type != '代名詞' and word_type != '非自立' and word_type != '接尾':
                    df_token.loc[len(df_token.index)] = [meeting_no, meeting_date, monitoring_index, line_number, token.surface] + token.part_of_speech.split(',') + [token.infl_type, token.base_form]
    # Janome uses '*' for missing morphological fields; normalise to NaN.
    df_token = df_token.replace('*', np.nan)
    return df_token
def main():
    """Fetch the newest meeting's comments and append them to the CSVs.

    Exits with status 1 when the latest published meeting is not newer
    than the last one already stored.
    """
    last_meeting_no = getLastMeetingNo()
    meeting_no, meeting_date, meeting_url = getLatestMeetingInfo()
    if meeting_no > last_meeting_no:
        comment_pdf_url = getCommentPdfUrl(meeting_url)
        df = getPdfDataFrame(comment_pdf_url)
        df['meeting_no'] = meeting_no
        df['meeting_date'] = meeting_date
        df = df[['meeting_no', 'meeting_date', 'monitoring_index', 'monitoring_comment']]
        # Append-only CSVs; headers are assumed to already exist.
        df.to_csv('data/monitoring_comments.csv', mode='a', header=False, index=False, encoding='utf-8-sig')
        df_split = splitComment(df)
        df_split.to_csv('data/monitoring_comments_split.csv', mode='a', header=False, index=False, encoding='utf-8-sig')
        df_token = generateToken(df_split)
        df_token.to_csv('data/monitoring_comments_token.csv', mode='a', header=False, index=False, encoding='utf-8-sig')
    else:
        print('error')
        print('meeting_no = ', meeting_no)
        print('last_meeting_no = ', last_meeting_no)
        sys.exit(1)
if __name__ == '__main__':
    main()
11211198 | """Setup script for slim."""
from setuptools import find_packages
from setuptools import setup

# Minimal packaging metadata for the tf-slim library.
setup(
    name='slim',
    version='0.1',
    include_package_data=True,
    packages=find_packages(),  # auto-discover all packages in the tree
    description='tf-slim',
)
| StarcoderdataPython |
# -*- coding: utf-8 -*-
def main():
    """Grid shortest path with lettered teleporters (competitive judge I/O).

    Reads an H x W grid from stdin ('S' start, 'G' goal, '#' wall; a
    lowercase letter marks a teleporter group -- stepping on any cell of a
    group allows a 1-cost jump to every other cell with the same letter).
    Prints the minimum number of moves, or -1 if the goal is unreachable.
    """
    from collections import deque
    import sys
    input = sys.stdin.readline
    h, w = map(int, input().split())
    a = [list(input().rstrip()) for _ in range(h)]
    sy, sx = 0, 0
    gy, gx = 0, 0
    inf = 10 ** 18
    dist = [[inf for _ in range(w)] for __ in range(h)]
    d = deque()
    dxy = [(-1, 0), (1, 0), (0, -1), (0, 1)]
    teleport = dict()
    # NOTE(review): the dict above is immediately shadowed by the list
    # below; the dict assignment is dead code.
    teleport = [[] for _ in range(26)]
    is_used = [False for _ in range(26)]
    for i in range(h):
        for j in range(w):
            if a[i][j] == "S":
                sy, sx = i, j
                dist[sy][sx] = 0
                d.append((sy, sx))
            elif a[i][j] == "G":
                gy, gx = i, j
            elif a[i][j].islower():
                # Bucket teleporter cells by letter index 0..25.
                diff = ord(a[i][j]) - ord("a")
                teleport[diff].append((i, j))
    # Plain BFS: every step and every teleport jump costs 1.
    while d:
        y, x = d.popleft()
        if y == gy and x == gx:
            print(dist[y][x])
            exit()
        for dx, dy in dxy:
            nx = x + dx
            ny = y + dy
            if nx < 0 or nx >= w:
                continue
            if ny < 0 or ny >= h:
                continue
            if a[ny][nx] == "#":
                continue
            if dist[ny][nx] != inf:
                continue
            dist[ny][nx] = dist[y][x] + 1
            d.append((ny, nx))
        if a[y][x].islower():
            diff = ord(a[y][x]) - ord("a")
            if is_used[diff]:
                continue
            for ny, nx in teleport[diff]:
                if dist[ny][nx] != inf:
                    continue
                dist[ny][nx] = dist[y][x] + 1
                d.append((ny, nx))
            # Expanding a letter's group once suffices; marking it used
            # keeps the BFS linear in the grid size.
            is_used[diff] = True
    print(-1)
| StarcoderdataPython |
#!/usr/bin/env python -O
"""
This is the test class for testing Kaplan-Meier module algorithms
and models.
"""
# -*- coding: utf-8 -*-
#
# tests.survival.TestKaplanMeier.py is part of The RTK Project
#
# All rights reserved.
# Copyright 2007 - 2017 <NAME> andrew.rowland <AT> reliaqual <DOT> com
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER
# OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
from os.path import dirname
sys.path.insert(0, dirname(dirname(dirname(__file__))) + "/rtk", )
import unittest
from nose.plugins.attrib import attr
import numpy as np
import dao.DAO as _dao
from analyses.survival.KaplanMeier import *
from survival.Record import Model as Record
__author__ = '<NAME>'
__email__ = '<EMAIL>'
__organization__ = 'ReliaQual Associates, LLC'
__copyright__ = 'Copyright 2015 Andrew "Weibullguy" Rowland'
class TestKaplanMeier(unittest.TestCase):
    """
    Class for testing the KaplanMeier data model class.
    """

    @attr(all=True, unit=True)
    def test_format_data(self):
        """
        (TestKaplanMeier) format_data should return a dictionary of lists on success
        """
        _data = {}
        _assembly_id = [0, 0, 0, 0, 1, 1, 1]
        _fail_times = [56.7, 116.4, 152.1, 198.4, 233.3, 286.1, 322.9]
        _status = [0, 0, 0, 1, 0, 0, 1]
        _n_failures = [1, 1, 1, 1, 1, 2, 1]
        for i in range(len(_fail_times)):
            _record = Record()
            _record.assembly_id = _assembly_id[i]
            _record.right_interval = _fail_times[i]
            _record.status = _status[i]
            _record.n_failures = _n_failures[i]
            _data[i] = _record
        # NOTE(review): 719163 presumably is date(1970, 1, 1).toordinal(),
        # i.e. the default record date -- confirm against format_data docs.
        self.assertEqual(format_data(_data),
                         ([(0, 0.0, 56.7, 0.0, 0, 1, 719163),
                           (1, 0.0, 116.4, 0.0, 0, 1, 719163),
                           (2, 0.0, 152.1, 0.0, 0, 1, 719163),
                           (3, 0.0, 198.4, 0.0, 1, 1, 719163),
                           (4, 0.0, 233.3, 0.0, 0, 1, 719163),
                           (5, 0.0, 286.1, 0.0, 0, 2, 719163),
                           (6, 0.0, 322.9, 0.0, 1, 1, 719163)], 6))

    @attr(all=True, unit=True)
    def test_kaplan_meier(self):
        """
        (TestKaplanMeier) kaplan_meier should return a numpy matrix of floats on success
        """
        # Data is from Lee and Wang, page 69, example 4.2.
        _data = [('', 3.0, 3.0, 0.0, u'Event', 1),
                 ('', 4.0, 4.0, 0.0, u'Right Censored', 1),
                 ('', 5.7, 5.7, 0.0, u'Right Censored', 1),
                 ('', 6.5, 6.5, 0.0, u'Event', 1),
                 ('', 6.5, 6.5, 0.0, u'Event', 1),
                 ('', 8.4, 8.4, 0.0, u'Right Censored', 1),
                 ('', 10.0, 10.0, 0.0, u'Event', 1),
                 ('', 10.0, 10.0, 0.0, u'Right Censored', 1),
                 ('', 12.0, 12.0, 0.0, u'Event', 1),
                 ('', 15.0, 15.0, 0.0, u'Event', 1)]
        _km = kaplan_meier(_data, 0.0, 100000.0)
        # Rows presumably are [time, lower CL, S(t), upper CL] -- the middle
        # column lies between the outer two; confirm against KaplanMeier docs.
        self.assertTrue(np.allclose(_km[0],
                                    [[3.0, 0.71671928, 0.9, 0.96722054],
                                     [4.0, 0.71671928, 0.9, 0.96722054],
                                     [5.7, 0.71671928, 0.9, 0.96722054],
                                     [6.5, 0.41797166, 0.64285714, 0.79948773],
                                     [8.4, 0.41797166, 0.64285714, 0.79948773],
                                     [10.0, 0.25976276, 0.48214286, 0.67381139],
                                     [12.0, 0.06504527, 0.24107143, 0.47680147],
                                     [15.0, 0.0, 0.0, 0.0]]))
        self.assertTrue(np.allclose(_km[1], [1, 4, 5, 7, 9, 10]))

    @attr(all=True, unit=True)
    def test_kaplan_meier_mean(self):
        """
        (TestKaplanMeier) kaplan_meier_mean should return a numpy 1-D matrix of integers on success
        """
        # This data is the result of the executing the Kaplan-Meier function
        # using the data set from the previous test.
        _data = np.array([[3.0, 0.71671928, 0.9, 0.96722054],
                          [4.0, 0.71671928, 0.9, 0.96722054],
                          [5.7, 0.71671928, 0.9, 0.96722054],
                          [6.5, 0.41797166, 0.64285714, 0.79948773],
                          [8.4, 0.41797166, 0.64285714, 0.79948773],
                          [10.0, 0.25976276, 0.48214286, 0.67381139],
                          [12.0, 0.06504527, 0.24107143, 0.47680147],
                          [15.0, 0.0, 0.0, 0.0]])
        _rank = [1, 4, 5, 7, 9, 10]
        _km_mean = kaplan_meier_mean(_data, _rank, 0.9)
        # Expected [lower, point, upper] mean estimates at 90% confidence.
        self.assertTrue(np.allclose(_km_mean,
                                    [8.14115869673, 10.0875, 12.0338413033]))

    @attr(all=True, unit=True)
    def test_kaplan_meier_hazard(self):
        """
        (TestKaplanMeier) kaplan_meier_hazard should return a numpy matrix of floats on success
        """
        _data = np.array([[3.0, 0.71671928, 0.9, 0.96722054],
                          [4.0, 0.71671928, 0.9, 0.96722054],
                          [5.7, 0.71671928, 0.9, 0.96722054],
                          [6.5, 0.41797166, 0.64285714, 0.79948773],
                          [8.4, 0.41797166, 0.64285714, 0.79948773],
                          [10.0, 0.25976276, 0.48214286, 0.67381139],
                          [12.0, 0.06504527, 0.24107143, 0.47680147],
                          [15.0, 0.0, 0.0, 0.0]])
        _km_hazard = kaplan_meier_hazard(_data)
        self.assertTrue(np.allclose(_km_hazard,
                                    [[3.00000000e+00, 4.00000000e+00,
                                      5.70000000e+00, 6.50000000e+00,
                                      8.40000000e+00, 1.00000000e+01,
                                      1.20000000e+01, 1.50000000e+01],
                                     [1.11023678e-01, 8.32677588e-02,
                                      5.84335150e-02, 1.34206407e-01,
                                      1.03850196e-01, 1.34798653e-01,
                                      2.27722649e-01, 0.00000000e+00],
                                     [3.51201719e-02, 2.63401289e-02,
                                      1.84843010e-02, 6.79742703e-02,
                                      5.25991377e-02, 7.29514819e-02,
                                      1.18555167e-01, 0.00000000e+00],
                                     [1.11095811e-02, 8.33218584e-03,
                                      5.84714796e-03, 3.44283221e-02,
                                      2.66409636e-02, 3.94805044e-02,
                                      6.17212567e-02, 0.00000000e+00],
                                     [3.33071035e-01, 3.33071035e-01,
                                      3.33071035e-01, 8.72341648e-01,
                                      8.72341648e-01, 1.34798653e+00,
                                      2.73267179e+00, -0.00000000e+00],
                                     [1.05360516e-01, 1.05360516e-01,
                                      1.05360516e-01, 4.41832757e-01,
                                      4.41832757e-01, 7.29514819e-01,
                                      1.42266200e+00, -0.00000000e+00],
                                     [3.33287433e-02, 3.33287433e-02,
                                      3.33287433e-02, 2.23784094e-01,
                                      2.23784094e-01, 3.94805044e-01,
                                      7.40655080e-01, -0.00000000e+00],
                                     [-1.09939949e+00, -1.09939949e+00,
                                      -1.09939949e+00, -1.36574134e-01,
                                      -1.36574134e-01, 2.98612017e-01,
                                      1.00527981e+00, -0.00000000e+00],
                                     [-2.25036733e+00, -2.25036733e+00,
                                      -2.25036733e+00, -8.16823847e-01,
                                      -8.16823847e-01, -3.15375598e-01,
                                      3.52529764e-01, -0.00000000e+00],
                                     [-3.40133509e+00, -3.40133509e+00,
                                      -3.40133509e+00, -1.49707356e+00,
                                      -1.49707356e+00, -9.29363195e-01,
                                      -3.00220241e-01, -0.00000000e+00]]))
| StarcoderdataPython |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import os
import sys
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from PyQt5.QtNetwork import *
from PyQt5.QtWebEngineWidgets import *
from PyQt5.QtXmlPatterns import *
import meshandler
class XmlRestExecutor(QWidget):
    """Small GUI tool driven by an .ini file: builds a REST request from
    user-editable parameters, executes it, optionally transforms the XML
    reply with XQuery/XSLT, and renders the result in a web view."""

    def __init__(self, iniFile, parent=None):
        super().__init__(parent)
        self.topLay = QVBoxLayout(self)
        self.topLay.setContentsMargins(6,6,6,6)
        self.lay = QFormLayout()
        self.topLay.addLayout(self.lay)
        self.resultLay = QVBoxLayout()
        self.topLay.addLayout(self.resultLay)
        self.man = QNetworkAccessManager(self)
        self.page = QWebEngineView(self)
        self.page.settings().setDefaultTextEncoding("windows-1251")
        self.topLay.addWidget(self.page)
        self.bar = QStatusBar(self)
        self.topLay.addWidget(self.bar)
        self.loadIni(iniFile)
        ## self.man.finished.connect(self.netFinished)

    def loadIni(self, iniFile):
        """Read window title, request description and inputs from the ini."""
        ini = QSettings(iniFile, QSettings.IniFormat)
        ini.setIniCodec("utf-8")
        ini.beginGroup("Common")
        wt = ini.value('Title','')
        if wt != '': self.setWindowTitle(wt)
        ini.endGroup()
        ini.beginGroup("WebPage")
        self.url = ini.value('Url')
        self.bodyFile = ini.value('Body')
        self.method = ini.value('Method')
        # Default method: POST when a body template is given, else GET.
        if self.method == None:
            self.method = 'get' if self.bodyFile == None else 'post'
        self.transformTemplate = ini.value('Transform')
        ini.endGroup()
        self.readInputs(ini)
        self.runBtn = QPushButton("Run")
        self.runBtn.setDefault(True)
        self.runBtn.clicked.connect(self.run)
        self.btnLay = QHBoxLayout()
        self.btnLay.addStretch()
        self.btnLay.addWidget(self.runBtn)
        self.lay.addRow(self.btnLay)

    def readInputs(self, ini):
        """Build line-edit widgets for each [Input] key.

        Each ini value is either 'value' or ['title', 'value'].  An empty
        title marks an alias: in the second pass its key is bound to the
        line edit of the key named by its value.
        """
        self.inputs = {}
        self.params = []
        ini.beginGroup("Input")
        for key in sorted(ini.childKeys()):
            v = ini.value(key)
            if type(v) != type([]):
                v = [v]
            if len(v)>1:
                paramTitle = v[0]
                paramValue = v[1]
            else:
                paramTitle = key
                paramValue = v[0]
            self.params.append([key, paramTitle, paramValue])
            if paramTitle != '':
                le = QLineEdit()
                self.inputs[key] = le
                le.setText(paramValue)
                le.paramTitle = paramTitle
                self.lay.addRow(paramTitle, le)
        # Second pass: resolve alias parameters (empty title) to the
        # already-created line edit of the referenced key.
        for kp in self.params:
            key = kp[0]
            paramTitle = kp[1]
            paramValue = kp[2]
            if paramTitle == '':
                le = self.inputs[paramValue]
                self.inputs[key] = le
        ini.endGroup()

    def run(self):
        """Substitute current input values into URL/body and fire the request."""
        self.runBtn.setEnabled(False)
        try:
            values = { kp[0]:self.inputs[kp[0]].text() for kp in self.params}
            url = self.url.format(**values)
            req = QNetworkRequest(QUrl(url))
            if self.method == 'get':
                reply = self.man.get(req)
            else:
                f = QFile(self.bodyFile)
                if f.open(QIODevice.ReadOnly):
                    body = str(f.readAll(),'utf-8-sig')
                else:
                    body = ''
                body = body.format(**values)
                body = bytes(body,'utf-8')
                req.setHeader(QNetworkRequest.ContentTypeHeader,"text/xml;charset=UTF-8")
                reply = self.man.post(req,body)
            reply.finished.connect(self.replyFinished)
        # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit;
        # `except Exception` would be safer here.
        except:
            self.runBtn.setEnabled(True)
            print(str(sys.exc_info()[1]))

    def replyFinished(self):
        """Render the reply, applying the XQuery/XSLT template if configured."""
        self.runBtn.setEnabled(True)
        reply = self.sender()
        hasTempl = False
        if self.transformTemplate != None:
            # Pick the query language from the template file extension.
            filename, ext = os.path.splitext(self.transformTemplate)
            if ext.lower() == '.xq' or ext.lower() == '.xquery':
                lang = QXmlQuery.XQuery10
            elif ext.lower() == '.xsl' or ext.lower() == '.xslt':
                lang = QXmlQuery.XSLT20
            else:
                return
            templ = QFile(self.transformTemplate)
            if templ.open(QIODevice.ReadOnly):
                hasTempl = True
            else:
                print("Can't open template",self.transformTemplate)
        if hasTempl:
            query = QXmlQuery(lang)
            query.setMessageHandler(XmlQueryMessageHandler())
            query.setFocus(reply)
            query.setQuery(templ)
            if query.isValid():
                out = query.evaluateToString()
                if out != None:
                    self.page.setHtml(out)
                    return
        # No template (or transform failed): show the raw reply.
        if reply.error() != QNetworkReply.NoError:
            print("Error:", reply.error())
            return
        b = reply.readAll()
        cth = reply.header(QNetworkRequest.ContentTypeHeader)
        # Default to windows-1251 when the server omits the charset.
        if len(cth.split(';')) == 1:
            cth = cth + ";charset=windows-1251"
        self.page.setContent(b,cth,reply.url())
        return
class XmlQueryMessageHandler(QAbstractMessageHandler):
    """Routes QXmlQuery warnings/errors to stdout with source position."""
    def handleMessage(self, msgType, desc, idUrl, source):
        # source.line()/column() locate the offending template construct.
        print("Msg(%s) on (%s,%s): %s" % (msgType, source.line(), source.column(), desc))
if __name__ == '__main__':
    # Download OpenGL dll from here
    # http://download.qt.io/development_releases/prebuilt/llvmpipe/windows/
    #
    # os.environ.putenv('QT_OPENGL','software') # desktop, software, angle
    # Launch the executor configured by valcurs.ini.
    app = QApplication(sys.argv)
    ex = XmlRestExecutor("valcurs.ini")
    ex.show()
    sys.exit(app.exec_())
| StarcoderdataPython |
1697683 | <reponame>jnthn/intellij-community<gh_stars>1-10
print("<selection>Hello</selection> %(name)s" % {"name": "World"}) | StarcoderdataPython |
11360291 | <reponame>Starrah/THU-SuperMoon
import torch
from SuperMoon.utils.meter import CIndexMeter
def test_CIndexMeter():
    """CIndexMeter over three (pred, target) pairs should yield 2/3.

    Bug fix: the original asserted ``c_index.value() - 2.0/3 < 1e-6``,
    which is vacuously true for *any* value below 2/3; use an absolute
    tolerance so values far below the expectation also fail.
    """
    c_index = CIndexMeter()
    preds = [0.4, 0.3, 0.6]
    targets = [0.5, 0.2, 0.4]
    for pred, target in zip(preds, targets):
        c_index.add(torch.tensor(pred), torch.tensor(target))
    assert abs(c_index.value() - 2.0 / 3) < 1e-6
| StarcoderdataPython |
import sqlite3

# Dump every crawled row from the Twitter table and report the total count.
# (Ported from Python 2: the original `print row` statements are a
# SyntaxError under Python 3.)
conn = sqlite3.connect('spider.sqlite')
cur = conn.cursor()
cur.execute('SELECT * FROM Twitter')

count = 0
for row in cur:
    print(row)
    count = count + 1
print(count, 'rows.')
cur.close()
| StarcoderdataPython |
100866 | <filename>Programming-Basics-with-Python-April-2019/06_conditional_statements_exercise/06_godzilla_vs_kong.py<gh_stars>0
# Read the film budget, the number of statists and the per-costume price.
budget = float(input())
statists = int(input())
one_costume_price = float(input())

# 10% of the budget is reserved for decor.
decor_price = 0.1 * budget
costumes_price = statists * one_costume_price
# Bulk discount: 10% off the costumes for 150 or more statists.
if statists >= 150:
    costumes_price -= 0.1 * costumes_price
total_price = decor_price + costumes_price

if budget >= total_price:
    print("Action!")
    print(f"Wingard starts filming with {budget - total_price:.2f} leva left.")
else:
    print("Not enough money!")
    print(f"Wingard needs {total_price - budget:.2f} leva more.")
| StarcoderdataPython |
8152683 | <reponame>y-jeong/RET-SLR<filename>src/1DNPSLR.py<gh_stars>0
# For each given array geometry, this program calculates the extinction cross section, differential scattering cross section (partially driven array), or multi donor-multi acceptor resonance energy transfer (RET) rate
import numpy as np
import matplotlib.pyplot as plt
import os
import sys
from scipy.constants import pi, c, epsilon_0, e, hbar
from objects import Emitter, Donor, Acceptor, Nanoparticle
from nanosystem import Nanosystem
from functions import get_pol_quasistatic, get_pol_Mie, get_red_dielec_fn_Lor
from constants import dp_au_to_si, nano, micro
if __name__ == "__main__":
# Define the parameters that define the system to study
ref_idx_bgr = 1.0 # background refractive index
prm_bgr = 1.0 # background permeability
NP_type = "Ag"
NP_ref_idx_path = "/Users/tigris/Downloads/research/MarcBourgeois/RETSLR/codes/refAg.dat"
num_NP_l = np.array([81])
ns_geom_l = np.array([ [ [400*nano,0,40*nano,0,i] ] for i in num_NP_l ])
ns_geom_l_str = "a400R40".replace(" ", "_")
# [a,b,R,S,N]: for a sublattice,
# a: base spacing
# b: increment of spacing
# R: base radius
# S: increment of radius
# N: number of NPs
# choose what to calculate and set other system parameters
calc_opt_l = ["RET_multiDA_polDA", "RET_multiDA_nonpolDA"]
# ext_crs_tot: all NPs are driven. Total extinction cross section is calculated
# sca_crs_prop: a specified number of particles on the left are driven. Sum of extinction cross sections on the right side of particles is calculated
# RET_multiDA: multi-donor, multi-acceptor RET coupling factor
# _nonpolDA: non-polarizable donors and acceptors. The dipole moments of the donors are given as the input, and the dipole moments of the NPs are calculated. Redundant inputs such as the donor and acceptor polarizabilities are ignored
# _polDA: polarizable donors and acceptors. The driving dipole moments of the donors are given as the input, and then the donor and acceptor polarizabilities are used to calculate the resulting dipole moments of the NPs, donors, and acceptors. Redundant inputs such as the acceptor dipole moments are ignored
NP_iso_pol = True
DA_iso_pol = True
E0 = np.array([0.0, 0.0, 1.0]) # external field. Applies to the extinction crs calculation AND scattering (as a result of propagation) crs calculation
num_NP_drv_l = np.array([40])
num_NP_sca = 1
sca_direction = np.array([1.0, 0.0, 0.0]) # This must be a unit vector
num_don = -1 # number of donors; -1 -> once above each NP
d_don = 2 * nano # distance from the NP surface; +z direction
omega_0_don = 2*pi*c/(405e-9) # s-1
gamma_don = 0.1 * e / hbar # quite arbitrary for now
osc_str_don = 0.2
rad_don = 5.0e-9
num_acc = 1
d_acc = 2 * nano
omega_0_acc = 2*pi*c/(405e-9)
gamma_acc = 0.1 * e / hbar
osc_str_acc = 0.1
rad_acc = 10.0e-9
wvln_begin = 250 * nano # wavelengths are that of in vacuum [nm]
wvln_end = 650 * nano # Note that the input and the output are in wavelength,
wvln_cnt = 401 # but the caculation is done in (converted) angular frequency (omega)
wvln_begin_dielec = 250 * nano
wvln_end_dielec = 650 * nano
wvln_cnt_dielec = 401
# output parameters #
out_base_dir = "./"
out_dir = out_base_dir + "RefIdxBgr{:.4f}".format(ref_idx_bgr) + "PrmbtyBgr{:.4f}".format(prm_bgr) + "NPType" + NP_type + "nsGeom" + ns_geom_l_str + "/"
print(out_dir)
try:
os.mkdir(out_dir)
except OSError:
print("Creation of the output directory failed")
else:
print("Created the working directory {:s}".format(out_dir))
wvln_l = np.linspace(wvln_begin, wvln_end, wvln_cnt) # for the calculation and plot
omega_l = 2*pi*c / wvln_l
wvln_dielec_l = np.linspace(wvln_begin_dielec, wvln_end_dielec, wvln_cnt_dielec) # for the plot of the dielectric function of the donors and acceptors
omega_dielec_l = 2*pi*c / wvln_dielec_l
ref_idx_data = np.loadtxt(NP_ref_idx_path, skiprows=5)
ref_idx_wvln_l = ref_idx_data[:,0] * micro # [m]
ref_idx_omega_l = 2*pi*c / ref_idx_wvln_l
ref_idx_nk_l = np.array([ complex(ref_idx_data[i,1], ref_idx_data[i,2]) for i in np.arange(np.shape(ref_idx_data)[0]) ])
s_l = ref_idx_nk_l / ref_idx_bgr
pol_omega_l = ref_idx_omega_l
for calc_opt in calc_opt_l:
if calc_opt == "ext_crs_tot":
out_fig_path0 = out_dir + "extCrsTot.pdf"
fig0, ax0 = plt.subplots(1, 1, figsize=(8,6))
#ax.set_title("", fontsize=24)
ax0.set_ylabel("Ext. efficiency", fontsize=14)
ax0.set_xlabel("Wavelength (nm)", fontsize=14)
ax0.tick_params(axis='x', labelsize=12, labelrotation=0, labelcolor='black')
ax0.tick_params(axis='y', labelsize=12, labelrotation=0, labelcolor='black')
#ax.set_yscale('log')
elif calc_opt == "sca_crs_prop":
out_fig_path1 = out_dir + "scaCrsProp.pdf"
fig1, ax1 = plt.subplots(1, 1, figsize=(8,6))
#ax.set_title("", fontsize=24)
ax1.set_ylabel("Diff. sca. efficiency", fontsize=14)
ax1.set_xlabel("Wavelength (nm)", fontsize=14)
ax1.tick_params(axis='x', labelsize=12, labelrotation=0, labelcolor='black')
ax1.tick_params(axis='y', labelsize=12, labelrotation=0, labelcolor='black')
#ax.set_yscale('log')
elif calc_opt == "RET_multiDA_nonpolDA" or calc_opt == "RET_multiDA_polDA":
out_base_name2 = "RETMultiDA"
iso_pol_str2 = "NPIso" + str(NP_iso_pol)
out_fig_path2 = out_dir + out_base_name2 + iso_pol_str2 + ".pdf"
fig2, ax2 = plt.subplots(1, 2, figsize=(12,6))
#ax[0].set_title("", fontsize=24)
ax2[0].set_ylabel("Coupling factor M ($N^{2} \cdot C^{-4} \cdot m^{-2}$)", fontsize=14)
ax2[0].set_xlabel("Wavelength (nm)", fontsize=14)
ax2[0].tick_params(axis='x', labelsize=12, labelrotation=0, labelcolor='black')
ax2[0].tick_params(axis='y', labelsize=12, labelrotation=0, labelcolor='black')
ax2[0].set_yscale('log')
#ax[1].set_title("", fontsize=24)
ax2[1].set_ylabel("Enhancement factor $M^{NPs}/M^{0}$", fontsize=14)
ax2[1].set_xlabel("Wavelength (nm)", fontsize=14)
ax2[1].tick_params(axis='x', labelsize=12, labelrotation=0, labelcolor='black')
ax2[1].tick_params(axis='y', labelsize=12, labelrotation=0, labelcolor='black')
ax2[1].set_yscale('log')
else:
sys.exit("unknown calc_opt 0")
ns_cnt = 0
for ns_geom in ns_geom_l:
ns_id = "{:d} NPs".format(num_NP_l[ns_cnt])
print("For {:s}:".format(ns_id))
NP_l = []
NP_l0 = [] # an empty NP array for enhancement factor calculation
num_NP = 0 # Note that this is not necessarily an element of num_NP_l
cursor = 0 # The position for the beginning of the next sublattice
for sub_geom in ns_geom:
a = sub_geom[0]
b = sub_geom[1]
R = sub_geom[2]
S = sub_geom[3]
N = int(sub_geom[4])
sub_NP_l = [ Nanoparticle(np.array([cursor + i*a + (i-1)*i/2*b, 0.0, 0.0]), R+i*S) for i in np.arange(N) ]
NP_l.extend(sub_NP_l)
cursor += N*a + (N-1)*N/2*b # place the cursor at the position of the "phantom" NP on the right side of the sublattice
num_NP += N
# set the dipole polarizabilities of the NPs from the Mie theory
for NP in NP_l:
NP.set_interp_pol_Mie(pol_omega_l, s_l, ref_idx_bgr)
for calc_opt in calc_opt_l:
print("calc_opt: " + calc_opt)
if calc_opt == "ext_crs_tot":
# Build the Nanosystem class
don_l = []
acc_l = []
ns = Nanosystem(don_l, acc_l, NP_l, ref_idx_bgr, prm_bgr, E0) # don_l and acc_l are empty
ns_geom_crs_tot = ns.get_geom_crs_tot()
ext_crs_tot_l = np.empty(wvln_cnt)
print(" Calculation of the total extinction cross section:")
for l in np.arange(wvln_cnt):
ext_crs_tot_l[l] = ns.get_ext_crs_tot(omega_l[l])
ext_eff_tot_l = ext_crs_tot_l / ns_geom_crs_tot
out_file_ext_crs_path = out_dir + "extCrsTotNS{:04d}.out".format(ns_cnt)
out_file_ext_crs = open(out_file_ext_crs_path, 'w')
out_file_ext_crs.write("wavelength (nm) frequency ($s^-1$) ext. crs. tot.($m^2$) ext. eff. tot.\n")
for l in np.arange(wvln_cnt):
out_file_ext_crs.write("{:.4E} {:.4E} {:.8E} {:.8E}\n".format(wvln_l[l], omega_l[l], ext_crs_tot_l[l], ext_eff_tot_l[l]))
out_file_ext_crs.close()
line_id = ns_id
line_ext_eff_tot = ax0.plot(wvln_l/nano, ext_eff_tot_l, '-', label=line_id)
elif calc_opt == "sca_crs_prop":
# Build the Nanosystem class
don_l = []
acc_l = []
ns = Nanosystem(don_l, acc_l, NP_l, ref_idx_bgr, prm_bgr, E0) # don_l and acc_l are empty
ns_geom_crs_prop = ns.get_geom_crs_rgt(num_NP_sca)
print(" Calculation of the scattering cross section by propagation:")
for num_NP_drv in num_NP_drv_l:
print(" where the left {:d} NPs are driven...".format(num_NP_drv))
diff_sca_crs_prop_l = np.empty(wvln_cnt)
for l in np.arange(wvln_cnt):
diff_sca_crs_prop_l[l] = ns.get_diff_sca_crs_prop(num_NP_drv, num_NP_sca, omega_l[l], sca_direction)
diff_sca_eff_prop_l = diff_sca_crs_prop_l / ns_geom_crs_prop
out_file_diff_sca_crs_path = out_dir + "DiffScaCrsPropNS{:04d}NumDrvNP{:04d}.out".format(ns_cnt, num_NP_drv)
out_file_diff_sca_crs = open(out_file_diff_sca_crs_path, 'w')
out_file_diff_sca_crs.write("wavelength (nm) frequency ($s^-1$) diff. sca. crs. prop.($m^2$) diff. sca. eff. prop.\n")
for l in np.arange(wvln_cnt):
out_file_diff_sca_crs.write("{:.4E} {:.4E} {:.8E} {:.8E}\n".format(wvln_l[l], omega_l[l], diff_sca_crs_prop_l[l], diff_sca_eff_prop_l[l]))
out_file_diff_sca_crs.close()
line_id = "{:3d} NPs driven".format(num_NP_drv)
line_diff_sca_eff_prop = ax1.plot(wvln_l/nano, diff_sca_eff_prop_l, '-', label=line_id)
elif calc_opt == "RET_multiDA_nonpolDA" or calc_opt == "RET_multiDA_polDA":
NP_idx_begin = 0
NP_idx_end = num_NP
NP_ctr_idx = int( NP_idx_begin + int( (num_NP-1)/2 ) )
if num_don == -1:
don_idx_begin = NP_idx_begin
don_idx_end = NP_idx_end
elif num_don > 0:
# The placement of donors are suttle in the case of odd/even or even/odd num_NP and num_don
don_idx_begin = int( NP_ctr_idx - int( (num_don-1)/2 ) )
don_idx_end = int( NP_ctr_idx + int( num_don/2 ) + 1 )
else:
raise ValueError("invalid num_don")
don_idx_l = np.arange(don_idx_begin, don_idx_end)
if num_acc > 0:
acc_idx_begin = NP_ctr_idx - int( (num_acc-1)/2 )
acc_idx_end = NP_ctr_idx + int( num_acc/2 ) + 1
else:
raise ValueError("invalid num_acc")
acc_idx_l = np.arange(acc_idx_begin, acc_idx_end)
r_don_l = np.array([ NP_l[i].get_pos() + np.array([0.0, 0.0, NP_l[i].get_rad()]) + np.array([0.0, 0.0, d_don]) for i in don_idx_l ])
r_acc_l = np.array([ NP_l[i].get_pos() + np.array([0.0, 0.0,-NP_l[i].get_rad()]) + np.array([0.0, 0.0,-d_acc]) for i in acc_idx_l ])
p_don_l = np.array([ np.array([ 0.0, 0.0, 5.0]) * dp_au_to_si for i in don_idx_l ]) # donor dipole moments [C m]
p_acc_l = np.array([ np.array([ 0.0, 0.0, -5.0]) * dp_au_to_si for i in acc_idx_l ]) # acceptor dipole moments [C m]
omega_0_don_l = np.full(np.shape(don_idx_l)[0], omega_0_don)
omega_0_acc_l = np.full(np.shape(acc_idx_l)[0], omega_0_acc)
gamma_don_l = np.full(np.shape(don_idx_l)[0], gamma_don)
gamma_acc_l = np.full(np.shape(acc_idx_l)[0], gamma_acc)
osc_str_don_l = np.full(np.shape(don_idx_l)[0], osc_str_don)
osc_str_acc_l = np.full(np.shape(acc_idx_l)[0], osc_str_acc)
rad_don_l = np.full(np.shape(don_idx_l)[0], rad_don)
rad_acc_l = np.full(np.shape(acc_idx_l)[0], rad_acc)
don_l = [ Donor(r_don_l[i], p_don_l[i]) for i in np.arange(np.shape(don_idx_l)[0]) ]
for i in np.arange(np.shape(don_idx_l)[0]):
don_l[i].set_Lor_sph_params(omega_0_don_l[i], gamma_don_l[i], osc_str_don_l[i], rad_don_l[i])
acc_l = [ Acceptor(r_acc_l[i], p_acc_l[i]) for i in np.arange(np.shape(acc_idx_l)[0]) ]
for i in np.arange(np.shape(acc_idx_l)[0]):
acc_l[i].set_Lor_sph_params(omega_0_acc_l[i], gamma_acc_l[i], osc_str_acc_l[i], rad_acc_l[i])
# Build the Nanosystem class
ns = Nanosystem(don_l, acc_l, NP_l, ref_idx_bgr, prm_bgr, E0)
ns0 = Nanosystem(don_l, acc_l, NP_l0, ref_idx_bgr, prm_bgr, E0)
if calc_opt == "RET_multiDA_nonpolDA":
line_id_acc = "nonpolarizable DA"
# Calculate the list of coupling factors |e_A . E_D|^2 / |p_D|^2 for each acceptor (columns) for each wavelength (rows)
cpl_fac_ll = np.array([ ns.get_coupling_factor_1dNP_multiDA_nonpolDA(omega_l[i], NP_iso_pol=NP_iso_pol) for i in np.arange(wvln_cnt) ]) # Note that iso_pol decides the linear algebra routine (since isotropic polarizabilities lead to a symmetric interaction matrix)
## Make the structure with no NPs and calculate coupling factors ##
cpl_fac_ll0 = np.array([ ns0.get_coupling_factor_1dNP_multiDA_nonpolDA(omega_l[i], NP_iso_pol=NP_iso_pol) for i in np.arange(wvln_cnt) ])
else:
line_id_acc = "polarizable DA"
cpl_fac_ll = np.array([ ns.get_coupling_factor_1dNP_multiDA_polDA(omega_l[i], NP_iso_pol=NP_iso_pol, DA_iso_pol=DA_iso_pol) for i in np.arange(wvln_cnt) ])
cpl_fac_ll0 = np.array([ ns0.get_coupling_factor_1dNP_multiDA_polDA(omega_l[i], NP_iso_pol=NP_iso_pol, DA_iso_pol=DA_iso_pol) for i in np.arange(wvln_cnt) ])
## Calculation of the enhancement factors ##
enhan_fac_ll = cpl_fac_ll / cpl_fac_ll0
# Write the result on the output file
out_file_path = out_dir + out_base_name2 + iso_pol_str2 + "NS{:04d}.out".format(ns_cnt)
out_file = open(out_file_path, 'w')
out_file.write("wavelength (nm) frequency (s^-1) coupling factor (N^2 C^-4 m^-2) enhancement factor\n")
for i in np.arange(wvln_cnt):
out_file.write("{:.4E} {:.4E} ".format(wvln_l[i], omega_l[i]) + " ".join([ "{:.8E}".format(cpl_fac) for cpl_fac in cpl_fac_ll[i] ]) + " " + " ".join([ "{:.8E}".format(enhan_fac) for enhan_fac in enhan_fac_ll[i] ]) + "\n")
out_file.close()
# Plot
for acc_idx_zeroed in np.arange(np.shape(acc_idx_l)[0]):
line_cpl_fac = ax2[0].plot(wvln_l/nano, cpl_fac_ll[:,acc_idx_zeroed], '-', label=line_id_acc)
line_enhan_fac = ax2[1].plot(wvln_l/nano, enhan_fac_ll[:,acc_idx_zeroed], '-', label=line_id_acc)
else:
sys.exit("unknown calc_opt 1")
ns_cnt += 1
# Additional plotting options
left = 0.125 # the left side of the subplots of the figure
right = 0.9 # the right side of the subplots of the figure
bottom = 0.2 # the bottom of the subplots of the figure
top = 0.9 # the top of the subplots of the figure
wspace = 0.4 # the amount of width reserved for blank space between subplots
hspace = 0.2 # the amount of height reserved for white space between subplots
plt.subplots_adjust(left, bottom, right, top, wspace, hspace)
#plt.tight_layout()
#plt.legend(loc="best", fontsize=14)
for calc_opt in calc_opt_l:
if calc_opt == "ext_crs_tot":
line_text_ext_eff = "$n$ = {:.4f}\n$\mu$ = {:.4f}\n".format(ref_idx_bgr, prm_bgr) + NP_type + " NPs"
plt.text(0.8, 0.3, line_text_ext_eff, horizontalalignment='center', verticalalignment='center', transform=ax0.transAxes, fontsize=12)
ax0.legend(loc="best", fontsize=12)
plt.savefig(out_fig_path0)
elif calc_opt == "sca_crs_prop":
line_text_diff_sca_eff = "# scattering NPs: {:d}\nscattering direction: {:s}".format(num_NP_sca, " ".join([ str(item) for item in sca_direction ]))
plt.text(0.8, 0.7, line_text_diff_sca_eff, horizontalalignment='center', verticalalignment='center', transform=ax1.transAxes, fontsize=12)
ax1.legend(loc="best", fontsize=12)
plt.savefig(out_fig_path1)
elif calc_opt == "RET_multiDA_nonpolDA" or calc_opt == "RET_multiDA_polDA":
ax2[0].legend(loc="best", fontsize=12)
ax2[1].legend(loc="best", fontsize=12)
plt.savefig(out_fig_path2)
else:
sys.exit("Unknown calc_opt 2")
out_base_name4 = "DADielecFn"
out_fig_path4 = out_dir + out_base_name4 + ".pdf"
fig4, ax4 = plt.subplots(1, 1, figsize=(8,6))
ax4t = ax4.twinx()
ax4.set_ylabel("Re(eps)", fontsize=14, color='red')
ax4t.set_ylabel("Im(eps)", fontsize=14, color='blue')
ax4.set_xlabel("Wavelength (nm)", fontsize=14)
line_text4 = r'$\omega_{0}$' + ": " + "{:.2f}nm".format(2*pi*c/omega_0_don/nano) + "\n" + r'$\gamma$' + ": {:.2f}eV".format(gamma_don/e*hbar) + "\n" + "osc_str: {:.4f}".format(osc_str_don)
plt.text(0.8, 0.3, line_text4, horizontalalignment='center', verticalalignment='center', transform=ax4.transAxes, fontsize=12)
eps_don = get_red_dielec_fn_Lor(omega_dielec_l, omega_0_don, gamma_don, osc_str_don, ref_idx_bgr, eps_inf=1.0)
ax4.plot(wvln_dielec_l/nano, np.real(eps_don), color='red')
ax4t.plot(wvln_dielec_l/nano, np.imag(eps_don), color='blue')
plt.savefig(out_fig_path4)
| StarcoderdataPython |
11359038 | from funcoes import *
def minimo_percurso(grafo, rota):
    """Return the total cost of traversing *rota* through *grafo*.

    :param grafo: adjacency mapping where ``grafo[u][v]`` is the edge cost
        from vertex ``u`` to vertex ``v``.
    :param rota: ordered sequence of vertex keys describing the route.
    :return: sum of the edge costs between consecutive route vertices
        (0 for routes with fewer than two vertices).
    """
    # Pair each vertex with its successor instead of the original
    # range(len())-with-break indexing loop.
    return sum(grafo[u][v] for u, v in zip(rota, rota[1:]))
def mudar(lista, index1, index2):
    """Swap the elements at *index1* and *index2* in place and return *lista*."""
    tmp = lista[index1]
    lista[index1] = lista[index2]
    lista[index2] = tmp
    return lista
def sortear_numero(rota, start=None, end=None):
    """Draw two distinct random indices of *rota*.

    By default the draw range is ``range(1, len(rota) - 1)``, so the first
    and last positions of the route are never selected.

    :param rota: route whose length bounds the default index range.
    :param start: optional inclusive lower bound of the draw range.
    :param end: optional exclusive upper bound of the draw range.
    :return: tuple ``(n1, n2)`` of two distinct indices.
    """
    # Idiom fix: compare against None with 'is', not '=='.
    if start is None and end is None:
        start, end = 1, len(rota) - 1
    n1, n2 = random.sample(range(start, end), 2)
    return n1, n2
def melhor_rota(rota_atual, rota_nova, custo_atual, custo_novo):
    """Return the cheaper of the two (route, cost) pairs, keeping the
    current one on ties."""
    improved = custo_novo < custo_atual
    return (rota_nova, custo_novo) if improved else (rota_atual, custo_atual)
# dictionary: dij.grafo = {}  (vertex -> {neighbor: cost})
# list of strings: dij.vertices = []
class HeuristicaBuscaLocal:
    """Local-search (random 2-swap) heuristic over a route.

    ``dij`` is expected to expose ``dij.grafo``, a mapping of
    ``{vertex: {neighbor: cost}}`` consumed by ``minimo_percurso``.
    """
    def __init__(self,dij=None,rota_inicial=None,custo_obtido=None):
        self.dij=dij                    # graph wrapper (provides .grafo)
        self.rota_inicial=rota_inicial  # starting route (list of vertex keys)
        self.custo_obtido=custo_obtido  # known cost of the starting route
    def mudar_rota(self,nova_rota):
        """Replace the starting route and return it."""
        self.rota_inicial=nova_rota
        return self.rota_inicial
    def obter_solucao(self,depth=None,custo_otimo=None,custo_obtido=None):
        """Run up to *depth* random 2-swap iterations; return the best
        ``(route, cost)`` pair found.

        Stops early when the cost comes within 10 of *custo_otimo*, when it
        drops below *custo_obtido*, or once the cost has equalled the
        initial cost on 50 iterations (stagnation).
        """
        rota_atual=self.rota_inicial[0:]  # work on a copy of the route
        custo_atual=self.custo_obtido
        c=0               # stagnation counter (cost still equal to the start)
        p=custo_atual     # initial cost, reference for the stagnation test
        for i in range(depth):
            n1,n2=sortear_numero(rota_atual)  # two random interior positions
            rota_nova=rota_atual[0:]
            mudar(rota_nova,n1,n2)            # swap them on the copy
            custo_novo=minimo_percurso(self.dij.grafo,rota_nova)
            print(f'interação {i}: custo novo obtido:{custo_novo}')
            print(f'Caminho {" -> ".join(rota_nova)}')
            print("_"*150)
            # Keep whichever of the two routes is cheaper.
            rota_atual,custo_atual=melhor_rota(rota_atual,rota_nova,custo_atual,custo_novo)
            if custo_atual<=custo_otimo+10:
                # Close enough to the known optimum — stop.
                print("Solução ideal:{} e {}.".format(custo_otimo,custo_atual))
                break
            if custo_atual<custo_obtido:
                # Beat the externally supplied reference cost — stop.
                print("Custo encontrado tem um valor mais otimizado do que obtido:{} e {}.".format(custo_obtido,custo_atual))
                break
            if custo_atual==p:
                c=c+1
                if c==50:
                    print("50 valores localizados parando.")
                    break
        print(c)
        return rota_atual,custo_atual
class HeuristicaGulosaAleatoria:
def __init__(self,dij):
self.dij=dij
def obter_solucao(self,custo_otimo):
rota=['1']
| StarcoderdataPython |
11241670 | # Generated by Django 2.2.12 on 2020-06-14 02:08
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django 2.2.12 on 2020-06-14.  Removes scalar
    # per-person fields from GroupOfTitle and TimeKeeping, re-links both
    # models to the swappable user model, narrows Camera.status to a fixed
    # choice set, and introduces the UserExtraData profile model.
    dependencies = [
        ('tools', '0004_auto_20200614_0208'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('management', '0004_auto_20200413_0659'),
    ]
    operations = [
        # GroupOfTitle loses its scalar name/position columns.
        migrations.RemoveField(
            model_name='groupoftitle',
            name='name',
        ),
        migrations.RemoveField(
            model_name='groupoftitle',
            name='position',
        ),
        # TimeKeeping drops its per-person columns (age, emo, image link,
        # check-in/out timestamps, name, prob).
        migrations.RemoveField(
            model_name='timekeeping',
            name='age',
        ),
        migrations.RemoveField(
            model_name='timekeeping',
            name='emo',
        ),
        migrations.RemoveField(
            model_name='timekeeping',
            name='image_link',
        ),
        migrations.RemoveField(
            model_name='timekeeping',
            name='last_checkin',
        ),
        migrations.RemoveField(
            model_name='timekeeping',
            name='last_checkout',
        ),
        migrations.RemoveField(
            model_name='timekeeping',
            name='name',
        ),
        migrations.RemoveField(
            model_name='timekeeping',
            name='prob',
        ),
        # The removed data is now reached through relations instead.
        migrations.AddField(
            model_name='groupoftitle',
            name='user',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='timekeeping',
            name='image_detail',
            field=models.OneToOneField(null=True, on_delete=django.db.models.deletion.SET_NULL, to='tools.ImageDetail'),
        ),
        migrations.AddField(
            model_name='timekeeping',
            name='user',
            field=models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        # Camera.status becomes a fixed three-state choice field.
        migrations.AlterField(
            model_name='camera',
            name='status',
            field=models.CharField(choices=[('Active', 'ACTIVE'), ('Paused', 'PAUSED'), ('Disabled', 'DISABLED')], default='Paused', max_length=10),
        ),
        # New per-user profile model (avatar, age, bio, organization link).
        migrations.CreateModel(
            name='UserExtraData',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('avatar', models.ImageField(upload_to='')),
                ('age', models.IntegerField()),
                ('about_me', models.TextField()),
                ('organization', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='management.Organization')),
            ],
        ),
    ]
| StarcoderdataPython |
1742893 | #!/usr/bin/python3
import copy
import time
# Parse puzzle lines of the form
#   "ingr1 ingr2 ... (contains allergen1, allergen2)"
with open('21_input', 'r') as f:
    lines = f.readlines()
lines = [l.strip() for l in lines]
ingredients_list = []
allergens_list = []
for l in lines:
    left, right = l.split('(')
    ingredients = left.strip().split()
    # right.strip()[9:-1] drops the "contains " prefix and the trailing ')'.
    allergens = [a.replace(',','') for a in right.strip()[9:-1].split()]
    ingredients_list.append(ingredients)
    allergens_list.append(allergens)
#print(ingredients_list)
#print(allergens_list)
all_ingredients = set([item for sublist in ingredients_list for item in sublist])
all_allergens = set([item for sublist in allergens_list for item in sublist])
# Initially every allergen could be contained in any ingredient; each
# allergen gets its own independent candidate set.
allergens_map = {a: copy.deepcopy(all_ingredients) for a in all_allergens}
allergy_ingredients = set()
def matched(allergens_map):
    """True once every allergen maps to exactly one candidate ingredient."""
    return all(len(candidates) == 1 for candidates in allergens_map.values())
iter_count = 0
# Fixed-point elimination: keep intersecting candidate sets until every
# allergen is pinned to exactly one ingredient.
while not matched(allergens_map):
    #print(allergens_map, allergy_ingredients)
    # Remove ingredients already claimed by a resolved allergen from the
    # still-unresolved allergens.
    for a, i in allergens_map.items():
        if len(i) != 1:
            allergens_map[a] -= allergy_ingredients
    # An allergen can only be in an ingredient that appears on every line
    # listing that allergen.
    for a_list, i_list in zip(allergens_list, ingredients_list):
        for a in a_list:
            allergens_map[a] &= set(i_list)
    # Collect allergens that have been narrowed to a single ingredient.
    for a, i in allergens_map.items():
        if len(i) == 1:
            allergy_ingredients.add(next(iter(i)))
    iter_count += 1
    #print(iter_count)
    # NOTE(review): debug throttle left in — slows every iteration by 0.2s
    # for no functional reason.
    time.sleep(0.2)
#print(allergy_ingredients)
#print(allergens_map)
# Part 2 answer: the dangerous ingredients sorted by allergen name,
# joined with commas.
allergy_ingredient_pairs = [(a,next(iter(allergens_map[a]))) for a in allergens_map if len(allergens_map[a]) == 1]
allergy_ingredient_pairs = sorted(allergy_ingredient_pairs)
canonical_dangerous_ingredient_list = ','.join([p[1] for p in allergy_ingredient_pairs])
print(canonical_dangerous_ingredient_list)
| StarcoderdataPython |
11272691 | # -*- coding: utf-8 -*-
import os
import time
import hashlib
from django.conf import settings
import requests
# Directory layout used by the job/render helpers below
# (relative to settings.BASE_DIR unless absolute):
dataroot = "pool/"          # live job directories
scriptsroot = "jscharts/"   # chart-generating Python scripts
phantomjs = "phantomjs/"    # PhantomJS binary and its render scripts
staticjs = "d3b/static/"    # static JS injected into rendered pages
env = "d3b_py2/"            # NOTE(review): not referenced in this chunk
tempdir = "tmp/"            # scratch space for rendered HTML/PNG files
archivepath = "/var/www/html/server/pool/"  # fallback for archived jobs
def submit_job( reqf, jobname ):
    """Store an uploaded mapping file under a fresh job directory.

    Creates ``pool/<md5-of-timestamp>/``, saves the uploaded file there
    (converting ``*.biom`` uploads to ``emap.txt`` via biom2emap.py),
    records the human-readable job name, and returns the job id.

    :param reqf: request FILES mapping holding the upload under 'file'.
    :param jobname: display name written to the job's ``name`` file.
    :return: hex job-id string.
    """
    abspath = settings.BASE_DIR + '/'
    # Job id = md5 of the current timestamp.
    job = str( hashlib.md5( str( time.time() ).encode() ).hexdigest() )
    os.mkdir( abspath + dataroot + job )
    # NOTE(review): os.chdir changes the CWD of the whole process — unsafe
    # if several requests are handled concurrently in one process.
    os.chdir( abspath + dataroot + job )
    f = reqf[ 'file' ]
    if len( f.name ) > 4 and f.name[ -4: ].lower() == "biom":
        # BIOM upload: save as-is, then convert to the emap text format.
        with open( abspath + dataroot + job + '/emap.biom', 'wb+') as destination:
            for chunk in f.chunks():
                destination.write(chunk)
        cmd = "python " + abspath + scriptsroot + "biom2emap.py emap.biom >emap.txt 2>convert.err"
        os.system( cmd )
    else:
        # Any other upload is assumed to already be in emap format.
        with open( abspath + dataroot + job + '/emap.txt', 'wb+') as destination:
            for chunk in f.chunks():
                destination.write(chunk)
    with open( abspath + dataroot + job + '/name', 'wt+') as destination:
        destination.write( jobname )
    os.chdir( abspath )  # restore the working directory
    return job
def run_script( params, job, script ):
    """Run one of the jscharts scripts for a job and return its stdout.

    The query parameters plus a ``datapath`` pointing at the job directory
    (falling back to the archive copy) are handed to the script through the
    QUERY_STRING environment variable, CGI-style.

    :param params: raw query string to forward to the script.
    :param job: job id (directory name under pool/ or the archive).
    :param script: script base name (without .py) under jscharts/.
    :return: the script's stdout as a string.
    """
    abspath = settings.BASE_DIR + '/'
    jobpath = abspath + dataroot + job
    if not os.path.isdir( jobpath ):
        # Job no longer in the live pool — use the archived copy.
        jobpath = archivepath + job
    nparams = params
    if len( nparams ) > 0:
        nparams += "&"
    # NOTE(review): when params is non-empty this yields a double '&&'
    # before datapath — verify the scripts tolerate the empty parameter.
    nparams += "&datapath=" + jobpath
    os.putenv( "QUERY_STRING", nparams )
    if False:  # disabled legacy invocation kept for reference
        cmd = abspath + scriptsroot + "runscript.py " + jobpath + " " + script + " regular"
        res = os.popen( cmd ).read()
    cmd = "python " + abspath + scriptsroot + script + ".py" + " 2>python.err"
    res = os.popen( cmd ).read()
    #print(res)
    if os.path.isfile( "python.err" ):
        # Surface any stderr output from the script in the server log.
        errres = open( "python.err" ).read()
        print( errres )
    #with open( "python.err" ) as f:
    #    res += f.read()
    #os.chdir( abspath )
    return res
def render_png( params, job, script, host, jscripts ):
    """Render a chart script's HTML output to PNG bytes via PhantomJS.

    Runs the script CGI-style (like run_script), wraps its output in a
    minimal HTML page referencing the given JS files, screenshots the page
    with PhantomJS and returns the PNG bytes.

    :param params: query string forwarded to the chart script.
    :param job: job id; also used as the temp-file base name.
    :param script: chart script base name under jscharts/.
    :param host: base URL for the JS includes.
        NOTE(review): immediately overwritten by a local file:// path
        below, so the caller's value is ignored — confirm this is intended.
    :param jscripts: iterable of JS file names to include.
    :return: PNG image bytes, or "" if PhantomJS produced no file.
    """
    abspath = settings.BASE_DIR + '/'
    jobpath = abspath + dataroot + job
    if not os.path.isdir( jobpath ):
        jobpath = archivepath + job
    os.putenv( "QUERY_STRING", params + "&datapath=" + jobpath + "&resolution=high" )
    res = os.popen( "python " + abspath + scriptsroot + script + ".py" ).read()
    tempname = abspath + tempdir + job
    fo = open( tempname + ".html", "w" )
    fo.write( "<html><head>\n" )
    host = "file://" + abspath + staticjs  # serve the JS straight from disk
    for jscript in jscripts:
        fo.write( "<script src=\"%s\"></script>\n" % ( host + jscript ) )
    fo.write( "</head><body style=\"background-color:#f2f2f2;\">\n" )
    fo.write( res )
    fo.write( "</body></html>" )
    fo.close()
    cmd = abspath + phantomjs + "phantomjs " + abspath + phantomjs + "d3b_render.js %s.html %s.png" % ( tempname, tempname )
    print( cmd )
    os.system( cmd )
    pngres = ""
    if os.path.isfile( tempname + ".png" ):
        with open( tempname + ".png", "rb" ) as f:
            pngres = f.read()
        # NOTE(review): the .html temp file is never removed, only the .png.
        os.remove( tempname + ".png" )
    return pngres
def render_svg( params, job, script, host, jscripts ):
    """Render a chart script's output and extract the resulting SVG markup.

    Like render_png, but uses PhantomJS to save the fully rendered DOM and
    returns the first ``<svg> ... </svg>`` span found in it.

    :param params: query string forwarded to the chart script.
    :param job: job id; also used as the temp-file base name.
    :param script: chart script base name under jscharts/.
    :param host: base URL for the JS includes.
        NOTE(review): overwritten by a local file:// path below, same as
        in render_png — the caller's value is ignored.
    :param jscripts: iterable of JS file names to include.
    :return: SVG markup string, or "" when no <svg> element was found.
    """
    abspath = settings.BASE_DIR + '/'
    jobpath = abspath + dataroot + job
    if not os.path.isdir( jobpath ):
        jobpath = archivepath + job
    os.putenv( "QUERY_STRING", params + "&datapath=" + jobpath + "&resolution=high" )
    res = os.popen( "python " + abspath + scriptsroot + script + ".py" ).read()
    tempname = abspath + tempdir + job
    fo = open( tempname +".html", "w" )
    fo.write( "<html><head>\n" )
    host = "file://" + abspath + staticjs
    for jscript in jscripts:
        fo.write( "<script src=\"%s\"></script>\n" % ( host + jscript ) )
    fo.write( "</head><body>\n" )
    fo.write( res )
    fo.write( "</body></html>" )
    fo.close()
    cmd = abspath + phantomjs + "phantomjs " + abspath + phantomjs + "d3b_savepage.js %s.html %s_rendered.html" % ( tempname, tempname )
    print( cmd )
    os.system( cmd )
    svgres = ""
    if os.path.isfile( tempname + "_rendered.html" ):
        with open( tempname + "_rendered.html" ) as f:
            htmlres = f.read()
        # Cut out the first <svg> ... last </svg> span of the rendered DOM.
        svgbeg = htmlres.find( "<svg" )
        svgend = htmlres.rfind( "</svg>" )
        if svgbeg != -1 and svgend != -1:
            svgres = htmlres[ svgbeg : svgend + 6 ]
    return svgres
ss = """
<?php
include "std_include.php";
$name = $_POST[ 'name' ];
$datatype = $_POST['datatype'];
if (!isset( $_POST['input'] ) ) exit();
$input = $_POST['input'];
$job = md5( microtime() );
system( "mkdir pool/$job" );
system( "chmod 777 pool/$job" );
write_name( $job, $name );
$tag = substr( $input, 1, 3 );
$tag1 = substr( $input, 0, 1 );
chdir( "pool/$job" );
if ( $tag == "HDF" || $tag1 == "{" || !is_numeric( $tag1 ) )
{
$fd = fopen( "input.biom", "w" );
fwrite( $fd, $input );
fclose( $fd );
system( "python ../../aux/biom2emap.py input.biom >emap.txt" );
}
else
{
$fd = fopen( "emap.txt", "w" );
$sinp = explode( "\n", $input );
foreach ( $sinp as $k=>$v )
{
fwrite( $fd, rtrim( $v ) . "\t\n" );
}
fclose( $fd );
}
exec( "python ../../emap/init_tags.py" );
echo "Job id: ".$job;
?>
""" | StarcoderdataPython |
5189298 | <filename>magni/cs/reconstruction/sl0/_sigma_start.py
"""
..
Copyright (c) 2015-2017, Magni developers.
All rights reserved.
See LICENSE.rst for further information.
Module providing functions for calculating the starting value of sigma used in
the Smoothed l0 algorithms.
Routine listings
----------------
calculate_using_fixed(var)
Calculate the fixed sigma value.
calculate_using_reciprocal(var)
Calculate a sigma value in a 'reciprocal' way.
get_function_handle(method)
Return a function handle to a given calculation method.
"""
from __future__ import division
def wrap_calculate_using_fixed(var):
    """
    Arguments wrapper for calculate_using_fixed.

    Parameters
    ----------
    var : dict
        Dictionary of variables; must provide 'convert' (a type-conversion
        callable) and the parameter 'sigma_start_fixed' under 'param'.

    Returns
    -------
    calculate_using_fixed : callable
        Zero-argument function returning the fixed sigma value.
    """
    convert = var['convert']
    # Convert once at wrap time; the closure then just returns the constant.
    sigma_start = convert(var['param']['sigma_start_fixed'])

    def calculate_using_fixed():
        """
        Calculate the fixed sigma value.

        Returns
        -------
        sigma : float
            The sigma value to be used in the SL0 algorithm.
        """
        # Doc fix: the closure takes no arguments; the previous docstring
        # wrongly documented a 'var' parameter.
        return sigma_start

    return calculate_using_fixed
def wrap_calculate_using_reciprocal(var):
    """
    Arguments wrapper for calculate_using_reciprocal.

    Parameters
    ----------
    var : dict
        Dictionary of variables; must provide 'convert' (a type-conversion
        callable), a matrix 'A' (its m/n shape defines the undersampling
        ratio delta), and the parameter 'sigma_start_reciprocal' under
        'param'.

    Returns
    -------
    calculate_using_reciprocal : callable
        Zero-argument function returning ``1 / (factor * delta)``.
    """
    convert = var['convert']
    # delta = m / n, the undersampling ratio of the measurement matrix.
    delta = var['A'].shape[0] / var['A'].shape[1]
    factor = convert(var['param']['sigma_start_reciprocal'])

    def calculate_using_reciprocal():
        """
        Calculate a sigma value in a 'reciprocal' way.

        Returns
        -------
        sigma : float
            The sigma value to be used in the SL0 algorithm.
        """
        # Doc fix: the closure takes no arguments; the previous docstring
        # wrongly documented a 'var' parameter.
        return 1 / (factor * delta)

    return calculate_using_reciprocal
def get_function_handle(method, var):
    """
    Return a function handle to a given calculation method.

    Parameters
    ----------
    method : str
        Identifier of the calculation method to return a handle to.
    var : dict
        Local variables needed in the sigma method.

    Returns
    -------
    f_handle : function
        Handle to the calculation method defined in this globals scope.
    """
    wrapper_name = 'wrap_calculate_using_' + method
    wrapper = globals()[wrapper_name]
    return wrapper(var)
| StarcoderdataPython |
1869885 | import os
import logging
from digi import mount
def run():
    """Start a Mounter configured entirely from environment variables.

    Required: GROUP, VERSION, PLURAL, NAME.
    Optional: NAMESPACE (default "default"), LOGLEVEL (default logging.INFO).
    """
    # TBD read from the mount model
    # TBD dq ->
    env = os.environ
    namespace = env.get("NAMESPACE", "default")
    log_level = int(env.get("LOGLEVEL", logging.INFO))
    mounter = mount.Mounter(env["GROUP"],
                            env["VERSION"],
                            env["PLURAL"],
                            env["NAME"],
                            namespace,
                            log_level=log_level)
    mounter.start()
if __name__ == '__main__':
run()
| StarcoderdataPython |
1826343 | <filename>seismoTK/S_Filter.py
from matplotlib.colors import Colormap
from . import Polarization
class S_Filter(Polarization):
    """Polarization subclass adding S-transform-style gridding and plotting
    of the degree-of-polarization (DOP) measurements."""

    def S(self):
        """Grid the DOP column of ``self.Pol`` onto a regular time/frequency mesh.

        Drops the LIN and BAZ columns into ``self.SF`` and interpolates DOP
        over a 3600 x 100 grid via ``self.xyz2grd``.
        """
        self.SF = self.Pol.drop(columns=["LIN","BAZ"])
        # TODO(review): the gridded arrays are computed but currently
        # discarded; store or return them if S() is meant to produce output.
        x,y,z = self.xyz2grd(self.SF["TIME"],self.SF["FREQ"],self.SF["DOP"],xn=3600,yn=100)

    def plotspec(self, psx, cmap, lofreq=None, hifreq=None, t1=None, t2=None):
        """Show *psx* as a spectrogram-style image with a log frequency axis.

        :param psx: 2-D array to display.
        :param cmap: matplotlib colormap (name or Colormap instance).
        :param lofreq: lower frequency bound of the image extent.
        :param hifreq: upper frequency bound of the image extent.
        :param t1: start time of the image extent.
        :param t2: end time of the image extent.
        """
        # Bug fix: plt was only imported inside S(), so this method raised
        # NameError at runtime; import it locally here as well.
        import matplotlib.pyplot as plt
        extent = [0, 0, 0, 0]
        if t1 is not None and t2 is not None:
            extent[0] = t1
            extent[1] = t2
        if lofreq is not None:
            extent[2] = lofreq
        if hifreq is not None:
            extent[3] = hifreq
        im = plt.imshow(psx, cmap=cmap, extent=extent, aspect='auto', origin='lower')
        plt.yscale("log")
        # Bug fix: the colorbar previously referenced an undefined name
        # 'sc'; attach it to the image returned by imshow instead.
        plt.colorbar(im, orientation='horizontal')
        plt.show()
3554973 | """
doc
"""
# message module
from .schemas.types import ( # type: ignore
# Message
MessageType,
MessagePayload,
# Contact
ContactGender,
ContactType,
ContactPayload,
# Friendship
FriendshipType,
FriendshipPayload,
# Room
RoomPayload,
RoomMemberPayload,
# UrlLink
# RoomInvitation
RoomInvitationPayload,
# Image
ImageType,
# Event
EventType,
)
from .puppet import (
Puppet,
PuppetOptions
)
from .file_box import FileBox
from .schemas.message import (
MessageQueryFilter,
)
from .schemas.contact import (
ContactQueryFilter
)
from .schemas.friendship import (
FriendshipSearchQueryFilter
)
from .schemas.room import (
RoomQueryFilter,
RoomMemberQueryFilter,
)
from .schemas.url_link import UrlLinkPayload
from .schemas.mini_program import MiniProgramPayload
from .schemas.event import (
EventScanPayload,
ScanStatus,
EventDongPayload,
EventLoginPayload,
EventReadyPayload,
EventLogoutPayload,
EventResetPayload,
EventRoomTopicPayload,
EventRoomLeavePayload,
EventRoomJoinPayload,
EventRoomInvitePayload,
EventMessagePayload,
EventHeartbeatPayload,
EventFriendshipPayload,
EventErrorPayload
)
from .logger import get_logger
# Explicit public API of the package: the names re-exported by the imports
# above.
__all__ = [
    'Puppet',
    'PuppetOptions',
    'ContactGender',
    'ContactPayload',
    'ContactQueryFilter',
    'ContactType',
    'FileBox',
    'FriendshipType',
    'FriendshipSearchQueryFilter',
    'FriendshipPayload',
    'MessagePayload',
    'MessageQueryFilter',
    'MessageType',
    'UrlLinkPayload',
    'RoomQueryFilter',
    'RoomPayload',
    'RoomMemberQueryFilter',
    'RoomMemberPayload',
    'RoomInvitationPayload',
    'MiniProgramPayload',
    'EventScanPayload',
    'ScanStatus',
    'EventDongPayload',
    'EventLoginPayload',
    'EventReadyPayload',
    'EventLogoutPayload',
    'EventResetPayload',
    'EventFriendshipPayload',
    'EventHeartbeatPayload',
    'EventMessagePayload',
    'EventRoomInvitePayload',
    'EventRoomJoinPayload',
    'EventRoomLeavePayload',
    'EventRoomTopicPayload',
    'EventErrorPayload',
    'ImageType',
    'EventType',
    'get_logger'
]
6565215 | <reponame>Draculinio/CherryAS
from Driver import *
from Capabilities import *
# Demo: drive DuckDuckGo through the custom Driver/Capabilities wrappers.
caps = Capabilities()
caps.add_desired_capability('browserName', 'chrome')
caps.add_chrome_option('binary', "C:\\Program Files (x86)\\Google Chrome\\Application\\chrome.exe")
caps.add_desired_capability('platform', 'ANY')
driver = Driver()
driver.start(caps.capability, '9000', 'chrome')
driver.navigate("http://www.duckduckgo.com")
print(driver.get_url())
print(driver.get_title())
driver.fullscreen()
driver.minimize()
driver.maximize()
# Start manipulating elements
#search_input = driver.get_element('xpath', '//*[@id ="search_form_input_homepage"]')
search_input = driver.get_element('id', 'search_form_input_homepag') #Change this
search_button = driver.get_element('xpath', '//*[@id ="search_button_homepage"]')
# NOTE(review): 'privacy' is looked up but never used below.
privacy = driver.get_element_by_property('class', 'js-popout-link js-showcase-popout ddgsi ddgsi-down')
#duckduckgo_logo = driver.get_element('link text', 'About DuckDuckGo')
#span = driver.get_element('tag name', 'span')
#driver.click(duckduckgo_logo)
#driver.back()
#driver.click(span)
#driver.back()
driver.write(search_input, 'Draculinio')
driver.click(search_button)
driver.direction('back')
driver.direction('forward')
driver.back()
driver.forward()
driver.refresh()
#Legacy (it works but not recomended)
#driver.new_window()
#element = Elements(host, session_id)
#interaction = Interactions(host, session_id)
#search_input = element.get_element('xpath', '//*[@id ="search_form_input_homepage"]')
#search_button = element.get_element('xpath', '//*[@id ="search_button_homepage"]')
#interaction.write(search_input, 'Draculinio')
#interaction.click(search_button)
driver.close_browser()
driver.quit()
| StarcoderdataPython |
8049170 | from base_analayzer import BaseAnalyzer
from datas.waypoint import Waypoint
import copy
import utils.utility as util
OFFSET = 1.05  # NOTE(review): not referenced anywhere in this chunk — confirm before removing

class LaneAnalyzer(BaseAnalyzer):
    """Derives road/lane/waypoint-index fields for every vehicle in a
    measurement by matching the recorded x/z positions against the map's
    waypoints, then writes the derived fields back to InfluxDB."""

    def exists_fields(self, measurement, vehicle):
        # True when all three derived fields already exist for this vehicle,
        # i.e. the vehicle has been analyzed before.
        return self.influxdb_accessor.exists_field(measurement, vehicle + "_road") and \
            self.influxdb_accessor.exists_field(measurement, vehicle + "_lane") and \
            self.influxdb_accessor.exists_field(
                measurement, vehicle + "_waypoint_index")

    def get_vehicle_data(self, measurement, vehicles):
        """
        Fetch the x/y/z position series for each vehicle.

        :param measurement: InfluxDB measurement name to query.
        :param vehicles: iterable of vehicle field prefixes.
        :return: dict mapping vehicle name to its query result; vehicles
            with no data are omitted.
        """
        vehicle_dict = {}
        for vehicle in vehicles:
            query = 'select {0}_x, {0}_y, {0}_z from "{1}"'.format(
                vehicle, measurement)
            results = self.influxdb_accessor.query(query)
            if results is not None:
                vehicle_dict[vehicle] = results
        return vehicle_dict

    def get_waypoint_data(self, map_id):
        """
        Load all waypoints of a map from MongoDB.

        :param map_id: identifier of the map document.
        :return: single-entry dict ``{gid: [Waypoint, ...]}`` flattening
            every road/lane/waypoint of the map.
        """
        datas = self.mongo_accessor.get_waypoints_from_map_id(map_id)
        waypoints_list = []
        for road in datas.get('roads', []):
            road_name = road.get('name')
            for lane in road.get('lanes', []):
                lane_name = lane.get('name')
                lane_width = lane.get('width')
                for waypoint in lane.get('waypoints', []):
                    waypoint_index = waypoint.get('index')
                    point = waypoint.get('point')
                    waypoint_obj = Waypoint(
                        road_name, lane_name, waypoint_index, lane_width, point)
                    waypoints_list.append(waypoint_obj)
        return {datas["gid"]: waypoints_list}

    def calc_range(self, vehicle, vehicle_datas, waypoint_dict):
        """
        Find, for every sample of *vehicle*, the nearest waypoint.

        :param vehicle: vehicle field prefix.
        :param vehicle_datas: time-ordered samples containing
            ``<vehicle>_x`` / ``<vehicle>_y``.
        :param waypoint_dict: ``{gid: [Waypoint, ...]}`` from
            get_waypoint_data.
        :return: list of dicts holding the sample time and the nearest
            waypoint's road, lane and index.
        """
        datas = []
        for vehicle_data in vehicle_datas:
            vehicle_x = vehicle_data.get(vehicle + "_x")
            # three.js and UE4 swap the y/z axes, hence the sign flip on y
            vehicle_z = -vehicle_data.get(vehicle + "_y")
            prev_data = None
            prev = None
            # Linear scan over every waypoint for the closest one.
            for _id, waypoint_datas in waypoint_dict.items():
                for waypoint_data in waypoint_datas:
                    waypoint_x = waypoint_data.get_x()
                    waypoint_z = waypoint_data.get_z()
                    result = util.calc_distance(
                        vehicle_x, vehicle_z, waypoint_x, waypoint_z)
                    if prev is None or result < prev:
                        # first candidate, or a new minimum distance
                        prev = result
                        prev_data = waypoint_data
            if prev_data is not None:
                datas.append({'time': vehicle_data['time'],
                              vehicle + '_road': prev_data.get_road_name(),
                              vehicle + '_lane': prev_data.get_lane_name(),
                              vehicle + '_waypoint_index': prev_data.get_waypoint_index(),
                              })
        return datas

    def get_vehicle_lane(self, vehicle_dict, waypoint_dict):
        """
        Run calc_range for every vehicle.

        :param vehicle_dict: ``{vehicle: samples}`` from get_vehicle_data.
        :param waypoint_dict: ``{gid: [Waypoint, ...]}``.
        :return: ``{vehicle: [nearest-waypoint dicts]}``.
        """
        vehicle_add_data = {}
        for vehicle, vehicle_datas in vehicle_dict.items():
            datas = self.calc_range(vehicle, vehicle_datas, waypoint_dict)
            vehicle_add_data[vehicle] = datas
        return vehicle_add_data

    def create_points(self, measurement, vehicle, vehicle_data):
        """
        Convert calc_range results into InfluxDB point payloads.

        :param measurement: target measurement name.
        :param vehicle: vehicle field prefix.
        :param vehicle_data: list of dicts produced by calc_range.
        :return: list of InfluxDB point dicts.
        """
        data_list = []
        for data in vehicle_data:
            vehicle_road = vehicle + '_road'
            vehicle_lane = vehicle + '_lane'
            vehicle_waypoint_index = vehicle + '_waypoint_index'
            _format = {'fields': {vehicle_road: data.get(vehicle_road),
                                  vehicle_lane: data.get(vehicle_lane),
                                  vehicle_waypoint_index: data.get(vehicle_waypoint_index),
                                  },
                       'measurement': measurement,
                       'time': data.get('time')}
            data_list.append(_format)
        return data_list

    def write_data(self, measurement, vehicle_add_data):
        """
        Write the derived points of every vehicle to InfluxDB.

        :param measurement: target measurement name.
        :param vehicle_add_data: ``{vehicle: [point data, ...]}``.
        :return: None
        """
        for vehicle, vehicle_data in vehicle_add_data.items():
            points = self.create_points(measurement, vehicle, vehicle_data)
            self.influxdb_accessor.write(points)

    def execute(self, imported_data_id=-1):
        """
        Run the lane analysis.

        :param imported_data_id: id of the imported-data record to analyze.
        :return: None
        """
        # Fetch the record from the imported-data management DB
        imported_data = self.get_imported_data(imported_data_id)
        measurement = imported_data.measurement
        map_id = imported_data.mapid
        # List the vehicles present in the measurement
        vehicles = self.influxdb_accessor.get_vehicles(measurement)
        # Keep only vehicles that have not been analyzed yet
        for vehicle in copy.deepcopy(vehicles):
            if self.exists_fields(measurement, vehicle):
                vehicles.remove(vehicle)
        # x/y/z series per vehicle
        vehicle_dict = self.get_vehicle_data(measurement, vehicles)
        # Waypoints belonging to this map id
        waypoint_dict = self.get_waypoint_data(map_id)
        # Nearest waypoint per vehicle per timestamp
        vehicle_add_data = self.get_vehicle_lane(vehicle_dict, waypoint_dict)
        # Persist the derived fields to InfluxDB
        self.write_data(measurement, vehicle_add_data)
def execute(**kwargs):
    """Entry point invoked from Airflow's python_operator.

    :param kwargs: must contain 'record_id', the imported-data record id.
    :return: None
    """
    raw_id = kwargs['record_id']
    print('Record ID:{}'.format(raw_id))
    analyzer = LaneAnalyzer()
    analyzer.execute(imported_data_id=int(raw_id))
if __name__ == '__main__':
# for debug
LaneAnalyzer().execute(imported_data_id=2)
| StarcoderdataPython |
6530068 | import sys
sys.path.append("..")
import numpy as np
from common.util import preprocess, create_co_matrix, cos_similarity, ppmi
text = "You say goodbye and I say hello."
corpus, word_to_id, id_to_word = preprocess(text)
vocab_size = len(word_to_id)
C = create_co_matrix(corpus, vocab_size)
W = ppmi(C)
np.set_printoptions(precision=3) # Limit digit to 3
print("covariance matrix")
print(C)
print("-"*50)
print("PPMI")
print(W)
| StarcoderdataPython |
#!/usr/bin/env python
"""this is a test script that prints whatever argument is passed to it from the command line"""
import argparse
import subprocess as sub

if __name__ == "__main__":
    parser = argparse.ArgumentParser('test')
    # add the arguments needed to the parser
    parser.add_argument('--test', dest='test_run', action='store_true')
    parser.set_defaults(test_run=False)
    # parse the arguments (args.test_run is currently unused)
    args = parser.parse_args()
    # Capture `qstat` output; check_output raises CalledProcessError on a
    # non-zero exit status. The result is bytes.
    x = sub.check_output("qstat", shell=True)
    # bug fix: `print x` is Python 2 statement syntax and a SyntaxError on
    # Python 3; use the print() function instead.
    print(x)
| StarcoderdataPython |
# Puzzle input: first line of day10.txt (the look-and-say seed digits).
# NOTE(review): the trailing newline is not stripped -- presumably the file
# has no newline at end, otherwise '\n' would be treated as a digit run;
# confirm against the input file.
line = open("day10.txt", "r").readline()
def day10(iterate, sequence):
    """Apply the look-and-say transformation `iterate` times and print the
    length of the final sequence (Advent of Code 2015, day 10).

    :param iterate: number of look-and-say rounds to apply
    :param sequence: seed string of digit characters
    :return: length of the final sequence (also printed, as before)
    """
    # Local import: this script has no top-level import section.
    from itertools import groupby
    for _ in range(iterate):
        # Each maximal run of identical digits becomes "<run length><digit>".
        # itertools.groupby yields exactly those runs, replacing the original
        # hand-rolled first/second run tracker, which dropped the trailing run
        # in some cases (e.g. one round on "11" produced "" instead of "21").
        sequence = ''.join(
            str(len(list(run))) + digit
            for digit, run in groupby(sequence)
        )
    print(len(sequence))
    return len(sequence)
# Part 1: length after 40 look-and-say rounds.
print("Part 1:")
day10(40, line)
# Part 2: same puzzle, 50 rounds, restarted from the original input.
print("Part 2:")
day10(50, line)
| StarcoderdataPython |
11333434 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the Dropbox sync_history database plugin."""
import unittest
from plaso.lib import definitions
from plaso.parsers.sqlite_plugins import dropbox
from tests.parsers.sqlite_plugins import test_lib
class DropboxSyncHistoryPluginTest(test_lib.SQLitePluginTestCase):
  """Tests for the Dropbox sync_history database plugin."""

  def testProcess(self):
    """Tests the Process function on a Dropbox sync_history database file."""
    plugin = dropbox.DropboxSyncDatabasePlugin()
    storage_writer = self._ParseDatabaseFileWithPlugin(
        ['sync_history.db'], plugin)

    number_of_events = storage_writer.GetNumberOfAttributeContainers('event')
    self.assertEqual(number_of_events, 6)

    number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
        'extraction_warning')
    self.assertEqual(number_of_warnings, 0)

    number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
        'recovery_warning')
    self.assertEqual(number_of_warnings, 0)

    events = list(storage_writer.GetSortedEvents())

    # All six events share the same shape; only five fields vary, so express
    # them as (file_event_type, direction, file_identifier, local_path,
    # date_time) tuples and loop instead of six copy-pasted blocks.
    expected_events = [
        ('add', 'upload', 'XXXXXXXXXXXAAAAAAAAAGg',
         '/home/useraa/Dropbox/loc1/create_local.txt', '2022-02-17 10:57:18'),
        ('delete', 'upload', 'XXXXXXXXXXXAAAAAAAAAKg',
         '/home/useraa/Dropbox/loc1/.create_local.txt.swp',
         '2022-02-17 10:57:19'),
        ('add', 'download', 'XXXXXXXXXXXAAAAAAAAAKw',
         '/home/useraa/Dropbox/web1/create_web.txt', '2022-02-17 11:01:21'),
        ('delete', 'download', 'XXXXXXXXXXXAAAAAAAAALA',
         '/home/useraa/Dropbox/web2/create_web.txt', '2022-02-17 11:04:03'),
        ('edit', 'download', 'XXXXXXXXXXXAAAAAAAAALQ',
         '/home/useraa/Dropbox/web2/Document.docx', '2022-02-17 11:05:50'),
        ('add', 'download', 'XXXXXXXXXXXAAAAAAAAALg',
         '/home/useraa/Dropbox/web2/Untitled.gdoc', '2022-02-17 11:06:34')]

    for index, (file_event_type, direction, file_identifier, local_path,
                date_time) in enumerate(expected_events):
      expected_event_values = {
          'event_type': 'file',
          'file_event_type': file_event_type,
          'direction': direction,
          'file_identifier': file_identifier,
          'local_path': local_path,
          'date_time': date_time,
          'timestamp_desc': definitions.TIME_DESCRIPTION_RECORDED}

      self.CheckEventValues(
          storage_writer, events[index], expected_event_values)
if __name__ == '__main__':
  # Allow running this test module directly, outside the plaso test runner.
  unittest.main()
| StarcoderdataPython |
3524185 | import logging
import os
from queue import LifoQueue, Queue
from typing import Dict, Type, cast
from urllib import parse, request
from bs4 import BeautifulSoup
from bs4.element import Tag
from tqdm import tqdm
from scraper.graph.base_objects import EntityType, Url
from scraper.graph.graph import Graph
from scraper.graph.movie import Movie
from scraper.spider.actor_parser import ActorParser
from scraper.spider.movie_parser import MovieParser
from scraper.spider.utils import PageType, parse_page_type_get_infobox
# Module-wide logger shared by the spider components below.
logger = logging.getLogger('Web-Scraper')
class SpiderRunner:
    """Wikipedia crawler that builds an actor--movie graph.

    Starting from a movie URL, it alternately parses movie pages (enqueueing
    cast members with weights) and actor pages (enqueueing their filmography),
    until the queue drains or both node limits are reached.
    """

    _URL_PREFIX = 'https://en.wikipedia.org'

    def __init__(self, init_url: Url, actor_limit: int = -1,
                 movie_limit: int = -1, queue: Type[Queue] = LifoQueue) -> None:
        """
        Create a spider runner.
        Args:
            init_url: Has to be a url to a movie.
            actor_limit: stop once this many actors were collected (-1 = unbounded).
            movie_limit: stop once this many movies were collected (-1 = unbounded).
            queue: queue class controlling traversal order (LifoQueue = depth-first).
        """
        self.init_url = init_url
        self.graph = Graph()
        # Queue items are tuples of (url, predecessor, weight)
        self.queue = queue()
        self.queue.put((init_url, None, 0))
        self.actor_limit = actor_limit
        self.movie_limit = movie_limit

    @staticmethod
    def get_full_url(url: Url) -> Url:
        """Resolve a site-relative link against the Wikipedia host."""
        return cast(Url, parse.urljoin(SpiderRunner._URL_PREFIX, url))

    def _process_movie(self, url: Url, html: Tag,
                       infobox: Dict[str, Tag]) -> None:
        """Add a movie node and enqueue its actors, stars weighted highest."""
        parser = MovieParser(url)
        movie = parser.parse_movie_object(infobox)
        if self.graph.add_node(movie):
            stars = parser.parse_staring(infobox)
            casts = parser.parse_cast(html)
            total_actors = stars
            actors_added = set(total_actors)
            # Distribute weight according to the order: stars first, then the
            # remaining cast, de-duplicated.
            for actor in casts:
                if actor not in actors_added:
                    total_actors.append(actor)
                    actors_added.add(actor)
            # 1 + 2 + ... + n weight units; earlier-listed actors get a
            # larger share of the total weight.
            total_weight_units = (1 + len(total_actors)) * len(total_actors) / 2
            for i, actor in enumerate(reversed(total_actors)):
                self.queue.put((actor, movie, (i + 1) / total_weight_units))

    def _process_actor(self, url: Url, html: Tag, infobox: Dict[str, Tag],
                       predecessor: Movie, weight: float) -> None:
        """Add an actor node, link it to the movie that led here, and
        enqueue the actor's related movies."""
        parser = ActorParser(url)
        actor = parser.parse_actor_object(infobox)
        if self.graph.add_node(actor):
            if predecessor:
                edge = self.graph.add_relationship(
                    predecessor.node_id, actor.node_id)
                if edge:
                    edge.weight = weight
            movies = parser.parse_related_movies(html)
            for movie in movies:
                self.queue.put((movie, None, 0))

    def run(self):
        """Crawl until the queue drains or both node limits are exceeded."""
        try:
            fmt = '{l_bar}{bar}[{elapsed}<{remaining}, {rate_fmt}{postfix} ]'
            with tqdm(total=100, bar_format=fmt) as progress_bar:
                previous_percent = 0
                while not self.queue.empty():
                    url, predecessor, weight = self.queue.get()
                    logger.info(url)
                    logger.debug('%s %s %s' % (url, predecessor, weight))
                    if self.graph.check_node_exist(url):
                        logger.debug('skip %s' % url)
                        # bug fix: without this `continue` the page was fetched
                        # and parsed again even though the log said "skip";
                        # also removed an unused `ActorParser(url)` created here.
                        continue
                    full_url = self.get_full_url(url)
                    soup = BeautifulSoup(request.urlopen(full_url),
                                         features="lxml")
                    page_type, infobox = parse_page_type_get_infobox(soup.html)
                    if page_type == PageType.ACTOR:
                        self._process_actor(url, soup.html, infobox,
                                            predecessor,
                                            weight)
                    elif page_type == PageType.MOVIE:
                        self._process_movie(url, soup.html, infobox)
                    num_actors = self.graph.num_node(EntityType.ACTOR)
                    num_movies = self.graph.num_node(EntityType.MOVIE)
                    # ANSI-green progress postfix (counts reused from above
                    # instead of re-querying the graph).
                    progress = '\033[92mactor: %d, movie: %d\033[0m' % (
                        num_actors, num_movies)
                    percentage = ((min(num_actors,
                                       self.actor_limit) / self.actor_limit
                                   + min(num_movies,
                                         self.movie_limit) /
                                   self.movie_limit) / 2)
                    progress_bar.update((percentage - previous_percent) * 100)
                    previous_percent = percentage
                    progress_bar.set_postfix_str(progress)
                    if (0 < self.actor_limit < num_actors
                            and 0 < self.movie_limit < num_movies):
                        break
        except KeyboardInterrupt:
            logger.info('Terminated due to keyboard interrupt')

    def save(self, out_file: str) -> None:
        """Serialize the graph as JSON to `out_file`, creating the parent
        directory when missing."""
        json_str = self.graph.serialize()
        out_dir = os.path.dirname(out_file)
        # bug fix: the original guard `os.path.isdir(out_dir) and not
        # os.path.exists(out_dir)` can never be True (isdir implies exists),
        # so the directory was never created and open() below could fail.
        if out_dir and not os.path.exists(out_dir):
            os.makedirs(out_dir)
        with open(out_file, 'w') as f:
            f.write(json_str)
| StarcoderdataPython |
3260550 | <filename>models/gaussian_mixture.py
from pathlib import Path
import numpy as np
import pandas as pd
from sklearn import mixture
from tqdm import tqdm
class GaussianMixture:
    """Fits k-component spherical Gaussian mixtures to per-supernova age
    posterior samples and reports the flattened mixture parameters."""

    def __init__(self, name, k=3):
        """
        :param name: label used in the result/params file names
        :param k: number of mixture components per fit
        """
        self.name = name
        self.params_fpath = Path(
            f"results/gmm_age_posterior_fit_params_{self.name}.csv")
        self.results_fpath = Path(
            f"results/gmm_age_posterior_fit_results_{self.name}.pkl")
        self.k = k

    def fit_age_posteriors(self, age_df, **kwargs):
        """Fit one mixture per supernova id.

        :param age_df: DataFrame indexed by 'snid' with an 'age' column of
            posterior samples
        :param kwargs: forwarded to sklearn's GaussianMixture
        :return: dict mapping snid -> flattened parameter dict (see fit())
        """
        series = age_df["age"].groupby("snid").apply(list)
        snid_gmm_params = {}
        # bug fix: Series.iteritems() was deprecated and removed in pandas
        # 2.0; Series.items() is the supported, behaviorally identical call.
        for snid, age_posterior in tqdm(series.items(), total=len(series),
                                        desc="Fitting age posteriors"):
            snid_gmm_params[snid] = self.fit(age_posterior, **kwargs)
        return snid_gmm_params

    def fit(self, x, **kwargs):
        """Fit a k-component spherical GMM to 1-D samples `x`.

        :return: dict with keys mean{i}/sigma{i}/weight{i} for i in 0..k-1
        """
        x = np.asarray(x)
        gmm = mixture.GaussianMixture(
            n_components=self.k, covariance_type="spherical", **kwargs)
        gmm.fit(x.reshape(len(x), -1))
        params = {}
        for i in range(self.k):
            params[f"mean{i}"] = gmm.means_.reshape(self.k)[i]
            # spherical covariances are variances -> sqrt gives sigma
            params[f"sigma{i}"] = np.sqrt(gmm.covariances_[i])
            params[f"weight{i}"] = gmm.weights_[i]
        return params
# def save(self):
# assert self.gmms is not None, "No results can be saved before fit is ran."
# # # Save GMM object
# # with self.results_fpath.open("wb") as f:
# # pickle.dump(self.gmms, f)
# # print(f"Saved successful {self.results_fpath}")
# # Save GMM params
# params = self.get_params()
# params.to_csv(self.params_fpath, index=False)
# print(f"Saved successful {self.params_fpath}")
# def load(self, fpath=None):
# # Load GMM object
# fpath = fpath or self.results_fpath
# with fpath.open("rb") as f:
# self.gmms = pickle.load(f)
# def get_results(self):
# return self.gmms
# def get_params(self):
# params = {
# "y": [],
# "mu_x": [],
# "sigma_x": [],
# "weights_x": []
# }
# for y, gmm in self.gmms.items():
# params["y"].append(y)
# params["mu_x"].append(gmm.means_)
# params["sigma_x"].append(gmm.covariances_)
# params["weights_x"].append(gmm.weights_)
# params["mu_x"] = np.array(params["mu_x"]).reshape(-1, self.k)
# params["sigma_x"] = np.array(params["sigma_x"])
# params["weights_x"] = np.array(params["sigma_x"])
# params = pd.DataFrame(
# np.hstack(
# (params["y"], params["mu_x"], params["sigma_x"], params["weights_x"])),
# columns=(
# "y",
# [f"mean{i}" for i in range(1, self.k + 1)] +
# [f"sigma{i}" for i in range(1, self.k + 1)] +
# [f"weight{i}" for i in range(1, self.k + 1)]
# ),
# )
# return params
# params = np.genfromtxt(self.params_fpath, delimiter=",")
# if format == "numpy":
# return params
# elif format == "dataframe":
# k = self.k
# # mu_x = params[:, :k]
# # sigma_x = params[:, k:2*k]
# # weights_x = params[:, 2*k:3*k]
# # Save the GMM parameters
# params_df = pd.DataFrame(params, )
# return params_df
| StarcoderdataPython |
170540 | <filename>aether-kernel/aether/kernel/api/migrations/0006_auto_20180122_1346.py
# Generated by Django 2.0.1 on 2018-01-22 13:46
from django.db import migrations
import django.utils.timezone
import model_utils.fields
class Migration(migrations.Migration):
    # Auto-generated by Django 2.0.1 (see file header). Adds `created` /
    # `modified` audit-timestamp fields (django-model-utils auto fields) to
    # every kernel model. Do not hand-edit operations of applied migrations.

    dependencies = [
        ('kernel', '0005_auto_20180122_1346'.replace('0122_1346', '0116_1246') and '0005_auto_20180116_1246'),
    ]

    operations = [
        migrations.AddField(
            model_name='attachment',
            name='created',
            field=model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created'),
        ),
        migrations.AddField(
            model_name='attachment',
            name='modified',
            field=model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified'),
        ),
        migrations.AddField(
            model_name='mapping',
            name='created',
            field=model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created'),
        ),
        migrations.AddField(
            model_name='mapping',
            name='modified',
            field=model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified'),
        ),
        migrations.AddField(
            model_name='project',
            name='created',
            field=model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created'),
        ),
        migrations.AddField(
            model_name='project',
            name='modified',
            field=model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified'),
        ),
        migrations.AddField(
            model_name='projectschema',
            name='created',
            field=model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created'),
        ),
        migrations.AddField(
            model_name='projectschema',
            name='modified',
            field=model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified'),
        ),
        migrations.AddField(
            model_name='schema',
            name='created',
            field=model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created'),
        ),
        migrations.AddField(
            model_name='schema',
            name='modified',
            field=model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified'),
        ),
        migrations.AddField(
            model_name='submission',
            name='created',
            field=model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created'),
        ),
        migrations.AddField(
            model_name='submission',
            name='modified',
            field=model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified'),
        ),
    ]
| StarcoderdataPython |
370218 | """
(c) 2020 <NAME>, MIT Licence
Python Landscape Classification Tool (PyLC)
Reference: An evaluation of deep learning semantic segmentation
for land cover classification of oblique ground-based photography,
MSc. Thesis 2020.
<http://hdl.handle.net/1828/12156>
<NAME> <<EMAIL>>, June 2020
University of Victoria
Module: Evaluator Class
File: evaluate.py
"""
import json
import os
import torch
import utils.tex as tex
import numpy as np
import cv2
import utils.tools as utils
from config import defaults, Parameters
from utils.metrics import Metrics
class Evaluator:
    """
    Handles model test/evaluation functionality.

    Parameters
    ------
    params: Parameters
        Updated parameters.
    """

    def __init__(self, params=None):
        # initialize parameters, metrics
        self.meta = Parameters(params) if params is not None else defaults
        self.metrics = Metrics()

        # Model results
        self.fid = None
        self.logits = None
        self.mask_pred = None
        self.results = []

        # data buffers (flattened, class-encoded masks)
        self.y_true = None
        self.y_pred = None
        self.labels = []

        # multi-image data buffers for aggregate evaluation
        self.aggregate = False
        self.y_true_aggregate = []
        self.y_pred_aggregate = []

        # Make output and mask directories for results
        self.model_path = None
        self.output_dir = os.path.join(defaults.output_dir, self.meta.id)
        self.masks_dir = utils.mk_path(os.path.join(self.output_dir, 'masks'))
        self.logits_dir = utils.mk_path(os.path.join(self.output_dir, 'logits'))
        self.metrics_dir = utils.mk_path(os.path.join(self.output_dir, 'metrics'))

    def load(self, mask_pred, meta, mask_true_path=None, scale=None):
        """
        Initialize predicted/ground truth image masks for
        evaluation metrics.

        Parameters:
        -----------
        mask_pred: np.array
            Reconstructed predicted segmentation mask data.
        meta: dict
            Reconstruction metadata.
        mask_true_path: str
            File path to ground-truth mask [CHW]; when given, both masks are
            class-encoded, flattened and appended to the aggregate buffers.
        scale: float
            Optional rescale factor applied when loading the ground truth.
        """
        # store metadata
        self.meta = meta
        # file identifier (include current scale)
        self.fid = self.meta.extract['fid']
        # reconstruct unnormalized model outputs into mask data array
        self.mask_pred = mask_pred

        if mask_true_path:
            # load ground-truth data
            mask_true, w, h, w_scaled, h_scaled = utils.get_image(
                mask_true_path,
                ch=3,
                scale=scale,
                interpolate=cv2.INTER_NEAREST
            )
            # check dimensions of ground truth mask and predicted mask
            if not (w_scaled == self.meta.extract['w_scaled'] and h_scaled == self.meta.extract['h_scaled']):
                print("Ground truth mask dims ({}px X {}px) do not match predicted mask dims ({}px X {}px).".format(
                    w_scaled, h_scaled, self.meta.extract['w_scaled'], self.meta.extract['h_scaled']
                ))
                exit(1)

            # convert HWC uint8 images to NCHW tensors
            self.y_true = torch.as_tensor(torch.tensor(mask_true), dtype=torch.uint8).permute(2, 0, 1).unsqueeze(0)
            self.y_pred = torch.as_tensor(self.mask_pred, dtype=torch.uint8).permute(2, 0, 1).unsqueeze(0)

            # Class encode input predicted data
            self.y_pred = utils.class_encode(self.y_pred, self.meta.palette_rgb)
            self.y_true = utils.class_encode(self.y_true, self.meta.palette_rgb)

            # Verify same size of target == input
            assert self.y_pred.shape == self.y_true.shape, "Input dimensions {} not same as target {}.".format(
                self.y_pred.shape, self.y_true.shape)

            self.y_pred = self.y_pred.flatten()
            self.y_true = self.y_true.flatten()

            # load input data into metrics
            self.y_true_aggregate += [self.y_true]
            self.y_pred_aggregate += [self.y_pred]

        return self

    def update(self, meta):
        """
        Update local metadata
        """
        self.meta = meta
        return self

    def evaluate(self, aggregate=False):
        """
        Compute evaluation metrics

        Parameters
        ----------
        aggregate: bool
            Compute aggregate metrics for multiple data loads.
        """
        self.aggregate = aggregate
        self.validate()
        # metrics operate on the flattened, class-encoded buffers
        self.metrics.f1_score(self.y_true, self.y_pred)
        self.metrics.jaccard(self.y_true, self.y_pred)
        self.metrics.mcc(self.y_true, self.y_pred)
        self.metrics.confusion_matrix(self.y_true, self.y_pred, labels=self.labels)
        self.metrics.report(self.y_true, self.y_pred, labels=self.labels)
        return self

    def validate(self):
        """
        Validates mask data for computations.
        - Ensures all classes are represented in ground truth mask.
        """
        self.labels = defaults.class_codes

        # aggregated metrics
        if self.aggregate:
            self.fid = 'aggregate_metrics'
            assert self.y_true_aggregate and self.y_pred_aggregate, \
                "Aggregate evaluation failed. Data buffer is empty."

            print("\nReporting aggregate metrics ... ")
            print("\t - Total generated masks: {}".format(len(self.y_pred_aggregate)))
            print()
            # Concatenate aggregated data
            self.y_true = np.concatenate((self.y_true_aggregate))
            self.y_pred = np.concatenate((self.y_pred_aggregate))

        # ensure class coverage
        # NOTE: deliberately overwrites the first len(labels) elements so that
        # every class id occurs at least once; keeps per-class metrics from
        # dropping absent classes at the cost of a tiny bias.
        for idx in range(len(self.labels)):
            self.y_true[idx] = idx
            self.y_pred[idx] = idx

        return self

    def reset(self):
        """
        Resets evaluator buffers.
        """
        self.logits = None
        self.mask_pred = None
        self.results = []
        self.meta = {}
        self.y_true = None
        self.y_pred = None

    def save_logits(self, logits):
        """
        Save unnormalized model outputs (logits) to file.

        Parameters
        ----------
        logits: list
            Unnormalized model outputs.

        Returns
        -------
        logits_file: str
            Output path to model outputs file, or None if the write was
            declined.
        """
        # save unnormalized model outputs
        logits_file = os.path.join(self.logits_dir, self.fid + '_output.pth')
        if utils.confirm_write_file(logits_file):
            torch.save({"results": logits, "meta": self.meta}, logits_file)
            print("Model output data saved to \n\t{}.".format(logits_file))
            return logits_file
        return

    def save_metrics(self):
        """
        Save prediction evaluation results to files.

        Returns
        -------
        metrics_file: str
            Output path to metrics data file.
        cmap_img_file: str
            Output path to confusion matrix PDF file.
        cmap_data_file: str
            Output path to confusion matrix data file.
        """
        # Build output file paths
        metrics_file = os.path.join(self.metrics_dir, self.fid + '_eval.json')
        cmap_img_file = os.path.join(self.metrics_dir, self.fid + '_cmap.pdf')
        cmap_data_file = os.path.join(self.metrics_dir, self.fid + '_cmap.npy')

        # save evaluation metrics results as JSON file
        if utils.confirm_write_file(metrics_file):
            with open(metrics_file, 'w') as fp:
                json.dump(self.metrics.results, fp, indent=4)
        # save confusion matrix as PDF and data file
        if utils.confirm_write_file(cmap_img_file):
            self.metrics.cmap.get_figure().savefig(cmap_img_file, format='pdf', dpi=400)
            np.save(cmap_data_file, self.metrics.cmatrix)

        # clear metrics plot
        self.metrics.plt.clf()

        return metrics_file, cmap_img_file, cmap_data_file

    def save_tex(self):
        """
        Save prediction evaluation results as LaTeX table to file.

        Returns
        -------
        tex_file: str
            Output path to TeX data file, or None if the write was declined.
        """
        tex_file = os.path.join(self.metrics_dir, self.fid + '_metrics.tex')
        if utils.confirm_write_file(tex_file):
            with open(tex_file, 'w') as fp:
                fp.write(tex.convert_md_to_tex(self.meta))
            return tex_file
        return

    def save_image(self):
        """
        Reconstructs segmentation prediction as mask image.
        Output mask image saved to file (RGB -> BGR conversion)
        Note that the default color format in OpenCV is often
        referred to as RGB but it is actually BGR (the bytes are
        reversed).

        Returns
        -------
        mask_file: str
            Output path of the saved mask image, or None when no mask is
            available or the write was declined.
        """
        # Build mask file path
        mask_file = os.path.join(self.masks_dir, self.fid + '.png')

        if self.mask_pred is None:
            print("Mask has not been reconstructed. Image save cancelled.")
            # bug fix: the original fell through after this warning and
            # passed None to cv2.cvtColor, which raises; bail out instead.
            return

        if utils.confirm_write_file(mask_file):
            # Reconstruct seg-mask from predicted tiles and write to file
            cv2.imwrite(mask_file, cv2.cvtColor(self.mask_pred, cv2.COLOR_RGB2BGR))
            print("Output mask saved to: \n\t{}.".format(mask_file))
            return mask_file
        return
| StarcoderdataPython |
357511 | import pandas as pd
from Package.reader import Reader
from datetime import datetime, date, time
from Package.facebook_sub_readers.date_transformers import main_transfo_timestamp_10
from Package.facebook_sub_readers.own_post_transformers import gen, lists_creator
# Column-set toggles consumed by FacebookPostForSaleItemReader.read():
# ALL_INDEX keeps the full marketplace schema, ALL_GENERAL keeps a compact
# name/content schema; both off yields only the common date/type columns.
ALL_GENERAL = False
ALL_INDEX = False
class FacebookPostForSaleItemReader(Reader):
    def read(self):
        '''
        Same process as most of function above but for items saled on Facebook.
        '''
        # Load the raw Facebook export; only posts carrying attachments can
        # describe for-sale items.
        df = pd.read_json(self.path, convert_dates = False, encoding='utf-8')
        df = df[df['attachments'].notna()]
        # lists_creator returns parallel lists: [timestamps, attachment payloads]
        returned = lists_creator(df,'for_sale_item')
        list_date = returned[0]
        list_att = returned[1]
        outputdict = {}
        # Flatten the deeply nested attachment structure into column-like
        # lists keyed by the leaf field name. Nesting (as observed in the
        # export) is: list -> list -> dict -> list -> dict -> dict, with up
        # to two more dict levels for location data.
        for lis in list_att:
            for lis_2 in lis:
                for key, value in lis_2.items():
                    for lis_3 in value:
                        for k2, v2 in lis_3.items():
                            for k3, v3 in v2.items():
                                if isinstance (v3, dict):
                                    for k4, v4 in v3.items():
                                        if isinstance(v4, dict):
                                            for k5, v5 in v4.items():
                                                outputdict[k5] = outputdict.get(k5, []) + [v5]
                                        else:
                                            outputdict[k4] = outputdict.get(k4, []) + [v4]
                                else:
                                    outputdict[k3] = outputdict.get(k3, []) + [v3]
        # Drop free-text columns that are not needed downstream.
        # NOTE(review): 'description' is popped with a default but 'title'
        # without one -- a payload missing 'title' would raise KeyError;
        # confirm whether that is intended.
        outputdict.pop('description',None)
        outputdict.pop('title')
        df1 = pd.DataFrame.from_dict(outputdict)
        df1['date'] = list_date
        # Normalize the 10-digit epoch timestamps and tag rows with
        # source/type labels.
        main_df = main_transfo_timestamp_10(df1, 'Facebook', 'for sale item')
        if ALL_INDEX:
            main_df['price'] = df1['price']
            # NOTE(review): 'seller' is filled from the *price* column --
            # looks like a copy/paste slip; confirm the intended source column.
            main_df['seller']=df1['price']
            main_df['category']=df1['category']
            main_df['marketplace']=df1['marketplace']
            main_df['location']=df1['name']
            main_df['latitude']=df1['latitude']
            main_df['longitude']=df1['longitude']
            main_df['uri']=df1['uri']
            main_df['ip']=df1['upload_ip']
            main_df = main_df[['date','type','label','price','seller','category','marketplace',\
                'location','latitude','longitude','uri','ip','Year','Month','Day','Hour']]
        if ALL_GENERAL:
            main_df['name'] = df1.marketplace
            main_df['content'] = df1.uri
            main_df = main_df[['date','type','label','name','content','Year','Month','Day','Hour']]
        main_df.sort_values(["date"],axis=0,ascending=True,inplace=True)
        self.df = main_df
278233 | # Copyright (c) The Diem Core Contributors
# SPDX-License-Identifier: Apache-2.0
from dataclasses import replace, asdict
from diem.testing.miniwallet import KycSample, Account, PaymentUri, Transaction, PaymentCommand
from diem import offchain
def test_match_kyc_data():
    """Matching is keyed on given_name; other fields do not affect it."""
    sample = KycSample.gen("foo")
    kyc = offchain.from_json(sample.soft_match, offchain.KycDataObject)
    assert sample.match_kyc_data("soft_match", kyc)
    assert not sample.match_kyc_data("reject", kyc)
    # Changing a non-name field keeps the match ...
    kyc = replace(kyc, legal_entity_name="hello")
    assert sample.match_kyc_data("soft_match", kyc)
    # ... but changing the given name breaks it.
    kyc = replace(kyc, given_name="hello")
    assert not sample.match_kyc_data("soft_match", kyc)
def test_decode_account_kyc_data():
    """Accounts without KYC data fall back to the default individual object;
    stored KYC JSON round-trips unchanged."""
    assert Account(id="1").kyc_data_object() == offchain.individual_kyc_data()
    sample = KycSample.gen("foo")
    minimum_account = Account(id="1", kyc_data=sample.minimum)
    kyc_object = minimum_account.kyc_data_object()
    assert kyc_object
    assert offchain.to_json(kyc_object) == sample.minimum
def test_payment_uri_intent_identifier():
    """A stored payment URI decodes into an intent carrying the expected subaddress."""
    payment = PaymentUri(
        id="1",
        account_id="2",
        payment_uri="diem://dm1p7ujcndcl7nudzwt8fglhx6wxn08kgs5tm6mz4us2vfufk",
    )
    assert payment.intent("dm")
    assert payment.intent("dm").sub_address.hex() == "cf64428bdeb62af2"
def test_transaction_balance_amount():
    """Balance impact is positive without a payee (incoming) and negative with one (outgoing)."""
    pending_txn = Transaction(
        id="1", account_id="2", currency="XUS", amount=1000,
        status=Transaction.Status.pending)
    assert pending_txn.balance_amount() == 1000
    pending_txn.payee = "dm1p7ujcndcl7nudzwt8fglhx6wxn08kgs5tm6mz4us2vfufk"
    assert pending_txn.balance_amount() == -1000
def test_transaction_subaddress():
    """The hex-encoded subaddress decodes back to the same hex string."""
    pending_txn = Transaction(
        id="1", account_id="2", currency="XUS", amount=1000,
        status=Transaction.Status.pending)
    pending_txn.subaddress_hex = "cf64428bdeb62af2"
    assert pending_txn.subaddress().hex() == "cf64428bdeb62af2"
def test_payment_command_to_offchain_command(factory):
    """A PaymentCommand row round-trips into the offchain command it mirrors."""
    source_cmd = factory.new_sender_payment_command()
    row = PaymentCommand(
        id="1",
        account_id="2",
        is_sender=source_cmd.is_sender(),
        reference_id=source_cmd.reference_id(),
        is_inbound=source_cmd.is_inbound(),
        cid=source_cmd.id(),
        payment_object=asdict(source_cmd.payment),
    )
    assert row.to_offchain_command() == source_cmd
| StarcoderdataPython |
1649649 | <filename>utils/logConf.py
import logging

# Shared logging configuration: importing this module configures the root
# logger once for the whole application.
# Renamed from `format` to avoid shadowing the `format` builtin.
LOG_FORMAT = "%(asctime)s [%(filename)s:%(lineno)d] %(levelname)-8s %(message)s"
# Backward-compat alias for any caller importing the old name.
format = LOG_FORMAT
logging.basicConfig(level=logging.DEBUG, format=LOG_FORMAT)
4966748 | <gh_stars>10-100
import copytext
import datetime
import logging
from oauth import get_document
import os
from peewee import IntegrityError
import pytz
import time
import app_config
from util.models import Story
from scrapers.npr_api import NPRAPIScraper
from scrapers.screenshot import Screenshotter
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)

# Shared scraper singletons reused across PockyScraper runs.
npr_api_scraper = NPRAPIScraper()
screenshotter = Screenshotter()

# Google Sheets document id of the pocky review tracker; scraping is a
# no-op when this env var is unset.
POCKY_TRACKER = os.environ.get('POCKY_SHEET')
class PockyScraper:
    """
    Scrapes the pocky review tracker spreadsheet and persists rows as stories.

    From https://github.com/tarbell-project/tarbell/blob/1.0.5/tarbell/app.py#L251
    """

    def __init__(self, source):
        # `source` is expected to provide at least a 'team' entry.
        self.source = source

    @staticmethod
    def parse_date(value):
        """
        Attemts to parse a date from Excel into something the rest of the world
        can use.
        """
        if not value:
            return None
        # Excel serial dates count days; 25569 is the serial for the Unix
        # epoch (1970-01-01) and 86400 converts days to seconds.
        value = float(value)
        seconds = (value - 25569) * 86400.0
        parsed = datetime.datetime.utcfromtimestamp(seconds)
        tz = pytz.timezone(app_config.PROJECT_TIMEZONE)
        parsed = tz.localize(parsed)
        # bug fix: removed leftover Python 2 debug statements
        # (`print "parsed"` / `print parsed`) -- they are a SyntaxError under
        # Python 3 and polluted stdout otherwise.
        return parsed

    def scrape_and_load(self, path=app_config.STORIES_PATH):
        """Fetch the tracker sheet, scrape it and write the stories.

        Returns the list of newly created stories, or None when no tracker
        sheet is configured.
        """
        if not POCKY_TRACKER:
            return
        get_document(POCKY_TRACKER, path)
        raw_stories = self.scrape_spreadsheet()
        stories = self.write(stories=raw_stories, team=self.source['team'])
        return stories

    def scrape_spreadsheet(self, path=app_config.STORIES_PATH):
        """
        Scrape the pocky tracker spreadsheet
        """
        spreadsheet = copytext.Copy(path)
        data = spreadsheet['reviews']
        return data

    def write(self, stories, team=None):
        """
        Save rows to the database
        """
        new_stories = []
        for story in stories:
            slug = story['official flavor description'] + ' - ' + story['taster']
            try:
                story = Story.create(
                    name=story['name'].strip(),
                    slug=slug,
                    date=PockyScraper.parse_date(story['date tasted']),
                    story_type='pocky',
                    team=team,
                )
                logger.info('Added {0}'.format(story.name))
                new_stories.append(story)
            except IntegrityError:
                # Story probably already exists (slug uniqueness): skip it.
                logger.info('Not adding %s to database: probably already exists' % (slug))
        return new_stories
| StarcoderdataPython |
1690057 | <gh_stars>1-10
# Once for All: Train One Network and Specialize it for Efficient Deployment
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
# International Conference on Learning Representations (ICLR), 2020.
import random
import numpy as np
from search.rm_search.ofa.imagenet_classification.networks import ResNets
__all__ = ['MobileNetArchEncoder', 'ResNetArchEncoder']
class MobileNetArchEncoder:
SPACE_TYPE = 'mbv3'
def __init__(self, image_size_list=None, ks_list=None, expand_list=None, depth_list=None, n_stage=None):
self.image_size_list = [224] if image_size_list is None else image_size_list
self.ks_list = [3, 5, 7] if ks_list is None else ks_list
self.expand_list = [3, 4, 6] if expand_list is None else [int(expand) for expand in expand_list]
self.depth_list = [2, 3, 4] if depth_list is None else depth_list
if n_stage is not None:
self.n_stage = n_stage
elif self.SPACE_TYPE == 'mbv2':
self.n_stage = 6
elif self.SPACE_TYPE == 'mbv3':
self.n_stage = 5
else:
raise NotImplementedError
# build info dict
self.n_dim = 0
self.r_info = dict(id2val={}, val2id={}, L=[], R=[])
self._build_info_dict(target='r')
self.k_info = dict(id2val=[], val2id=[], L=[], R=[])
self.e_info = dict(id2val=[], val2id=[], L=[], R=[])
self._build_info_dict(target='k')
self._build_info_dict(target='e')
@property
def max_n_blocks(self):
if self.SPACE_TYPE == 'mbv3':
return self.n_stage * max(self.depth_list)
elif self.SPACE_TYPE == 'mbv2':
return (self.n_stage - 1) * max(self.depth_list) + 1
else:
raise NotImplementedError
def _build_info_dict(self, target):
if target == 'r':
target_dict = self.r_info
target_dict['L'].append(self.n_dim)
for img_size in self.image_size_list:
target_dict['val2id'][img_size] = self.n_dim
target_dict['id2val'][self.n_dim] = img_size
self.n_dim += 1
target_dict['R'].append(self.n_dim)
else:
if target == 'k':
target_dict = self.k_info
choices = self.ks_list
elif target == 'e':
target_dict = self.e_info
choices = self.expand_list
else:
raise NotImplementedError
for i in range(self.max_n_blocks):
target_dict['val2id'].append({})
target_dict['id2val'].append({})
target_dict['L'].append(self.n_dim)
for k in choices:
target_dict['val2id'][i][k] = self.n_dim
target_dict['id2val'][i][self.n_dim] = k
self.n_dim += 1
target_dict['R'].append(self.n_dim)
def arch2feature(self, arch_dict):
ks, e, d, r = arch_dict['ks'], arch_dict['e'], arch_dict['d'], arch_dict['image_size']
feature = np.zeros(self.n_dim)
for i in range(self.max_n_blocks):
nowd = i % max(self.depth_list)
stg = i // max(self.depth_list)
if nowd < d[stg]:
feature[self.k_info['val2id'][i][ks[i]]] = 1
feature[self.e_info['val2id'][i][e[i]]] = 1
feature[self.r_info['val2id'][r]] = 1
return feature
def feature2arch(self, feature):
img_sz = self.r_info['id2val'][
int(np.argmax(feature[self.r_info['L'][0]:self.r_info['R'][0]])) + self.r_info['L'][0]
]
assert img_sz in self.image_size_list
arch_dict = {'ks': [], 'e': [], 'd': [], 'image_size': img_sz}
d = 0
for i in range(self.max_n_blocks):
skip = True
for j in range(self.k_info['L'][i], self.k_info['R'][i]):
if feature[j] == 1:
arch_dict['ks'].append(self.k_info['id2val'][i][j])
skip = False
break
for j in range(self.e_info['L'][i], self.e_info['R'][i]):
if feature[j] == 1:
arch_dict['e'].append(self.e_info['id2val'][i][j])
assert not skip
skip = False
break
if skip:
arch_dict['e'].append(0)
arch_dict['ks'].append(0)
else:
d += 1
if (i + 1) % max(self.depth_list) == 0 or (i + 1) == self.max_n_blocks:
arch_dict['d'].append(d)
d = 0
return arch_dict
def random_sample_arch(self):
return {
'ks': random.choices(self.ks_list, k=self.max_n_blocks),
'e': random.choices(self.expand_list, k=self.max_n_blocks),
'd': random.choices(self.depth_list, k=self.n_stage),
'image_size': random.choice(self.image_size_list)
}
def mutate_resolution(self, arch_dict, mutate_prob):
    """With probability ``mutate_prob``, resample the image size in place."""
    should_mutate = random.random() < mutate_prob
    if should_mutate:
        arch_dict['image_size'] = random.choice(self.image_size_list)
    return arch_dict
def mutate_arch(self, arch_dict, mutate_prob):
    """Independently resample per-block (ks, e) and per-stage depth in place.

    Each block / stage mutates with probability ``mutate_prob``; the
    (possibly modified) dict is returned for convenience.
    """
    for block_idx in range(self.max_n_blocks):
        if random.random() < mutate_prob:
            arch_dict['ks'][block_idx] = random.choice(self.ks_list)
            arch_dict['e'][block_idx] = random.choice(self.expand_list)
    for stage_idx in range(self.n_stage):
        if random.random() < mutate_prob:
            arch_dict['d'][stage_idx] = random.choice(self.depth_list)
    return arch_dict
class ResNetArchEncoder:
    """One-hot architecture encoder for an OFA-style ResNet search space.

    Encodes (image size, input-stem skip flag, per-stage width-multiplier
    index, per-block expand ratio) into a flat binary feature vector, and
    decodes such vectors back into architecture dicts.

    The four ``*_info`` dicts each hold ``val2id``/``id2val`` lookup tables
    plus ``L``/``R`` segment boundaries into the feature vector; ``self.n_dim``
    is threaded through the ``_build_info_dict`` calls in a fixed order, so
    the call order in ``__init__`` must not change.
    """

    def __init__(self, image_size_list=None, depth_list=None, expand_list=None, width_mult_list=None,
                 base_depth_list=None):
        # Defaults match the OFA ResNet50 search space; any argument may be
        # overridden by the caller.
        self.image_size_list = [224] if image_size_list is None else image_size_list
        self.expand_list = [0.2, 0.25, 0.35] if expand_list is None else expand_list
        self.depth_list = [0, 1, 2] if depth_list is None else depth_list
        self.width_mult_list = [0.65, 0.8, 1.0] if width_mult_list is None else width_mult_list
        self.base_depth_list = ResNets.BASE_DEPTH_LIST if base_depth_list is None else base_depth_list
        """" build info dict """
        self.n_dim = 0  # running total of feature dimensions allocated so far
        # resolution
        self.r_info = dict(id2val={}, val2id={}, L=[], R=[])
        self._build_info_dict(target='r')
        # input stem skip
        self.input_stem_d_info = dict(id2val={}, val2id={}, L=[], R=[])
        self._build_info_dict(target='input_stem_d')
        # width_mult
        self.width_mult_info = dict(id2val=[], val2id=[], L=[], R=[])
        self._build_info_dict(target='width_mult')
        # expand ratio
        self.e_info = dict(id2val=[], val2id=[], L=[], R=[])
        self._build_info_dict(target='e')

    @property
    def n_stage(self):
        # one stage per base-depth entry
        return len(self.base_depth_list)

    @property
    def max_n_blocks(self):
        # every stage has its base depth plus up to max(depth_list) extra blocks
        return sum(self.base_depth_list) + self.n_stage * max(self.depth_list)

    def _build_info_dict(self, target):
        """Allocate feature-vector segments for one attribute family.

        Mutates ``self.n_dim`` as a running cursor; ``L``/``R`` record the
        [start, end) slice of the vector owned by each sub-segment.
        """
        if target == 'r':
            # one dimension per candidate image size
            target_dict = self.r_info
            target_dict['L'].append(self.n_dim)
            for img_size in self.image_size_list:
                target_dict['val2id'][img_size] = self.n_dim
                target_dict['id2val'][self.n_dim] = img_size
                self.n_dim += 1
            target_dict['R'].append(self.n_dim)
        elif target == 'input_stem_d':
            # binary flag: input stem skip on/off
            target_dict = self.input_stem_d_info
            target_dict['L'].append(self.n_dim)
            for skip in [0, 1]:
                target_dict['val2id'][skip] = self.n_dim
                target_dict['id2val'][self.n_dim] = skip
                self.n_dim += 1
            target_dict['R'].append(self.n_dim)
        elif target == 'e':
            # one segment per block, one dimension per expand-ratio choice
            target_dict = self.e_info
            choices = self.expand_list
            for i in range(self.max_n_blocks):
                target_dict['val2id'].append({})
                target_dict['id2val'].append({})
                target_dict['L'].append(self.n_dim)
                for e in choices:
                    target_dict['val2id'][i][e] = self.n_dim
                    target_dict['id2val'][i][self.n_dim] = e
                    self.n_dim += 1
                target_dict['R'].append(self.n_dim)
        elif target == 'width_mult':
            # n_stage + 2 width positions (presumably stem/head plus stages --
            # TODO confirm against the network definition)
            target_dict = self.width_mult_info
            choices = list(range(len(self.width_mult_list)))
            for i in range(self.n_stage + 2):
                target_dict['val2id'].append({})
                target_dict['id2val'].append({})
                target_dict['L'].append(self.n_dim)
                for w in choices:
                    target_dict['val2id'][i][w] = self.n_dim
                    target_dict['id2val'][i][self.n_dim] = w
                    self.n_dim += 1
                target_dict['R'].append(self.n_dim)

    def arch2feature(self, arch_dict):
        """Encode an architecture dict into a flat one-hot feature vector."""
        d, e, w, r = arch_dict['d'], arch_dict['e'], arch_dict['w'], arch_dict['image_size']
        # d[0] encodes the input-stem configuration; remaining entries are
        # per-stage extra depths
        input_stem_skip = 1 if d[0] > 0 else 0
        d = d[1:]
        feature = np.zeros(self.n_dim)
        feature[self.r_info['val2id'][r]] = 1
        feature[self.input_stem_d_info['val2id'][input_stem_skip]] = 1
        for i in range(self.n_stage + 2):
            feature[self.width_mult_info['val2id'][i][w[i]]] = 1
        start_pt = 0
        for i, base_depth in enumerate(self.base_depth_list):
            # only the first (base_depth + d[i]) blocks of the stage are active
            depth = base_depth + d[i]
            for j in range(start_pt, start_pt + depth):
                feature[self.e_info['val2id'][j][e[j]]] = 1
            start_pt += max(self.depth_list) + base_depth
        return feature

    def feature2arch(self, feature):
        """Decode a one-hot feature vector back into an architecture dict."""
        img_sz = self.r_info['id2val'][
            int(np.argmax(feature[self.r_info['L'][0]:self.r_info['R'][0]])) + self.r_info['L'][0]
        ]
        # stored flag is 0/1; the arch dict convention uses 0/2 (see
        # random_sample_arch), hence the * 2
        input_stem_skip = self.input_stem_d_info['id2val'][
            int(np.argmax(feature[self.input_stem_d_info['L'][0]:self.input_stem_d_info['R'][0]])) +
            self.input_stem_d_info['L'][0]
        ] * 2
        assert img_sz in self.image_size_list
        arch_dict = {'d': [input_stem_skip], 'e': [], 'w': [], 'image_size': img_sz}
        for i in range(self.n_stage + 2):
            arch_dict['w'].append(
                self.width_mult_info['id2val'][i][
                    int(np.argmax(feature[self.width_mult_info['L'][i]:self.width_mult_info['R'][i]])) +
                    self.width_mult_info['L'][i]
                ]
            )
        d = 0        # active blocks in current stage
        skipped = 0  # inactive blocks in current stage
        stage_id = 0
        for i in range(self.max_n_blocks):
            skip = True
            for j in range(self.e_info['L'][i], self.e_info['R'][i]):
                if feature[j] == 1:
                    arch_dict['e'].append(self.e_info['id2val'][i][j])
                    skip = False
                    break
            if skip:
                arch_dict['e'].append(0)
                skipped += 1
            else:
                d += 1
            # stage boundary: (skipped + d) equals the stage's slot count
            if i + 1 == self.max_n_blocks or (skipped + d) % \
                    (max(self.depth_list) + self.base_depth_list[stage_id]) == 0:
                # store the extra depth (active blocks minus base depth)
                arch_dict['d'].append(d - self.base_depth_list[stage_id])
                d, skipped = 0, 0
                stage_id += 1
        return arch_dict

    def random_sample_arch(self):
        """Draw a uniformly random architecture configuration."""
        return {
            'd': [random.choice([0, 2])] + random.choices(self.depth_list, k=self.n_stage),
            'e': random.choices(self.expand_list, k=self.max_n_blocks),
            'w': random.choices(list(range(len(self.width_mult_list))), k=self.n_stage + 2),
            'image_size': random.choice(self.image_size_list)
        }

    def mutate_resolution(self, arch_dict, mutate_prob):
        """With probability ``mutate_prob``, resample the image size in place."""
        if random.random() < mutate_prob:
            arch_dict['image_size'] = random.choice(self.image_size_list)
        return arch_dict

    def mutate_arch(self, arch_dict, mutate_prob):
        """Independently mutate stem skip, depths, widths and expand ratios."""
        # input stem skip
        if random.random() < mutate_prob:
            arch_dict['d'][0] = random.choice([0, 2])
        # depth
        for i in range(1, len(arch_dict['d'])):
            if random.random() < mutate_prob:
                arch_dict['d'][i] = random.choice(self.depth_list)
        # width_mult
        for i in range(len(arch_dict['w'])):
            if random.random() < mutate_prob:
                arch_dict['w'][i] = random.choice(list(range(len(self.width_mult_list))))
        # expand ratio
        for i in range(len(arch_dict['e'])):
            if random.random() < mutate_prob:
                arch_dict['e'][i] = random.choice(self.expand_list)
| StarcoderdataPython |
4993793 | <reponame>Mifour/Algorithms
def retrospectively_best_profit(array):
    """
    Given an array of recorded stock prices during a period,
    return the best profit achievable by buying once and selling
    on the same day or later (0 if no profitable trade exists).

    O(n**2) time, O(1) extra space.
    """
    best_profit = 0
    # Iterate over buy *indices*. The original iterated over the prices
    # themselves and reused each price both as a slice index and as the
    # buy cost, which is wrong for arbitrary price values.
    for buy_day in range(len(array)):
        profit = max(array[buy_day:]) - array[buy_day]
        best_profit = max(best_profit, profit)
    return best_profit
def retrospectively_best_profit2(array):
    """
    Given an array of recorded stock prices during a period,
    return the best profit achievable by buying once and selling
    on the same day or later (0 if no profitable trade exists).

    O(n) time, O(n) space.
    """
    if not array:
        return 0
    # maxes[i] = maximum price from day i to the end of the period.
    maxes = [0] * len(array)
    maxes[-1] = array[-1]
    # The original used range(len(array)-2, 0, -1), which stops at index 1
    # and leaves maxes[0] at 0 -- buying on day 0 was never considered.
    for elem in range(len(array) - 2, -1, -1):
        maxes[elem] = max(array[elem], maxes[elem + 1])
    best_profit = 0
    for i in range(len(array)):
        best_profit = max(best_profit, maxes[i] - array[i])
    return best_profit
| StarcoderdataPython |
89342 | # input a directory, each file is a document
import os
import sys
from gensim import corpora, models
from topic.topicio import TopicIO
from preprocess.DocTokenizer import DirDocTokenizer, FileDocTokenizer
from utils.WordCounter import WordCounter
#
# Analyze a specific LDA file and output results
# Assume the existence of the LDA file, the corpus, src for the corpus, and the dictionary
#
# python lda_analyze.py <input directory> <corpus type> <# of topics> <src> <src type> <alpha> <eta> <output dir>
# <input directory> Directory that saves LDA, dictionary and corpora
# <corpus type> default to bag of words. b for binary, t for tf-idf, anything else or missing for bag of words
# <# of topics> default to 8
# <src> the original corpus src name. used for name conventions
# <src type> 0: a file in which each line is a document.
# Anything else or missing: a directory in which each file is a document
# <output dir> results output directory
# <alpha> default ot 1/# of topics
# <eta> default to 1/# of topics
#
#
# Read command line parameters
#
# Positional sys.argv parsing (Python 2 script -- note the `print`
# statements below). Every argument is optional; missing ones fall back
# to the defaults documented in the header comment above.
if len(sys.argv) <= 1:
    dname = "output"
else:
    dname = sys.argv[1]
if len(sys.argv) <= 2:
    corpus_type = "bow"
else:
    # "t" -> tf-idf, "b" -> binary, anything else -> bag of words
    if sys.argv[2] == "t":
        corpus_type = "tfidf"
    elif sys.argv[2] == "b":
        corpus_type = "binary"
    else:
        corpus_type = "bow"
if len(sys.argv) <= 3:
    topics_count = 8;
else:
    topics_count = int(sys.argv[3]);
if len(sys.argv) <= 4:
    src = "pp_test"
else:
    src = sys.argv[4];
if len(sys.argv) <= 5:
    input_type = True
else:
    # 0 -> src is a single file (one document per line); else a directory
    if int(sys.argv[5]) == 0:
        input_type = False
    else:
        input_type = True
# "d" means "use the library default alpha"
if len(sys.argv) <= 6 or sys.argv[6] == "d":
    alpha_set = False
else:
    alpha_set = True
    alpha = sys.argv[6]
if len(sys.argv) <= 7:
    eta_set = False
else:
    eta_set = True
    eta = sys.argv[7]
if len(sys.argv) <= 8:
    # derive an output directory name encoding src/corpus/topics (and
    # alpha or eta when explicitly set)
    if alpha_set:
        output = "LDA_" + src + "_" + corpus_type + "_t" + str(topics_count)+"_alpha"+str(alpha)
    elif eta_set:
        output = "LDA_" + src + "_" + corpus_type + "_t" + str(topics_count) + "_eta" + str(eta)
    else:
        output = "LDA_" + src + "_" + corpus_type + "_t" + str(topics_count)
else:
    output = sys.argv[8]
# Echo the effective configuration
print "input directory : " + dname
print "# of topics : " + str(topics_count)
print "corpus type :" + corpus_type
print "source : " + src
print "input type :",
if input_type:
    print "Input is a directory"
else:
    print "Input is a file"
print "output directory : " + output
if alpha_set:
    print "alpha : "+ alpha
if eta_set:
    print "eta : " + eta
print "\n"
# Check the output directory
if not os.path.exists(output):
    os.makedirs(output)
# Tokenize the original corpus source to recover the document names/tokens
if input_type:
    # if input is a directory
    mdt = DirDocTokenizer()
    doc_list, token_list = mdt.orig(src)
else:
    fdt = FileDocTokenizer()
    doc_list, token_list = fdt.save_pp(src)
# Load LDA -- the file name must match the one produced at training time
if alpha_set:
    lda_fname = dname + "/" + corpus_type + "_t" + str(topics_count) + "_alpha" + str(alpha) + ".lda"
elif eta_set:
    lda_fname = dname + "/" + corpus_type + "_t" + str(topics_count) + "_eta" + str(eta) + ".lda"
else:
    lda_fname = dname + "/" + corpus_type + "_t" + str(topics_count) + ".lda"
print "Load LDA file : " + lda_fname
lda = models.LdaModel.load(lda_fname, mmap="r")
# Load required corpus according to the argument corpus_type
if corpus_type == 'tfidf':
    corpus_fname = dname + '/tfidf_corpus.mm'
elif corpus_type == 'binary':
    corpus_fname = dname + '/binary_corpus.mm'
else:
    corpus_fname = dname + '/bow_corpus.mm'
print "Load Corpus File " + corpus_fname
corpus = corpora.MmCorpus(corpus_fname)
# Lazy transformed view of the corpus in topic space
corpus_lda = lda[corpus]
corpus_dict = [dict(doc) for doc in corpus]
dictionary = lda.id2word
print dictionary
# Write topics
topics_io = TopicIO()
topics_output = output + "/topics"
topics_io.write_topics(model=lda, orig_dir=src, num_topics=topics_count, num_words=len(dictionary.keys()),
                       output_dir=topics_output)
# For each document, print the probability that the document being to each topic
# (Here we just print the original document file name)
# NOTE(review): none of the file handles opened below are ever closed;
# consider `with open(...)` blocks.
length = len(max([fname for fname in doc_list]))
# Generate document-topic matrix
dunsorted = open(output + "/" + "unsorted_doc_topics.txt", "w")
dunsorted.write("Corpus Type: " + corpus_type)
dunsorted.write("\nTopic Count: " + str(topics_count))
dsorted = open(output + "/" + "sorted_doc_topics.txt", "w")
dsorted.write("Corpus Type: " + corpus_type)
dsorted.write("\nTopic Count: " + str(topics_count))
# doctlist[t] collects the documents whose top topic is t
doctlist = []
for num in range(topics_count):
    doctlist.append([])
for i, doc in enumerate(corpus_lda):
    dunsorted.write("\n\n" + '{:{l}}'.format(doc_list[i], l=length + 3))
    dsorted.write("\n\n" + '{:{l}}'.format(doc_list[i], l=length + 3))
    #write the topic list for each doc by the topic number order
    for value in doc:
        dunsorted.write('{:22}'.format(" " + str(value[0]) + ": " + str('{:.15f}'.format(value[1])) + " "))
    #write the topic list for each doc by a decreasing probability order
    doc = list(reversed(sorted(doc, key=lambda x: x[1])))
    doctlist[doc[0][0]].append(doc_list[i])
    for value in doc:
        dsorted.write('{:22}'.format(" " + str(value[0]) + ": " + str('{:.15f}'.format(value[1])) + " "))
# topic-document matrix
# For each topic, output documents illustrate the highest probability on it
tdoc = open(output + "/" + "td_cluster.txt", "w")
for index, sublist in enumerate(doctlist):
    tdoc.write("topic " + str(index) + ": ")
    tdoc.write(str(len(sublist)))
    for value in sublist:
        tdoc.write(" "+value)
    tdoc.write("\n")
# topic-words matrix
# For each topic, output top 300 words in the topic
tw = open(output + "/" + "words_in_topics.txt", "w")
tw.write("Corpus Type: " + corpus_type)
tw.write("\nTopic Count: " + str(topics_count))
for i in range(topics_count):
    tw.write("\n\nTopic " + str(i) + "\n")
    for w_tuple in lda.show_topic(i, 300):
        tw.write(str(w_tuple[0]) + ": " + str('{:.10f}'.format(w_tuple[1])) + "\n")
# Represent topic be decreasing probability difference
# probability difference = word probability in the topic - word probability in the corpus
wt = WordCounter()
total_words = wt.totalWords(corpus)
#
# Build a dictionary with word frequency in the corpus and write it to a file
freqlist = {}
for word in dictionary:
    word_freq = float(wt.countWords3(corpus_dict, word))/total_words
    freqlist[dictionary.get(word)] = word_freq
# Sort words in topics by word frequency difference from the baseline frequency
if not os.path.exists(output+"/topics_wp"):
    os.makedirs(output+"/topics_wp")
for i in range(topics_count):
    ofile = open(output+"/topics_wp/topic"+str(i)+".txt", "w")
    wtlist = []
    for wtuple in lda.show_topic(i, len(dictionary.keys())):
        # positive diff => word is over-represented in this topic
        freq_diff = wtuple[1] - freqlist[wtuple[0]]
        wtlist.append((wtuple[0], freq_diff, wtuple[1], freqlist[wtuple[0]]))
    wtlist = list(reversed(sorted(wtlist, key=lambda x: x[1])))
    for ftuple in wtlist:
        ofile.write(str(ftuple[0])+" "+str(ftuple[1])+" "+str(ftuple[2])+" "+str(ftuple[3])+"\n")
| StarcoderdataPython |
92510 | <reponame>liuxuan320/CAIL2021<filename>cg_t5_full/classfier.py
import argparse
import torch
from torch.nn.utils.rnn import pad_sequence
from module.tokenizer import T5PegasusTokenizer
from module.model import MT5PForSequenceClassificationSpan
def set_args():
    """Build and parse the command-line arguments for the model.

    Returns:
        argparse.Namespace with the pretrained-model path, the
        classification-head configuration and the loss weights
        (all optional, with defaults).
    """
    parser = argparse.ArgumentParser()
    # NOTE: fixed help text -- the original said "GPT2" (copy-paste error),
    # but the default path points at the T5-PEGASUS model actually used here.
    parser.add_argument('--pretrained_model_path', default='./t5_pegasus_torch/', type=str, help='预训练的T5模型的路径')
    parser.add_argument('--class_size', type=int, default=4, help='类别')
    parser.add_argument('--class_hidden_size', type=int, default=768)
    parser.add_argument('--span_layer', type=str, default="endpoint", help='span层的类型')
    parser.add_argument('--class_proj_dim', type=int, default=256, help='span层的映射大小')
    parser.add_argument('--use_proj', action='store_true', help='是否使用映射')
    parser.add_argument('--generate_weight', type=int, default=1, help='生成模块权重')
    parser.add_argument('--class_weight', type=int, default=1, help='分类模块权重')
    return parser.parse_args()
class Classifier:
    """T5-PEGASUS based generator/classifier wrapper.

    Loads a ``MT5PForSequenceClassificationSpan`` checkpoint and, given a
    question plus candidate passages, generates an answer string via
    ``predict``.
    """

    def __init__(self, vocab_path, model_path, device=0, rs_max_len=200, max_len=512):
        # NOTE(review): set_args() re-parses sys.argv here, which couples
        # this class to the CLI of the hosting process -- confirm intended.
        self.tokenizer = T5PegasusTokenizer.from_pretrained(vocab_path)
        args=set_args()
        self.model = MT5PForSequenceClassificationSpan(args)
        self.model.load_state_dict(torch.load(model_path))
        self.device = device
        self.model.to(device)
        self.model.eval()
        self.rs_max_len = rs_max_len   # budget reserved for the generated result
        self.max_len = max_len         # total encoder input budget
        self.generate_max_len = rs_max_len

    def find_longest_du(self, dus):
        """Return (index of the longest token list, total token count)."""
        max_length = 0
        max_index = -1
        all_length = 0
        for index, du in enumerate(dus):
            all_length += len(du)
            if len(du) > max_length:
                max_length = len(du)
                max_index = index
        return max_index, all_length

    def convert_feature(self, sample, tokenizer, rs_max_len, max_len):
        """
        Build the encoder input ids for one sample.

        Args:
            sample: a dict of the form {"du1": du1, "du2": du2}, where du1
                is the question string and du2 a list of candidate strings.

        Returns:
            list of token ids: [CLS] du1 [SEP] du2_0 [SEP] du2_1 [SEP] ...
        """
        input_ids = []
        du1_tokens = tokenizer.tokenize(sample["du1"])
        du2_tokens = []
        for du2 in sample["du2"]:
            du2_token = self.tokenizer.tokenize(du2)
            du2_tokens.append(du2_token)
        # If the combined text is too long, truncate: repeatedly trim one
        # token from whichever of du1 / longest du2 is currently longest,
        # keeping room for the result (rs_max_len) and special tokens.
        max_index, all_length = self.find_longest_du(du2_tokens)
        while len(du1_tokens) + all_length > max_len - rs_max_len - (3 + len(du2_tokens)):
            if len(du1_tokens) > len(du2_tokens[max_index]):
                du1_tokens = du1_tokens[:-1]
            else:
                du2_tokens[max_index] = du2_tokens[max_index][:-1]
            max_index, all_length = self.find_longest_du(du2_tokens)
        # Assemble the input_ids the model expects
        input_ids.append(self.tokenizer.cls_token_id)
        input_ids.extend(self.tokenizer.convert_tokens_to_ids(du1_tokens))
        input_ids.append(self.tokenizer.sep_token_id)
        for du2_token in du2_tokens:
            input_ids.extend(self.tokenizer.convert_tokens_to_ids(du2_token))
            input_ids.append(self.tokenizer.sep_token_id)
        return input_ids

    def generate_results(self, output, tokenizer):
        """Decode generated id sequences to strings (first token dropped,
        specials skipped, internal spaces removed)."""
        title_list = []
        for i in range(len(output)):
            title = ''.join(tokenizer.decode(output[i][1:], skip_special_tokens=True)).replace(' ', '')
            title_list.append(title)
        return title_list

    def find_smallest_du(self, candidate_list):
        """Return the index of the shortest candidate string."""
        min_length = 10000
        min_index = -1
        for index, du in enumerate(candidate_list):
            if len(du) < min_length:
                min_length = len(du)
                min_index = index
        return min_index

    def clear_strings(self, candidates):
        """Clean each candidate and keep at most 4, dropping the shortest first."""
        new_candidates = []
        for candidate in candidates:
            new_candidate = self.clear_string(candidate)
            new_candidates.append(new_candidate)
        while len(new_candidates) > 4:
            small_index = self.find_smallest_du(new_candidates)
            new_candidates.pop(small_index)
        return new_candidates

    def predict(self, question, candidate_list):
        """Generate an answer string for `question` given candidate passages."""
        input_list = []
        title_list = []
        input_tensors=[]
        question = self.clear_string(question)
        candidate_list = self.clear_strings(candidate_list)
        # NOTE(review): leftover debug prints; consider removing or logging.
        print(type(candidate_list))
        print(len(candidate_list))
        sample = {"du1": question, "du2": candidate_list}
        input_ids = self.convert_feature(sample, self.tokenizer, self.rs_max_len, self.max_len)
        input_list.append(torch.tensor(input_ids, dtype=torch.long))
        with torch.no_grad():
            input_list = pad_sequence(input_list, batch_first=True, padding_value=0)
            # NOTE(review): input_list is already a tensor after pad_sequence;
            # wrapping it in torch.tensor() again triggers a copy/warning.
            input_tensors = torch.tensor(input_list).long().to(self.device)
            text_output, class_output = self.model.generate(input_tensors,
                                                            decoder_start_token_id=self.tokenizer.cls_token_id,
                                                            eos_token_id=self.tokenizer.sep_token_id,
                                                            max_length=self.generate_max_len,return_prob=True)
            title_list = self.generate_results(text_output, self.tokenizer)
        return title_list[0]

    def clear_string(self, sentence):
        """Strip newline, carriage-return and tab characters."""
        sentence = sentence.replace("\n", "")
        sentence = sentence.replace("\r", "")
        sentence = sentence.replace("\t", "")
        return sentence
| StarcoderdataPython |
9703181 | <filename>src/test_baracca.py
import os
import scipy.io
import json
import numpy as np
import pickle
import cv2
import torch
import time
from torch.utils.data import DataLoader
# Import Datasets
from src.Datasets.Baracca import Baracca
# Import Model
from src.models.refinement import LinearModel
from src.models.refine_patch_2d import Patch_2D_Model
from src.models.pointnet import PointPatch
from src.models.module_utilizer import ModuleUtilizer
# Import Utils
from tqdm import tqdm
# Setting seeds
def worker_init_fn(worker_id):
    """Seed numpy inside each DataLoader worker from torch's per-worker seed.

    torch already assigns each worker a distinct initial seed; folding it
    into numpy (mod 2**32, numpy's seed range) keeps the two RNGs aligned.
    `worker_id` is accepted to satisfy the DataLoader callback signature.
    """
    worker_seed = torch.initial_seed() % 2 ** 32
    np.random.seed(worker_seed)
def world_to_depth(kpt_3d, angle):
    """Project 15 3D keypoints onto the depth image plane.

    The fixed camera intrinsics are rotated to match the sensor
    orientation `angle` (degrees: 0/90/180/270/360; any other value
    leaves the intrinsics unchanged, like 0). Returns a (15, 2)
    float32 array of pixel coordinates with NaNs replaced by 0.
    """
    cx, cy = 336.14, 231.349
    fx, fy = 461.605, 461.226
    width = 480
    height = 640
    if angle == 90:
        cx, cy = cy, height - cx
        fx, fy = fy, fx
    elif angle == 180:
        cx, cy = width - cx, height - cy
    elif angle == 270:
        cx, cy = width - cy, cx
        fx, fy = fy, fx
    # 0, 360 and any unlisted angle: keep the default intrinsics.
    proj = np.zeros((15, 2), dtype=np.float32)
    # Pinhole projection; note the y axis is flipped (image y grows down).
    proj[..., 0] = kpt_3d[..., 0] * fx / kpt_3d[..., 2] + cx
    proj[..., 1] = -kpt_3d[..., 1] * fy / kpt_3d[..., 2] + cy
    return np.nan_to_num(proj)
class PoseTest(object):
    """
    DepthPose class for test only.

    Loads a refinement network selected by the configuration, runs it over
    the Baracca test split, and stores the refined keypoints (optionally
    dumping them to a pickle and/or rendering overlay images).
    """

    def __init__(self, configer):
        self.configer = configer

        # DataLoader (created in init_model)
        self.data_loader = None

        # Input and output size
        self.input_size = configer.get('data', 'input_size')
        self.output_size = configer.get('data', 'output_size')
        self.data_path = configer["train_dir"]

        # Module load and save utility
        self.model_utility = ModuleUtilizer(self.configer)
        self.net = None

        # Other useful data
        self.side = self.configer["side"]
        self.result_dir = self.configer.get("data", "result_dir")
        self.type = self.configer.get("data", "type")  # "base" | "depth" | "pcloud"
        self.dataset = self.configer.get("dataset").lower()
        self.mean = 0.0
        self.std = 0.0
        self.ids_train = None
        self.ids_test = None
        self.img_saved = 0  # running count of rendered overlay images

    def init_model(self):
        """
        Load model function.

        Builds the network matching ``self.type``, resumes its checkpoint,
        and constructs the test DataLoader (batch size 1, no shuffling).
        """
        # Selecting activation function
        act = self.configer.get('network', 'activation')
        if act == 'ReLU' or act == 'Relu' or act == 'relu' or act == 'ReLu':
            activation = torch.nn.ReLU
        elif act == 'Tanh' or act == 'tanh' or act == 'tan':
            activation = torch.nn.Tanh
        elif act == 'PReLU' or act == 'PRelu' or act == 'prelu' or act == 'PReLu' or act == 'Prelu':
            activation = torch.nn.PReLU
        else:
            raise NotImplementedError(f"Wrong activation function: {act}")

        # Selecting correct model and normalization variable based on type variable
        # ToDO add new models for pcloud and voxel
        if self.type == "base":
            # Linear model for base type
            self.net = LinearModel(self.input_size[0] * self.input_size[1],
                                   self.output_size[0] * self.output_size[1],
                                   self.configer.get('network', 'linear_size'),
                                   self.configer.get('network', 'dropout'),
                                   self.configer.get('network', 'batch_norm'),
                                   self.configer.get('network', 'residual'),
                                   activation)
        elif self.type == "depth":
            # 2D Depth patch model, choice based over model_name version
            # NOTE(review): Patch_2D_Model_V1..V4 are referenced here but the
            # file only imports Patch_2D_Model -- confirm these names resolve.
            if self.configer.get("network", "model_name").lower() == "v1":
                self.net = Patch_2D_Model_V1(self.configer.get("data", "output_size"), activation)
            elif self.configer.get("network", "model_name").lower() == "v2":
                self.net = Patch_2D_Model_V2(self.configer.get("data", "output_size"), activation)
            elif self.configer.get("network", "model_name").lower() == "v3":
                self.net = Patch_2D_Model_V3(self.configer.get("data", "output_size"), activation)
            elif self.configer.get("network", "model_name").lower() == "v4":
                self.net = Patch_2D_Model_V4(self.configer.get("data", "output_size"), activation)
            else:
                raise NotImplementedError(
                    "Model version: {} is not implemented".format(self.configer.get("network", "model_name")))
        elif self.type == "pcloud":
            # NOTE(review): PointPatch_channel/PointPatch_batch are referenced
            # but only PointPatch is imported -- confirm these names resolve.
            if self.configer.get("network", "model_name").lower() == "v1":
                self.net = PointPatch_channel(True, True, 0.2)
            elif self.configer.get("network", "model_name").lower() == "v2":
                self.net = PointPatch_batch(True, True, 0.2)
            else:
                raise NotImplementedError(
                    "Model version: {} is not implemented".format(self.configer.get("network", "model_name")))
        else:
            raise NotImplementedError("Type: {} not implemented yet".format(self.type))

        if self.configer.get('resume') is not None:
            print("Resuming checkpoints at: {}".format(self.configer.get('resume')))
        else:
            print("Warning! You're not resuming any known checkpoints for test operations!")
        self.net, _, _, _ = self.model_utility.load_net(self.net)

        # Selecting Dataset and DataLoader
        if self.dataset == "baracca":
            Dataset = Baracca
        else:
            raise NotImplementedError(f"Dataset not supported: {self.configer.get('dataset')}")

        # Setting Dataloaders
        self.data_loader = DataLoader(
            Dataset(self.configer, split="test"),
            1, shuffle=False,
            num_workers=self.configer.get('solver', 'workers'), pin_memory=True, worker_init_fn=worker_init_fn)

    def __test(self):
        """
        Test function on multiple images.

        Runs inference over the whole test loader, stores the (visibility
        masked) predictions into ``self.kpts_dict`` and optionally renders
        overlay images.
        """
        self.net.eval()

        # Save inference time
        tot_time = 0.0
        with torch.no_grad():
            for i, data_tuple in enumerate(tqdm(self.data_loader)):
                """
                input, gt, visible, img_index
                """
                inputs = data_tuple[0].cuda()
                kpts_in = data_tuple[1].cpu().numpy()
                visible = data_tuple[2].cpu().numpy()
                ids = data_tuple[-1].cpu().numpy()

                start = time.time()
                # Inference
                output = self.net(inputs)
                # Saving inference time values
                end = time.time()
                tot_time += end - start

                # loss = mse_masked(output, gt, data_tuple[2].cuda())
                # For offset-style models the network predicts a correction
                # on top of the input keypoints; otherwise it predicts the
                # keypoints directly.
                if self.type.lower() in ("depth", "pcloud") \
                        or (self.type.lower() == "base" and self.configer.get("offset") is True):
                    kpts_off = output.cpu().detach().numpy().astype(np.float32)
                    kpts_pred = kpts_in + kpts_off
                else:
                    kpts_pred = output.cpu().detach().numpy().astype(np.float32)

                # zero out keypoints flagged as not visible
                self.kpts_dict[ids.item()] = kpts_pred[0] * np.expand_dims(visible[0], axis=1)

                # Saving images
                if self.configer.get("save_img") is True:
                    name = self.configer.get("data", "type")
                    self.save_images(ids, kpts_pred, kpts_in, visible, f"/homes/adeusanio/imgs/{name}")
                    self.img_saved += ids.shape[0]

    def test(self):
        """Run the full test pass and optionally pickle the predictions."""
        print("Starting test procedure.")
        start = time.time()
        # one (15, C) slot per test sample; C follows the configured input size
        self.kpts_dict = np.zeros((2400, 15, self.configer["data", "input_size"][-1]))
        self.__test()
        if self.configer.get("save_pkl") is True and self.configer.get("from_gt") is False:
            save_name = self.configer.get("checkpoints", "save_name")
            with open(os.path.join("predictions", f"{save_name}_new.pkl"), "wb") as outfile:
                pickle.dump(self.kpts_dict, outfile, protocol=pickle.HIGHEST_PROTOCOL)
        print("Done in {}s".format(time.time() - start))

    def save_images(self, ids, kpts, kpts_in, visible, out_path):
        """Render predicted keypoints over the depth image and save a PNG."""
        visible = visible[0]
        kpts = kpts[0]
        if self.configer["data", "kpts_type"].lower() == "3d":
            # project 3D predictions into the depth frame using the sequence's
            # recorded sensor orientation
            seq = int(self.data_loader.dataset.imgs_path[ids.item()].split("/")[-2])
            kpts2d = world_to_depth(kpts, int(self.data_loader.dataset.pos[str(seq)]['orientation'][0]))
            kpts_in2d = world_to_depth(kpts_in, int(self.data_loader.dataset.pos[str(seq)]['orientation'][0]))
        # NOTE(review): kpts2d is only assigned in the "3d" branch above but is
        # used unconditionally below -- presumably kpts_type is always "3d"
        # in this pipeline; confirm.
        path = self.data_loader.dataset.imgs_path[ids.item()]
        name = f"{ids.item()}.png"
        img = cv2.imread(path, 3)
        img = (img / img.max() * 255).astype(np.uint8)
        for i, el in enumerate(kpts2d):
            if visible[i] == 0:
                continue
            if i == 0:
                cv2.circle(img, (int(el[0]), int(el[1])), 5, (0, 0, 255), -1)
            else:
                cv2.circle(img, (int(el[0]), int(el[1])), 5, (255, 0, 0), -1)
            # annotate each joint with its predicted depth
            cv2.putText(img, f"{kpts[i][2]:.1f}", (int(el[0] - 30), int(el[1]) - 20), 1, 1, (255, 255, 255), 2)
        cv2.imwrite(f"{out_path}/{name}", img)
| StarcoderdataPython |
3563957 | <filename>model/framework/neural_npfp/neural_npfp/results.py
import pandas as pd
import numpy as np
import torch
from model import *
import seaborn as sns
from matplotlib import pyplot as plt
from validation_updated import *
from prettytable import PrettyTable
from rdkit.Chem import AllChem, DataStructs, Draw
from rdkit import Chem
from rdkit.DataManip.Metric.rdMetricMatrixCalc import GetTanimotoSimMat
from rdkit.Chem.rdMolDescriptors import GetMorganFingerprintAsBitVect
from sklearn.metrics.pairwise import cosine_similarity
from rdkit.Chem.Descriptors import ExactMolWt
from rdkit.Chem.rdMolDescriptors import CalcNumHeteroatoms, CalcFractionCSP3
from utils import *
import yaml
import matplotlib.patches as mpatches
from scipy.stats import ttest_ind, ttest_rel
from rdkit.Chem.Draw import MolsToGridImage
import argparse
#%% Colors
# CLI: single optional --input pointing at the trained-model directory.
parser = argparse.ArgumentParser(description='List the content of a folder')
parser.add_argument("--input",default = "../data/trained_models/npl_nonorm_64/",const = "../data/trained_models/npl_nonorm_64/",nargs ="?",type = str,help="Path to the trained Models")
args = parser.parse_args()
# Greyscale palette and legend patches shared by all plots below.
my_white = "#f7f7f7"
my_grey = "#969696"
my_black = "#252525"
mfp_patch = mpatches.Patch(edgecolor = "black", facecolor = my_white,hatch ="xx", label='NC_MFP')
aux_patch = mpatches.Patch(edgecolor = "black", facecolor = my_grey, label='NP_AUX')
ae_patch = mpatches.Patch(edgecolor = "black", facecolor = my_white,hatch ="///", label='NP_AE')
baseline_patch = mpatches.Patch(edgecolor = "black", facecolor = my_white, label='Baseline')
ecfp_patch = mpatches.Patch(edgecolor = "black", facecolor = my_black, label='ECFP4')
#%% Load Data
print("Load Data \n")
# Load the precomputed fingerprints, keep only the whitelisted molecules,
# and rebuild the same 5-fold CV split used at training time (seed 42).
fps = pd.read_csv("../data/precomputed_fingerprints.csv")
remove_val_mol = pd.read_pickle("../data/to_keep_molecules.pkl")
fps=fps.loc[remove_val_mol,:]
fps.reset_index(inplace =True, drop=True)
idx_list = list(range(fps.shape[0]))
random.seed(42)
random.shuffle(idx_list)
cv_chunks = np.array_split(idx_list,5)
val_chunks = []
train_chunks = []
for i in range(5):
    # fold i validates on chunk i and trains on the remaining four
    val_chunks.append( cv_chunks[i])
    train_chunks.append(np.concatenate(cv_chunks[:i]+cv_chunks[i+1:]))
#%% Load Trained Models
# Instantiate the three architectures from the saved training settings;
# per-fold weights are loaded later inside each evaluation loop.
model_path = args.input
settings = yaml.safe_load(open(model_path+"settings.yml", "r"))
model_aux = MLP(settings["aux_model"]["layers"],1 ,settings["aux_model"]["dropout"])
model_baseline = MLP(settings["baseline_model"]["layers"],1 ,settings["baseline_model"]["dropout"])
model_ae =FP_AE(settings["ae_model"]["layers"],1+settings["ae_model"]["with_npl"],settings["ae_model"]["dropout"])
#%% Evaluate Model performance on Validation Sets
print("Evaluate model performance...\n")
results = np.zeros([5,6])
for i in range(5):
model_baseline.load_state_dict(torch.load(model_path+"baseline_cv"+str(i)+".pt"))
model_baseline.eval()
model_aux.load_state_dict(torch.load(model_path+"aux_cv"+str(i)+".pt"))
model_aux.eval()
model_ae.load_state_dict(torch.load(model_path+"ae_cv"+str(i)+".pt"))
model_ae.eval()
validation_set = fps.iloc[val_chunks[i],:2048]
np_val = fps.iloc[val_chunks[i],2048:]
pred, nprod ,_ = model_baseline(torch.tensor(validation_set.values, dtype = torch.float))
perc = expit(nprod.detach().clone().numpy())
results[i,0] = np.round(roc_auc_score(np_val.iloc[:,1], perc.reshape(-1)),4)
results[i,1]= np.round(roc_auc_score(np_val.iloc[:,1][np_val.iloc[:,0]<0], perc.reshape(-1)[np_val.iloc[:,0]<0]),4)
pred, nprod ,_ = model_aux(torch.tensor(validation_set.values, dtype = torch.float))
perc = expit(nprod.detach().clone().numpy())
results[i,2] = np.round(roc_auc_score(np_val.iloc[:,1], perc.reshape(-1)),4)
results[i,3]= np.round(roc_auc_score(np_val.iloc[:,1][np_val.iloc[:,0]<0], perc.reshape(-1)[np_val.iloc[:,0]<0]),4)
pred, nprod ,_ = model_ae(torch.tensor(validation_set.values, dtype = torch.float))
perc = expit(nprod[:,0].detach().clone().numpy())
results[i,4] = np.round(roc_auc_score(np_val.iloc[:,1], perc.reshape(-1)),4)
results[i,5]= np.round(roc_auc_score(np_val.iloc[:,1][np_val.iloc[:,0]<0], perc.reshape(-1)[np_val.iloc[:,0]<0]),4)
mean = np.round(np.mean(results,axis=0),4)
sd = np.round(np.std(results, axis=0),4)
to_print = pd.DataFrame(np.zeros([3,2]))
to_print.index = ["Baseline", "NP_AUX", "NP_AE"]
to_print.columns = ["AUC (SD)", "AUC NPL < 0 (SD)"]
for k in range(3):
to_print.iloc[k,0] = str(mean[2*k]) + " (" + str(sd[2*k]) +")"
to_print.iloc[k,1] = str(mean[2*k+1]) + " (" + str(sd[2*k+1]) +")"
print(to_print.to_latex())
#%% NP Validation
print("\nNP Identification Task")
res = list()
for i in range(5):
model_baseline.load_state_dict(torch.load(model_path+"baseline_cv"+str(i)+".pt"))
model_aux.load_state_dict(torch.load(model_path+"aux_cv"+str(i)+".pt"))
model_ae.load_state_dict(torch.load(model_path+"ae_cv"+str(i)+".pt"))
model_ae.cuda()
model_ae.eval()
model_aux.cuda()
model_aux.eval()
model_baseline.cuda()
model_baseline.eval()
aux = task1_validation(model_aux).values
ae = task1_validation(model_ae).values
baseline = task1_validation(model_baseline).values
res.append(np.vstack((aux,ae,baseline)))
ttest_ind(np.stack(res)[:,2,1], np.stack(res)[:,1,1])
mean_t1 = np.mean(np.stack(res),axis=0).round(3)
sd_t1 = np.std(np.stack(res),axis=0).round(3)
to_print = pd.DataFrame(np.zeros((4,2)))
to_print.index = ["NC_MFP", "NP_AUX", "NP_AE", "Baseline"]
to_print.columns = ["Model AUC (SD)", "Fingerprint AUC (SD)"]
for i in range(3):
to_print.iloc[i+1,0] = str(mean_t1[i,0]) + " (" +str(sd_t1[i,0]) + ")"
to_print.iloc[i+1,1] = str(mean_t1[i,1]) + " (" +str(sd_t1[i,1]) + ")"
to_print.iloc[0,0] = "-"
to_print.iloc[0,1] = mean_t1[0,2]
print(to_print.to_latex())
#%% Target Identification
print("\nTarget Identification Task")
auc = []
ef = []
auc_rank = []
ef_rank = []
for i in range(5):
model_baseline.load_state_dict(torch.load(model_path+"baseline_cv"+str(i)+".pt"))
model_aux.load_state_dict(torch.load(model_path+"aux_cv"+str(i)+".pt"))
model_ae.load_state_dict(torch.load(model_path+"ae_cv"+str(i)+".pt"))
model_ae.cuda()
model_ae.eval()
model_aux.cuda()
model_aux.eval()
model_baseline.cuda()
model_baseline.eval()
task2_results_baseline = task2_validation(model_baseline)
task2_results_aux = task2_validation(model_aux)
task2_results_ae = task2_validation(model_ae)
results = np.stack(task2_results_ae)
results_2 =np.stack(task2_results_aux)
results_3 = np.stack(task2_results_baseline)
results=np.hstack([results, results_2[:,4:8], results_3[:,4:8]])
auc_rank.append(pd.DataFrame(results[:,[0,12,4,16,8]]).rank(axis=1, ascending = False))
ef_rank.append(pd.DataFrame(results[:,[1,13,5,17,9]]).rank(axis=1, ascending = False))
auc.append(pd.DataFrame(results[:,[0,12,4,16,8]]))
ef.append(pd.DataFrame(results[:,[1,13,5,17,9]]))
auc_rank = np.stack(auc_rank)
ef_rank = np.stack(ef_rank)
mean_auc_rank = np.round(np.mean(np.mean(auc_rank,axis=0),axis=0),3)
mean_ef_rank = np.round(np.mean(np.mean(ef_rank,axis=0),axis=0),3)
sd_auc_rank = np.round(np.std(np.mean(auc_rank,axis=1),axis=0),3)
sd_ef_rank = np.round(np.std(np.mean(ef_rank,axis=1),axis=0),3)
to_print = pd.DataFrame(np.zeros([2,5]))
to_print.columns = ["NC_MFP", "NP_AUX", "NP_AE", "Baseline", "ECFP4"]
to_print.index = ["AUC (SD)", "EF 1% (SD)"]
for k in range(5):
to_print.iloc[0,k] = str(mean_auc_rank[k]) + " (" + str(sd_auc_rank[k]) +")"
to_print.iloc[1,k] = str(mean_ef_rank[k]) + " (" + str(sd_ef_rank[k]) +")"
print(to_print.to_latex())
auc = np.stack(auc)
ef = np.stack(ef)
mean_auc = np.mean(auc, axis=0)
se_auc =np.std(auc,axis=0)
#h_auc = se_auc * stats.t.ppf((1 + 0.95) / 2., 4)
h_auc = se_auc[:,1:-1]
mean_ef = np.mean(ef, axis=0)
np.median(mean_ef,axis=0)
se_ef =np.std(ef,axis=0)
h_ef = se_ef[:,1:-1]
mean_ef_print = np.mean(mean_ef,axis = 0 ).round(3)
mean_auc_print = np.mean(mean_auc,axis = 0 ).round(3)
sd_ef_print = np.std(mean_ef,axis = 0 ).round(3)
sd_auc_print = np.std(mean_auc,axis = 0 ).round(3)
to_print = pd.DataFrame(np.zeros([5,2]))
to_print.index = ["NC_MFP", "NP_AUX", "NP_AE", "Baseline", "ECFP4"]
to_print.columns = ["AUC (SD)", "EF 1% (SD)"]
for k in range(5):
to_print.iloc[k,0] = str(mean_auc_print[k]) + " (" + str(sd_auc_print[k]) +")"
to_print.iloc[k,1] = str(mean_ef_print[k]) + " (" + str(sd_ef_print[k]) +")"
print(to_print.to_latex())
#%%%
# Grouped bar chart: per-target AUC (top) and EF 1% (bottom) for the five
# methods; saved as a PDF. Columns 1..3 get error bars from h_auc / h_ef.
n_groups=7
index = np.arange(n_groups)
bar_width = 0.15
opacity = 0.8
fig, ax = plt.subplots(nrows=2, ncols=1, sharex=True, figsize=(10,7) )
ax[0].bar(index, mean_auc[:,0],bar_width,color = my_white,hatch ="xx", edgecolor="black")
ax[0].bar(index+bar_width*1, mean_auc[:,1],bar_width, edgecolor="black", color = my_grey,yerr = h_auc[:,0],capsize=3)
ax[0].bar(index+bar_width*2, mean_auc[:,2],bar_width, edgecolor="black", color = my_white,hatch ="///", yerr = h_auc[:,1],capsize=3)
ax[0].bar(index+bar_width*3, mean_auc[:,3],bar_width, edgecolor="black", color = my_white, yerr = h_auc[:,2],capsize=3)
ax[0].bar(index+bar_width*4, mean_auc[:,4],bar_width, edgecolor="black", color = my_black)
ax[0].spines['right'].set_visible(False)
ax[0].spines['top'].set_visible(False)
ax[0].set_ylabel("AUC")
ax[0].xaxis.set_ticks_position('bottom')
ax[0].set_xticks(index + 2*bar_width)
ax[1].bar(index, mean_ef[:,0],bar_width,color = my_white,hatch ="xx", edgecolor="black")
ax[1].bar(index+bar_width*1, mean_ef[:,1],bar_width,edgecolor="black",color = my_grey, yerr = h_ef[:,0],capsize=3)
ax[1].bar(index+bar_width*2, mean_ef[:,2],bar_width, edgecolor="black",color = my_white,hatch ="///", yerr = h_ef[:,1],capsize=3)
ax[1].bar(index+bar_width*3, mean_ef[:,3],bar_width, edgecolor="black", color = my_white, yerr = h_ef[:,2],capsize=3)
ax[1].bar(index+bar_width*4, mean_ef[:,4],bar_width, edgecolor="black", color = my_black)
ax[1].set_ylabel("EF 1%")
ax[1].xaxis.set_ticks_position('bottom')
ax[1].set_xticks(index + 2*bar_width)
ax[1].set_xticklabels( [str(x) for x in range(1,8)])
ax[1].spines['right'].set_visible(False)
ax[1].spines['top'].set_visible(False)
ax[1].set_xlabel('Target')
ax[1].legend(handles=[mfp_patch,aux_patch,ae_patch, baseline_patch, ecfp_patch], loc="upper center", bbox_to_anchor=(.45, -0.17), ncol=5, fancybox=False, frameon=False)
fig.tight_layout(pad=2)
plt.savefig("../results/plots/target_identification.pdf",format="pdf", dpi =300, bbox_inches='tight')
#%% t-tests task 2
# Paired t-test: Baseline (col 3) vs NC_MFP (col 0) AUC across targets.
# NOTE(review): both expression results are discarded — presumably
# inspected interactively in the notebook-style cell.
np.mean(mean_ef, axis=0)
ttest_rel(mean_auc[:,3], mean_auc[:,0])
#%% NP and Target Identification
# For every CV fold, every model variant, and every one of 14 targets:
# rank the library against each active NPASS compound by fingerprint
# similarity (learned NNFP and ECFP), score the ranking (AUC, enrichment
# factors, NP hit counts, mean NP-likeness of the top 1%) and dump one
# 18-row results CSV per (model, target, fold) combination.
import warnings
from FPSim2 import FPSim2CudaEngine
warnings.filterwarnings("ignore")
print("NP and Target Identification")
print("This will take some time...")
if not os.path.exists("../results/np+target/"+model_path.split("/")[-2]):
    os.makedirs("../results/np+target/"+model_path.split("/")[-2])
fp_nobinary = list()
for i in range(14):
    aux_data = pd.read_csv("../data/validation_sets/np_target_identification/smiles_target" +str(i)+".csv")
    # Drop compounds flagged as NP with an NP-likeness score > 1
    # (presumably trivially easy cases — TODO confirm rationale).
    to_drop = aux_data[(aux_data.npl>1) & ( aux_data.np==1)].index.tolist()
    aux_data = aux_data.drop(to_drop,axis=0).reset_index(drop=True)
    # Pre-compute ECFP4 (Morgan radius-2, 2048 bit) fingerprints once per target.
    fp_nobinary.append([AllChem.GetMorganFingerprintAsBitVect(Chem.MolFromSmiles(smile),2,nBits=2048) for smile in aux_data.smiles])
for cv in range(5):
    model_baseline.load_state_dict(torch.load(model_path+"baseline_cv"+str(cv)+".pt"))
    model_aux.load_state_dict(torch.load(model_path+"aux_cv"+str(cv)+".pt"))
    model_ae.load_state_dict(torch.load(model_path+"ae_cv"+str(cv)+".pt"))
    model_ae.cuda()
    model_ae.eval()
    model_aux.cuda()
    model_aux.eval()
    model_baseline.cuda()
    model_baseline.eval()
    model_ll = [model_ae, model_aux, model_baseline]
    label_ll = ["ae", "aux", "baseline"]
    for k in range(3):
        for i in range(14):
            aux_data = pd.read_csv("../data/validation_sets/np_target_identification/smiles_target" +str(i)+".csv")
            to_drop = aux_data[(aux_data.npl>1) & ( aux_data.np==1)].index.tolist()
            aux_data = aux_data.drop(to_drop,axis=0).reset_index(drop=True)
            fps_data = pd.read_csv("../data/validation_sets/np_target_identification/fps_target" +str(i)+".csv")
            fps_data = fps_data.drop(to_drop,axis=0).reset_index(drop=True)
            # Index [2] of the model output is the learned fingerprint layer.
            nnfp_model = model_ll[k](torch.tensor(fps_data.values, dtype =torch.float).cuda())[2]
            nnfp ={"nnfp":pd.DataFrame(nnfp_model)}
            # Query compounds: actives that come from the NPASS database.
            npass_active = np.where((aux_data.active==1) & (aux_data.npass==1))[0]
            results = pd.DataFrame(np.zeros([18,len(npass_active)]))
            true_pos_rate = list()
            for x in range(npass_active.shape[0]):
                out=evaluate_fp(nnfp,fp_nobinary[i], aux_data.active,x)
                out=pd.concat([out, aux_data.loc[out.index].drop("smiles",axis=1)],axis=1)
                # Rows 0-8: metrics for the NNFP ranking.
                ordered_nnfp=out.sort_values("nnfp", ascending =False)
                true_pos_rate.append(np.sum((ordered_nnfp.target==1)& (ordered_nnfp.np==1).iloc[:ordered_nnfp.shape[0]//100])/ np.sum((ordered_nnfp.np==1).iloc[:ordered_nnfp.shape[0]//100]))
                target_list=[[x] for x in ordered_nnfp.target]
                results.iloc[0,x]=CalcAUC(target_list,0)
                results.iloc[1:3,x]=CalcEnrichment(target_list,0,[0.01, 0.025])
                results.iloc[3,x]=np.sum(np.sum(ordered_nnfp.iloc[:ordered_nnfp.shape[0]//100,[5,6]],axis=1)==2)
                results.iloc[4,x]=np.sum(ordered_nnfp.iloc[:ordered_nnfp.shape[0]//100,[6]]).values
                target_list=[[x] for x in ((ordered_nnfp.target==1) & (ordered_nnfp.np==1))]
                results.iloc[5,x]=CalcAUC(target_list,0)
                results.iloc[6:8,x]=CalcEnrichment(target_list,0,[0.01, 0.025])
                results.iloc[8,x] = np.mean(ordered_nnfp.npl.iloc[:ordered_nnfp.shape[0]//100])
                # Rows 9-17: the same metrics for the ECFP ranking.
                ordered_ecfp=out.sort_values("ECFP", ascending =False)
                target_list=[[x] for x in ordered_ecfp.target]
                results.iloc[9,x]=CalcAUC(target_list,0)
                results.iloc[10:12,x]=CalcEnrichment(target_list,0,[0.01, 0.025])
                results.iloc[12,x]=np.sum(np.sum(ordered_ecfp.iloc[:ordered_ecfp.shape[0]//100,[5,6]],axis=1)==2)
                results.iloc[13,x]=np.sum(ordered_ecfp.iloc[:ordered_ecfp.shape[0]//100,[6]]).values
                target_list=[[x] for x in ((ordered_ecfp.target==1) & (ordered_ecfp.np==1))]
                results.iloc[14,x]=CalcAUC(target_list,0)
                results.iloc[15:17,x]=CalcEnrichment(target_list,0,[0.01, 0.025])
                results.iloc[17,x] = np.mean(ordered_ecfp.npl.iloc[:ordered_ecfp.shape[0]//100])
            results.to_csv("../results/np+target/"+model_path.split("/")[-2]+"/"+str(label_ll[k])+"_"+str(i)+"_cv"+str(cv)+".csv",index=False)
warnings.filterwarnings("default")
#%%
# Aggregate the per-query CSVs written by the previous cell into
# (cv, metric, target) cubes per model family, then summarize the
# NP-subset AUC / EF 1% across methods (AUX, AE, Baseline, ECFP) as a
# LaTeX table. The three identical read loops are deduplicated into a
# helper, and the dead assignment to `h_auc` (a t-based interval computed
# and immediately overwritten) is commented out — consistent with the
# analogous commented-out line in the earlier Target Identification cell.
def _load_cv_means(label):
    """Return the (5 cv, 18 metrics, 14 targets) cube of per-target
    column means for the result family named `label`."""
    cube = np.zeros([5, 18, 14])
    for cv in range(5):
        for i in range(14):
            res = pd.read_csv("../results/np+target/" + model_path.split("/")[-2]
                              + "/" + label + "_" + str(i) + "_cv" + str(cv) + ".csv")
            cube[cv, :, i] = np.mean(res, axis=1)
    return cube

# Row labels: first 9 entries describe the NNFP ranking, the second 9 the
# ECFP ranking (same metric order) — matches the writer cell above.
_metric_names = ["AUC", "EF1", "EF2.5", "ActiveNP", "NP", "NP AUC", "EF1 NP",
                 "EP2.5 NP", "Mean NPL"] * 2

cv_out_aux = _load_cv_means("aux")
out_aux = pd.DataFrame(np.mean(cv_out_aux, axis=0))
out_aux.index = _metric_names
cv_out_ae = _load_cv_means("ae")
out_ae = pd.DataFrame(np.mean(cv_out_ae, axis=0))
out_ae.index = _metric_names
cv_out_baseline = _load_cv_means("baseline")
out_baseline = pd.DataFrame(np.mean(cv_out_baseline, axis=0))
out_baseline.index = _metric_names

# Metric row 5 = "NP AUC" (NNFP ranking); row 14 = the ECFP counterpart.
auc = np.stack([cv_out_aux[:,5,:], cv_out_ae[:,5,:], cv_out_baseline[:,5,:], cv_out_aux[:,14,:]])
mean_auc = np.mean(auc, axis=1).transpose()
se_auc =np.std(auc,axis=1).transpose()
#h_auc = se_auc * stats.t.ppf((1 + 0.95) / 2., 4)
h_auc = se_auc[:,:-1]
ef = np.stack([cv_out_aux[:,6,:], cv_out_ae[:,6,:], cv_out_baseline[:,6,:], cv_out_aux[:,15,:]])
np.mean(np.mean(ef,axis=2), axis=1)
mean_ef = np.mean(ef, axis=1).transpose()
se_ef = np.std(ef,axis=1).transpose()
h_ef = se_ef[:,:-1]
mean_ef_print = np.mean(mean_ef,axis = 0 ).round(3)
mean_auc_print = np.mean(mean_auc,axis = 0 ).round(3)
sd_ef_print = np.std(mean_ef,axis = 0 ).round(3)
sd_auc_print = np.std(mean_auc,axis = 0 ).round(3)
to_print = pd.DataFrame(np.zeros([4,2]))
to_print.index = [ "NP_AUX", "NP_AE", "Baseline", "ECFP4"]
to_print.columns = ["AUC (SD)", "EF 1% (SD)"]
for k in range(4):
    to_print.iloc[k,0] = str(mean_auc_print[k]) + " (" + str(sd_auc_print[k]) +")"
    to_print.iloc[k,1] = str(mean_ef_print[k]) + " (" + str(sd_ef_print[k]) +")"
print(to_print.to_latex())
#%%
# Grouped bar chart over the 14 targets: NP-subset AUC (top) and EF 1%
# (bottom) for AUX, AE, Baseline and ECFP; saved as a PDF. The last
# method (ECFP, black) is drawn without error bars, matching h_auc/h_ef
# which drop the final column.
n_groups=14
index = np.arange(n_groups)
bar_width = 0.2
opacity = 0.8
fig, ax = plt.subplots(nrows=2, ncols=1, sharex=True, figsize=(12,7) )
ax[0].bar(index, mean_auc[:,0],bar_width, edgecolor="black", color = my_grey,yerr = h_auc[:,0],capsize=3)
ax[0].bar(index+bar_width*1, mean_auc[:,1],bar_width, edgecolor="black", color = my_white,hatch ="///", yerr = h_auc[:,1],capsize=3)
ax[0].bar(index+bar_width*2, mean_auc[:,2],bar_width, edgecolor="black", color = my_white, yerr = h_auc[:,2],capsize=3)
ax[0].bar(index+bar_width*3, mean_auc[:,3],bar_width, edgecolor="black", color = my_black)
ax[0].spines['right'].set_visible(False)
ax[0].spines['top'].set_visible(False)
ax[0].set_ylabel("AUC")
ax[0].xaxis.set_ticks_position('bottom')
ax[0].set_xticks(index + 2*bar_width)
ax[1].bar(index+bar_width*0, mean_ef[:,0],bar_width,edgecolor="black",color = my_grey, yerr = h_ef[:,0],capsize=3)
ax[1].bar(index+bar_width*1, mean_ef[:,1],bar_width, edgecolor="black",color = my_white,hatch ="///", yerr = h_ef[:,1],capsize=3)
ax[1].bar(index+bar_width*2, mean_ef[:,2],bar_width, edgecolor="black", color = my_white, yerr = h_ef[:,2],capsize=3)
ax[1].bar(index+bar_width*3, mean_ef[:,3],bar_width, edgecolor="black", color = my_black)
ax[1].set_ylabel("EF 1%")
ax[1].xaxis.set_ticks_position('bottom')
ax[1].set_xticks(index + 2*bar_width)
ax[1].set_xticklabels( [str(x) for x in range(1,15)])
ax[1].spines['right'].set_visible(False)
ax[1].spines['top'].set_visible(False)
ax[1].set_xlabel('Target')
ax[1].legend(handles=[aux_patch,ae_patch, baseline_patch, ecfp_patch], loc="upper center", bbox_to_anchor=(.5, -0.17), ncol=5, fancybox=False, frameon=False)
fig.tight_layout(pad=2)
plt.savefig("../results/plots/np+target_identification.pdf",format="pdf", dpi =300, bbox_inches='tight')
#%% Create Plot Comparing plot pre vs post training
# Build fingerprint-similarity vectors (to the first compound of target 0)
# for: the trained AUX model, the trained baseline, a freshly initialized
# (untrained) model, and ECFP4 — inputs for the correlation scatter plots
# in the next cell.
i= 1 # second cv
model_baseline.load_state_dict(torch.load(model_path+"baseline_cv"+str(i)+".pt"))
model_baseline.eval()
model_baseline.cuda()
model_aux.load_state_dict(torch.load(model_path+"aux_cv"+str(i)+".pt"))
model_aux.eval()
model_aux.cuda()
model_ae.load_state_dict(torch.load(model_path+"ae_cv"+str(i)+".pt"))
model_ae.eval()
#model_untrained = MLP(settings["aux_model"]["layers"],1 ,settings["aux_model"]["dropout"])
# Untrained reference: freshly constructed FP_AE with random weights.
model_untrained = FP_AE(settings["ae_model"]["layers"],1+settings["ae_model"]["with_npl"],settings["ae_model"]["dropout"])
model_untrained.cuda()
model_untrained.eval()
i=0 # first target
aux_data = pd.read_csv("../data/validation_sets/np_target_identification/smiles_target" +str(i)+".csv")
to_drop = aux_data[(aux_data.npl>1) & ( aux_data.np==1)].index.tolist()
aux_data = aux_data.drop(to_drop,axis=0).reset_index(drop=True)
fps_data = pd.read_csv("../data/validation_sets/np_target_identification/fps_target" +str(i)+".csv")
fps_data = fps_data.drop(to_drop,axis=0).reset_index(drop=True)
# Output index [2] is the learned fingerprint layer of each model.
nnfp_model_desc = model_aux(torch.tensor(fps_data.values, dtype =torch.float).cuda())[2]
nnfp_model_base = model_baseline(torch.tensor(fps_data.values, dtype =torch.float).cuda())[2]
nnfp_untrained = model_untrained(torch.tensor(fps_data.values, dtype =torch.float).cuda())[2]
nnfp_data_desc = nnfp_model_desc.cpu().detach().numpy()
nnfp_data_base = nnfp_model_base.cpu().detach().numpy()
nnfp_data_untrained= nnfp_untrained.cpu().detach().numpy()
ecfp=[AllChem.GetMorganFingerprintAsBitVect(Chem.MolFromSmiles(x),2,nBits=2048) for x in aux_data.smiles]
# Tanimoto similarity of every compound to the first one.
ecfp_sim = [
    DataStructs.FingerprintSimilarity(ecfp[0], x) for x in ecfp
]
# Cosine similarity of every learned fingerprint to the first one.
nnfp_sim_cos_desc = [cosine_similarity(nnfp_data_desc[0].reshape(1,-1), nnfp_data_desc[x].reshape(1,-1))[0][0] for x in range(nnfp_data_desc.shape[0])]
nnfp_sim_cos_base = [cosine_similarity(nnfp_data_base[0].reshape(1,-1), nnfp_data_base[x].reshape(1,-1))[0][0] for x in range(nnfp_data_base.shape[0])]
nnfp_sim_cos_untrained= [cosine_similarity(nnfp_data_untrained[0].reshape(1,-1), nnfp_data_untrained[x].reshape(1,-1))[0][0] for x in range(nnfp_data_untrained.shape[0])]
activities =np.array(aux_data.active)
nps =np.array(aux_data.np)
#%% Correlation Analysis
# 2x2 scatter grid of learned-fingerprint similarity (x) vs ECFP
# similarity (y), colored by synthetic / inactive NP / active NP.
# NOTE(review): panels [0][0] and [1][0] both plot the *untrained*
# similarities while [1][0]/[1][1] are labeled "Baseline" — panel [1][0]
# may have been intended to use nnfp_sim_cos_base; confirm before reuse.
fig, ax =plt.subplots(2,2, figsize=(10,5), sharex= "col")
ax[0][0].plot(np.array(nnfp_sim_cos_untrained[1:])[np.where(nps[1:]==0)[0].tolist()], np.array(ecfp_sim[1:])[np.where(nps[1:]==0)[0].tolist()],"o",color =sns.color_palette("pastel")[7], markersize =4)
ax[0][0].plot(np.array(nnfp_sim_cos_untrained[1:])[np.where((nps[1:]==1)& (activities[1:]==0))[0].tolist()], np.array(ecfp_sim[1:])[np.where((nps[1:]==1)& (activities[1:]==0))[0].tolist()],"o", mfc='none', color = sns.color_palette()[1] , markersize= 4)
ax[0][0].plot(np.array(nnfp_sim_cos_untrained[1:])[np.where((nps[1:]==1)& (activities[1:]==1))[0].tolist()], np.array(ecfp_sim[1:])[np.where((nps[1:]==1)& (activities[1:]==1))[0].tolist()],"o", color = sns.color_palette()[2] , markersize= 4)
ax[0][0].set_xlabel("Similarity of NP_AUX Fingerprint")
ax[0][0].xaxis.set_tick_params(which='both', labelbottom=True)
ax[0][1].xaxis.set_tick_params(which='both', labelbottom=True)
ax[0][1].plot(np.array(nnfp_sim_cos_desc[1:])[np.where(nps[1:]==0)[0].tolist()], np.array(ecfp_sim[1:])[np.where(nps[1:]==0)[0].tolist()],"o",color =sns.color_palette("pastel")[7], markersize =4)
ax[0][1].plot(np.array(nnfp_sim_cos_desc[1:])[np.where((nps[1:]==1)& (activities[1:]==0))[0].tolist()], np.array(ecfp_sim[1:])[np.where((nps[1:]==1)& (activities[1:]==0))[0].tolist()],"o", mfc='none',color = sns.color_palette()[1] , markersize= 4)
ax[0][1].plot(np.array(nnfp_sim_cos_desc[1:])[np.where((nps[1:]==1)& (activities[1:]==1))[0].tolist()], np.array(ecfp_sim[1:])[np.where((nps[1:]==1)& (activities[1:]==1))[0].tolist()],"o", color = sns.color_palette()[2] , markersize= 4)
ax[1][1].set_xlabel("Similarity of NP_AUX Fingerprint")
ax[1][0].plot(np.array(nnfp_sim_cos_untrained[1:])[np.where(nps[1:]==0)[0].tolist()], np.array(ecfp_sim[1:])[np.where(nps[1:]==0)[0].tolist()],"o",color =sns.color_palette("pastel")[7], markersize =4)
ax[1][0].plot(np.array(nnfp_sim_cos_untrained[1:])[np.where((nps[1:]==1)& (activities[1:]==0))[0].tolist()], np.array(ecfp_sim[1:])[np.where((nps[1:]==1)& (activities[1:]==0))[0].tolist()],"o", mfc='none',color = sns.color_palette()[1] , markersize= 4)
ax[1][0].plot(np.array(nnfp_sim_cos_untrained[1:])[np.where((nps[1:]==1)& (activities[1:]==1))[0].tolist()], np.array(ecfp_sim[1:])[np.where((nps[1:]==1)& (activities[1:]==1))[0].tolist()],"o", color = sns.color_palette()[2] , markersize= 4)
ax[1][1].plot(np.array(nnfp_sim_cos_base[1:])[np.where(nps[1:]==0)[0].tolist()], np.array(ecfp_sim[1:])[np.where(nps[1:]==0)[0].tolist()],"o",color =sns.color_palette("pastel")[7], markersize =4)
ax[1][1].plot(np.array(nnfp_sim_cos_base[1:])[np.where((nps[1:]==1)& (activities[1:]==0))[0].tolist()], np.array(ecfp_sim[1:])[np.where((nps[1:]==1)& (activities[1:]==0))[0].tolist()],"o", mfc='none', color = sns.color_palette()[1] , markersize= 4)
ax[1][1].plot(np.array(nnfp_sim_cos_base[1:])[np.where((nps[1:]==1)& (activities[1:]==1))[0].tolist()], np.array(ecfp_sim[1:])[np.where((nps[1:]==1)& (activities[1:]==1))[0].tolist()],"o",color = sns.color_palette()[2] , markersize= 4)
ax[0][0].set_ylabel("ECFP Similarity")
ax[0][1].set_xlabel("Similarity of NP_AUX Fingerprint")
#ax[1].set_ylabel("Similarity of ECFP")
ax[1][0].set_xlabel("Similarity of Baseline Fingerprint")
ax[1][1].set_xlabel("Similarity of Baseline Fingerprint")
ax[1][0].set_ylabel("ECFP Similarity")
ax[1][1].set_ylabel("ECFP Similarity")
ax[0][1].set_ylabel("ECFP Similarity")
plt.tight_layout()
ax[0][0].set_title("Before Training")
ax[0][1].set_title("After Training")
plt.legend(labels=['Synthetic', "Inactive NP", "Active NP"],frameon=False, bbox_to_anchor=(-0.1, -0.48), fancybox=True,ncol=4,loc='lower center', prop={'size': 12})
plt.savefig("../results/plots/correlation_trainedVSuntrained.pdf",format="pdf", dpi =300, bbox_inches='tight')
#%%
# Compare the learned NP score (model output index [1]) against the Ertl
# NP-likeness score on the ROR-gamma subset: scatter plot plus a table of
# correlations with simple molecular properties.
print("Our Vs Ertls score on the ROR-Gamma Subset")
i=0 #first target
aux_data = pd.read_csv("../data/validation_sets/np_target_identification/smiles_target" +str(i)+".csv")
fps_data = pd.read_csv("../data/validation_sets/np_target_identification/fps_target" +str(i)+".csv")
model_desc = MLP(settings["aux_model"]["layers"],1 ,settings["aux_model"]["dropout"])
model_desc.load_state_dict(torch.load("../data/trained_models/npl_nonorm_64/aux_cv0.pt"))
model_desc.cuda()
model_desc.eval()
# Output index [1] is presumably the scalar NP score head — TODO confirm.
nnfp_model_desc = (model_desc(torch.tensor(fps_data.values, dtype =torch.float).cuda())[1].cpu().detach().flatten().numpy())
#nnfp_model_desc = abs(nnfp_model_desc)-x_bar
#sns.kdeplot(nnfp_model_desc)
plt.legend(["NN Score", "Ertl Score"])
sns.scatterplot(x=aux_data.npl[aux_data.np ==0], y=nnfp_model_desc[aux_data.np ==0] )
sns.scatterplot(x= aux_data.npl[aux_data.np ==1], y=nnfp_model_desc[aux_data.np ==1] )
plt.xlabel("Ertl Score")
plt.ylabel("NN Score")
plt.legend(["Synthetic", "NP"])
plt.savefig("../results/plots/ror_gamma_np_ertelvsours.svg",format="svg", bbox_inches='tight')
#Compute Correlation
molwt = [ExactMolWt(Chem.MolFromSmiles(x)) for x in aux_data.smiles]
numhetero = [CalcNumHeteroatoms(Chem.MolFromSmiles(x))/Chem.MolFromSmiles(x).GetNumAtoms() for x in aux_data.smiles]
sp3_fraction = [CalcFractionCSP3(Chem.MolFromSmiles(x)) for x in aux_data.smiles]
np.corrcoef(nnfp_model_desc,aux_data.npl)
correlation_comparison=pd.DataFrame(np.array([[np.corrcoef(nnfp_model_desc, molwt)[0,1],
np.corrcoef(nnfp_model_desc, numhetero)[0,1],
np.corrcoef(nnfp_model_desc, sp3_fraction)[0,1]],
[np.corrcoef(aux_data.npl, molwt)[0,1],
np.corrcoef(aux_data.npl, numhetero)[0,1],
np.corrcoef(aux_data.npl, sp3_fraction)[0,1]]]).transpose())
correlation_comparison.index= ["Molecular Weight", "Num. Heteroatoms", "Ratio SP3 Carbon"]
correlation_comparison.columns = ["Ours", "Ertl et. al."]
out_t = PrettyTable()
out_t.field_names = ["Property","Ours", "Ertl et. al."]
out_t.add_row(["Molecular Weight"]+[str(np.round(x,3)) for x in correlation_comparison.iloc[0,:]])
out_t.add_row(["Ratio Heteroatoms"]+[str(np.round(x,3)) for x in correlation_comparison.iloc[1,:]])
out_t.add_row(["Ratio SP3 Carbon"]+[str(np.round(x,3)) for x in correlation_comparison.iloc[2,:]])
print("Correlation between Properties and Natural Product Scores")
print(out_t)
to_save = out_t.get_string()
to_save= to_save.encode(encoding='UTF-8')
| StarcoderdataPython |
19289 | <reponame>jmollard/typhon<gh_stars>0
# -*- coding: utf-8 -*-
"""Functions to estimate the different sources of retrieval error.
"""
from typhon.oem import common
__all__ = [
'smoothing_error',
'retrieval_noise',
]
def smoothing_error(x, x_a, A):
    """Compute the smoothing error via the averaging kernel matrix.

    Parameters:
        x (ndarray): Atmospheric profile.
        x_a (ndarray): A priori profile.
        A (ndarray): Averaging kernel matrix.

    Returns:
        ndarray: Smoothing error due to correlation between layers.
    """
    deviation_from_prior = x - x_a
    return A @ deviation_from_prior
def retrieval_noise(K, S_a, S_y, e_y):
    """Map the total measurement error into state space.

    Parameters:
        K (np.array): Simulated Jacobians.
        S_a (np.array): A priori error covariance matrix.
        S_y (np.array): Measurement covariance matrix.
        e_y (ndarray): Total measurement error.

    Returns:
        ndarray: Retrieval noise.
    """
    gain = common.retrieval_gain_matrix(K, S_a, S_y)
    return gain @ e_y
| StarcoderdataPython |
8002919 | from functools import reduce
from mpi4py import MPI
from itertools import islice
from datetime import datetime as time
import logging
from collections import defaultdict
from decomposer import Decomposer
class MapReduce(object):
    """Exception-tolerant map-reduce driver that runs serially or over MPI.

    User-supplied mapper/reducer/shuffler callables are wrapped in safe
    versions: a mapper exception yields ``None``, and the reducer treats
    ``None`` as an identity element, so one bad record cannot abort a job.
    """

    def __init__(self, mapper, reducer, communicator=None, subsample=1, shuffler=None, prepartitioned=False ):
        """Store callables and build their exception-safe wrappers.

        Args:
            mapper: callable applied to each decomposed element.
            reducer: binary callable combining two map results.
            communicator: optional MPI communicator; parallel mode is used
                when it has more than one rank.
            subsample: forwarded to :class:`Decomposer`.
            shuffler: optional callable ``(key, size) -> rank`` that routes
                dict-keyed partial results to an owning rank before reduce.
            prepartitioned: when True, assume ``data`` is already local to
                this rank and do not decompose across the communicator.
        """
        self.unsafe_mapper = mapper
        self.unsafe_reducer = reducer
        self.unsafe_shuffler = shuffler
        self.subsample = subsample
        self.communicator=communicator
        self.prepartitioned=prepartitioned
        self.logger=logging.getLogger('performance')
        # safe reduce: skip None operands (results of failed maps)
        def safeReducer(a, b):
            if a is None:
                return b
            if b is None:
                return a
            return self.unsafe_reducer(a,b)
        self.reducer=safeReducer
        # safe map: log and swallow exceptions, returning None
        def safeMap(arg):
            self.logger.debug("Entered mapper")
            try:
                result= self.unsafe_mapper(arg)
                self.logger.debug("Exiting mapper")
                return result
            except Exception as e:
                self.logger.warn("Problem with map")
                self.logger.warn(str(e))
                return None
        self.mapper=safeMap
        if shuffler:
            def safeShuffler(arg, count):
                try:
                    return self.unsafe_shuffler(arg, count)
                except Exception as e:
                    self.logger.warn("Problem with shuffle")
                    self.logger.warn(str(e))
                    return None
            self.shuffler=safeShuffler
        else:
            self.shuffler=None

    def execute(self, data):
        """Run the job, choosing parallel mode when an MPI communicator
        with more than one rank is available."""
        if self.communicator and self.communicator.size>1:
            return self.parallel(data)
        else:
            return self.serial(data)

    def serial(self, data):
        """Map-reduce entirely within this process."""
        # Removed a dead computation of len(data): the value was assigned
        # to a local `count` but never used.
        subsampled_data=Decomposer(data, subsample=self.subsample)
        quantities= map(self.mapper, subsampled_data)
        result = reduce(self.reducer, quantities)
        return result

    def parallel(self, data):
        """Map locally on each rank, then reduce (optionally shuffled) via MPI.

        Without a shuffler, the global result is reduced to rank 0 and
        broadcast to all ranks. With a shuffler, each dict key is routed to
        an owning rank and only that rank receives the reduced bucket.
        """
        perfLogger=logging.getLogger('performance')
        # local map
        if self.prepartitioned:
            partition=Decomposer(data,subsample=self.subsample)
        else:
            partition=Decomposer(data, self.communicator, subsample=self.subsample )
        perfLogger.info("Built iterator")
        quantities=map(self.mapper,partition)
        perfLogger.info("Mapped")
        local_result=reduce(self.reducer, quantities)
        perfLogger.info("Local reduce")
        # reduce under mpi
        def reduce_arrays(x,y,dtype):
            # the signature for the user defined op takes a datatype, which we can ignore
            return self.reducer(x,y)
        reducer_mpi=MPI.Op.Create(reduce_arrays, True)
        perfLogger.debug("Local result: "+str(local_result)[0:60])
        if self.shuffler:
            perfLogger.info("Shuffling")
            # Bucket local dict entries by destination rank, then reduce
            # each bucket onto its owning rank.
            shuffled=defaultdict(dict)
            if local_result:
                for key in local_result:
                    shuffled[self.shuffler(key, self.communicator.size)][key]=local_result[key]
            for root in range(self.communicator.size):
                perfLogger.info("Reducing to rank "+str(root))
                temp=self.communicator.reduce(shuffled[root],op=reducer_mpi,root=root)
                if self.communicator.rank==root:
                    result=temp
        else:
            result = self.communicator.reduce(local_result, op=reducer_mpi, root=0)
            result = self.communicator.bcast(result, root=0)
        perfLogger.info("Global reduce")
        reducer_mpi.Free()
        return result
| StarcoderdataPython |
4996319 | <reponame>ZorkAxon/Perv<gh_stars>1-10
###
# Copyright (c) 2020, Zork
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
import random
import re
import pickle
import supybot.world as world
from supybot import utils, plugins, ircutils, callbacks
from supybot.commands import *
from supybot import conf
try:
from supybot.i18n import PluginInternationalization
_ = PluginInternationalization('Perv')
except ImportError:
# Placeholder that allows to run the plugin on a bot
# without the i18n module
_ = lambda x: x
filename = conf.supybot.directories.data.dirize("Perv.db")
class Perv(callbacks.Plugin):
    """A collection of funny "perverted" triggers for my Axon friends."""

    def loadDB(self):
        """Load the pickled counter database from disk, if it exists."""
        try:
            # Bugfix: mode must be the string 'rb'. The bare name `rb`
            # raised a NameError that the broad except silently swallowed,
            # so the database was never actually loaded from disk.
            with open(filename, 'rb') as f:
                self.db = pickle.load(f)
        except Exception as e:
            self.log.debug('Perv: Unable to load pickled database %s', e)

    def exportDB(self):
        """Write the counter database to disk as a pickle (protocol 2)."""
        try:
            with open(filename, 'wb') as f:
                pickle.dump(self.db, f, 2)
        except Exception as e:
            self.log.warning('Perv: Unable to write pickled database: %s', e)

    def __init__(self, irc):
        self.__parent = super(Perv, self)
        self.__parent.__init__(irc)
        # Default schema: one counter of how many times the trigger ran.
        self.defaultdb = {'JIZZED' : 0}
        self.db = self.defaultdb
        self.loadDB()
        # Persist the database whenever the bot flushes its state.
        world.flushers.append(self.exportDB)

    def die(self):
        """Flush the database and unregister the flusher on plugin unload."""
        self.exportDB()
        world.flushers.remove(self.exportDB)
        self.__parent.die()

    @wrap(['nick'])
    def jizz(self, irc, msg, args, nick):
        """<nick>
        """
        # For Debugging
        # irc.reply('Trigger Working')
        # Pick a random bystander from the channel's user list.
        rnick = random.choice(list(irc.state.channels[msg.channel].users))
        jizzed = self.db['JIZZED']
        var = jizzed + 1
        self.db['JIZZED'] = var
        outcomes = {
            "Starts pumping his peen... CUMS all over %s" % (nick),
            "Starts pumping his peen... Prematurely ejaculates! This never happends baby!!",
            "Starts pumping his peen... Gets a cramp and falls on his back spraying jizz on everyone!",
            "Starts pumping his peen... Misses %s! Instead CUMS all over %s face" % (nick, rnick),
            "Starts pumping his peen... BLOWS a giant load into %s ear" % (nick),
        }
        output = random.choice(list(outcomes))
        joutput = "I have jizzed %s times" % var
        self.exportDB()
        if nick != rnick:
            irc.reply(output, action=True)
        else:
            # When the random bystander is the target, use the fixed line.
            txt = "Starts pumping his peen... Prematurely ejaculates! This never happends baby!!"
            irc.reply(txt, action=True)
        # The counter is reported in both cases (previously duplicated in
        # each branch); the dead `nick = nick` assignment was removed.
        irc.reply(joutput)
Class = Perv
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
| StarcoderdataPython |
5046411 | import os.path as osp
import torch
import torch.nn.functional as F
from torch_geometric.datasets import PPI
from torch_geometric.data import DataLoader
import numpy as np
# from torch_geometric.nn import GATConv
from nn_local import GATConvGumbel as GATConv
from utils_local import StepTau, ClassBoundaryLoss
from sklearn.metrics import f1_score
from collections import defaultdict
from models import ThreeLayerResidualGAT
# Dataset and model setup: PPI splits, their loaders (train shuffled with
# batch size 1, eval with batch size 2), the residual GAT model, BCE loss
# over multi-label targets, and the auxiliary class-boundary margin loss.
path = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', 'PPI')
train_dataset = PPI(path, split='train')
val_dataset = PPI(path, split='val')
test_dataset = PPI(path, split='test')
train_loader = DataLoader(train_dataset, batch_size=1, shuffle=True)
val_loader = DataLoader(val_dataset, batch_size=2, shuffle=False)
test_loader = DataLoader(test_dataset, batch_size=2, shuffle=False)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = ThreeLayerResidualGAT(
    train_dataset, train_dataset.num_features).to(device)
loss_op = torch.nn.BCEWithLogitsLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.005)
# `factor` scales the boundary loss when it is added to the BCE loss.
margin_loss_settings = {'factor': 10, 'margin': 0.5}
class_boundary_loss = ClassBoundaryLoss(
    margin=margin_loss_settings['margin'], )
def train(tau=[0, 0, 0], epoch=0, monitor_dict=None):
    # One epoch over the training graphs. Optimizes BCE classification
    # loss plus a margin-based class-boundary loss derived from the first
    # GAT layer's attention coefficients; returns both dataset-averaged
    # losses as a (cls_loss, cb_loss) tuple.
    # NOTE(review): the mutable default `tau=[0, 0, 0]` is shared across
    # calls; harmless as long as it is never mutated — confirm.
    model.train()
    total_loss = 0
    cb_total_loss = 0.
    for data in train_loader:
        # import pdb; pdb.set_trace()
        num_graphs = data.num_graphs
        data.batch = None
        data = data.to(device)
        optimizer.zero_grad()
        # Model returns (predictions, first-layer attention, ...).
        ret_val = model(data.x, data.edge_index, epoch, tau, monitor_dict)
        preds, conv1_alpha = ret_val[0], ret_val[1]
        cls_loss = loss_op(preds, data.y)
        cb_loss = class_boundary_loss(
            conv1_alpha, data.y[:], data.edge_index, torch.arange(data.x.shape[0]), nodes=data.x.shape[0]) * margin_loss_settings['factor']
        # Weight per-batch losses by graph count for the dataset average.
        total_loss += cls_loss.item() * num_graphs
        cb_total_loss += cb_loss.item() * num_graphs
        (cls_loss+cb_loss).backward()
        optimizer.step()
    return total_loss / len(train_loader.dataset), cb_total_loss / len(train_loader.dataset)
def test(loader):
    # Evaluate micro-averaged F1 over all graphs in `loader`, without
    # gradient tracking. Returns 0 when no positive label is predicted
    # (f1_score would be ill-defined there).
    model.eval()
    ys, preds = [], []
    for data in loader:
        ys.append(data.y)
        with torch.no_grad():
            out = model(data.x.to(device), data.edge_index.to(device))
        # Logit > 0 is equivalent to sigmoid probability > 0.5.
        preds.append((out[0] > 0).float().cpu())
    y, pred = torch.cat(ys, dim=0).numpy(), torch.cat(preds, dim=0).numpy()
    return f1_score(y, pred, average='micro') if pred.sum() > 0 else 0
# Main loop: 100 epochs with a stepped Gumbel temperature schedule.
# Tracks the test F1 at the best validation F1 and finally saves the
# per-layer attention monitoring dictionaries.
stepTau = StepTau(base_taus=[0, 0, 0], step_size=30, gamma=0.3, ori_init=True)
best_val_f1, best_test_f1, epoch_ = 0, 0, 0
# One monitoring dict per GAT layer, filled inside the model's forward.
monitor_dict = [defaultdict(list), defaultdict(list), defaultdict(list)]
# monitor_dict = None
for epoch in range(1, 101):
    tau = stepTau.get_tau(epoch)
    print(tau)
    cls_loss, cb_loss = train(tau, epoch, monitor_dict)
    val_f1 = test(val_loader)
    test_f1 = test(test_loader)
    # Model selection: keep the test score at the (latest) best val score.
    if best_val_f1 <= val_f1:
        best_val_f1, best_test_f1, epoch_ = val_f1, test_f1, epoch
    print('Epoch: {:02d}, ClsLoss: {:.4f}, CBLoss: {:.4f}, Val: {:.4f}, Test: {:.4f}'.format(
        epoch, cls_loss, cb_loss, val_f1, test_f1))
print('Best: Epoch_{:02d}, Val: {:.4f}, Test: {:.4f}'.format(
    epoch_, best_val_f1, best_test_f1))
np.save('ppi_gat_sumtop2', np.array(monitor_dict))
3567741 | #
# [EXPERIMENTAL]
#
# Add databroker components to an mx app container
#
import atexit
import backoff
import os
import logging
import json
from buildpack.databroker import connect, streams
from buildpack.databroker.config_generator.scripts.configloader import (
configinitializer,
)
from buildpack.databroker.config_generator.scripts.generators import (
jmx as jmx_cfg_generator,
)
from buildpack.databroker.config_generator.scripts.utils import write_file
from buildpack.databroker.config_generator.templates.jmx import consumer
DATABROKER_ENABLED_FLAG = "DATABROKER_ENABLED"
RUNTIME_DATABROKER_FLAG = "DATABROKER.ENABLED"
APP_MODEL_HOME = "/home/vcap/app/model"
METADATA_FILE = os.path.join(APP_MODEL_HOME, "metadata.json")
DEP_FILE = os.path.join(APP_MODEL_HOME, "dependencies.json")
MAX_DATABROKER_COMPONENT_RESTART_RETRIES = 4
def is_enabled():
    """Return True when the Data Broker feature flag is set to "true"."""
    enabled = os.environ.get(DATABROKER_ENABLED_FLAG) == "true"
    if enabled:
        logging.debug("Databroker is enabled")
    return enabled
def is_producer_app():
    """Return True when this app publishes at least one Data Broker service.

    Reads the app's deployment metadata file and checks for a non-empty
    ``publishedServices`` list under ``DataBrokerConfiguration``. Always
    False when the Data Broker feature flag is off.
    """
    if not is_enabled():
        return False
    with open(METADATA_FILE) as f:
        metadata_json = json.load(f)
    db_config = metadata_json.get("DataBrokerConfiguration")
    # `is not None` is the idiomatic identity check (was `!= None`); the
    # boolean expression is returned directly instead of an if/else.
    return (
        db_config is not None
        and db_config.get("publishedServices") is not None
        and len(db_config.get("publishedServices")) > 0
    )
def should_run_kafka_connect():
    """Return True only on application instance 0.

    Kafka Connect must run on a single instance; CF exposes the instance
    number via the CF_INSTANCE_INDEX environment variable.
    """
    try:
        # os.environ[...] raises KeyError when unset (it never returns
        # None), so the previous `!= None` guard was redundant. The bare
        # `except:` is narrowed to the exceptions this lookup can raise.
        return int(os.environ["CF_INSTANCE_INDEX"]) == 0
    except (KeyError, ValueError, TypeError):
        return False
def stage(install_path, cache_dir):
    # Buildpack stage phase: install the Data Broker components (Kafka
    # Connect and the streams processor) into the droplet. No-op unless
    # the DATABROKER_ENABLED flag is set.
    if not is_enabled():
        return
    connect.stage(install_path, cache_dir)
    streams.stage(install_path, cache_dir)
class Databroker:
    """Lifecycle manager for the databroker sidecar processes
    (Kafka Connect and Kafka Streams) running next to the app.
    """

    def __init__(self):
        # Handles to the running component processes (populated by run()).
        self.kafka_connect = None
        self.kafka_streams = None
        self.restart_retries = 0
        # Only producer apps (apps that publish services) run components.
        self.is_producer_app = is_producer_app()
        # Ensure child processes are shut down on interpreter exit.
        atexit.register(self.stop)

    def __setup_configs(self, database_config):
        """Merge app metadata and dependency descriptors with the runtime
        database configuration into one complete databroker config."""
        metadata = open(METADATA_FILE, "rt")
        dep = open(DEP_FILE, "rt")
        complete_conf = configinitializer.unify_configs(
            [metadata, dep], database_config
        )
        metadata.close()
        dep.close()
        return complete_conf

    def get_datadog_config(self, user_checks_dir):
        """Build Datadog JMX monitoring configuration for the databroker.

        For producer apps, writes Kafka Connect / Kafka Streams JMX check
        files under ``user_checks_dir`` (jmx_2.d / jmx_3.d) and returns
        their paths; for consumer apps, returns the consumer JMX metric
        template instead.

        :param user_checks_dir: base directory for Datadog user checks.
        :return: tuple ``(extra_jmx_instance_config, jmx_config_files)``.
        """
        extra_jmx_instance_config = None
        jmx_config_files = []
        if is_enabled():
            if self.is_producer_app:
                # kafka connect cfg
                os.makedirs(
                    os.path.join(user_checks_dir, "jmx_2.d"), exist_ok=True,
                )
                kafka_connect_cfg = (
                    jmx_cfg_generator.generate_kafka_connect_jmx_config()
                )
                kafka_connect_cfg_path = os.path.join(
                    user_checks_dir, "jmx_2.d", "conf.yaml"
                )
                write_file(
                    kafka_connect_cfg_path, kafka_connect_cfg,
                )
                # kafka streams cfg
                os.makedirs(
                    os.path.join(user_checks_dir, "jmx_3.d"), exist_ok=True,
                )
                kafka_streams_cfg = (
                    jmx_cfg_generator.generate_kafka_streams_jmx_config()
                )
                kafka_streams_cfg_path = os.path.join(
                    user_checks_dir, "jmx_3.d", "conf.yaml"
                )
                write_file(
                    kafka_streams_cfg_path, kafka_streams_cfg,
                )
                jmx_config_files = [
                    kafka_connect_cfg_path,
                    kafka_streams_cfg_path,
                ]
            else:
                # consumer metrics setup
                extra_jmx_instance_config = consumer.jmx_metrics
        return (extra_jmx_instance_config, jmx_config_files)

    def run(self, database_config):
        """Start the databroker components (producer apps only).

        :raises Exception: when component initialization fails; the
            original cause is chained via ``from ex``.
        """
        if not self.is_producer_app:
            return
        logging.info("Databroker: Initializing components")
        try:
            complete_conf = self.__setup_configs(database_config)
            # Kafka Connect is started on the first CF instance only.
            if should_run_kafka_connect():
                self.kafka_connect = connect.run(complete_conf)
            self.kafka_streams = streams.run(complete_conf)
            logging.info("Databroker: Initialization complete")
        except Exception as ex:
            logging.error(
                "Databroker: Initialization failed due to {}".format(ex)
            )
            raise Exception("Databroker initailization failed") from ex
        if not self.restart_if_any_component_not_healthy():
            logging.error(
                "Databroker: component restart retries exhaused. Stopping the app"
            )
            # NOTE(review): exits with status 0 on an error path --
            # confirm whether a non-zero exit code was intended here.
            exit(0)

    def stop(self):
        """Gracefully stop both components (no-op for consumer apps)."""
        if not self.is_producer_app:
            return
        if self.kafka_connect:
            self.kafka_connect.stop()
        if self.kafka_streams:
            self.kafka_streams.stop()

    def kill(self):
        """Forcefully terminate both components (no-op for consumer apps)."""
        if not self.is_producer_app:
            return
        if self.kafka_connect:
            self.kafka_connect.kill()
        if self.kafka_streams:
            self.kafka_streams.kill()

    # backoff re-invokes this method while it returns a falsy value,
    # waiting 10s between attempts, for at most
    # MAX_DATABROKER_COMPONENT_RESTART_RETRIES tries in total.
    @backoff.on_predicate(
        backoff.constant,
        interval=10,
        max_tries=MAX_DATABROKER_COMPONENT_RESTART_RETRIES,
    )
    def restart_if_any_component_not_healthy(self):
        """Restart any dead component; True when all are healthy."""
        if not self.is_producer_app:
            return True
        result = True
        if self.kafka_connect and not self.kafka_connect.is_alive():
            self.kafka_connect.restart()
            result = False
        if self.kafka_streams and not self.kafka_streams.is_alive():
            self.kafka_streams.restart()
            result = False
        return result
| StarcoderdataPython |
1944846 | # 21.05.29 조건부 표현식
# Conditional (ternary) expression practice.
score = 40  # sample score to classify as pass/fail

# Plain if/else statement form.
if score >= 60:
    print('pass')
else :
    print('fail')

# The same check as a one-line conditional expression:
# [value when true] if [condition] else [value when false]
print('pass') if score >= 60 else print('fail')
| StarcoderdataPython |
8064509 | from django.urls import path
from .views import Blogview,Blogdetailsview,LandingPageView,Objective1View,Objective2View,Objective3View,Objective4View,Objective5View,Objective6View
from .import views
# Route table: landing page, blog list/detail, six "objective" pages,
# and the projects page.
# NOTE(review): most views are imported by name while `projects` is
# referenced through the `views` module -- consider unifying the style.
urlpatterns = [
    path('', LandingPageView, name='landingpage'),
    path('blog/', Blogview, name='blog'),
    path('blog-detail/<slug:slug>/',Blogdetailsview, name="blog-detail"),
    path('objective1/', Objective1View, name='objective1'),
    path('objective2/', Objective2View, name='objective2'),
    path('objective3/', Objective3View, name='objective3'),
    path('objective4/', Objective4View, name='objective4'),
    path('objective5/', Objective5View, name='objective5'),
    path('objective6/', Objective6View, name='objective6'),
    path('projects/', views.projects, name='projects' )
]
| StarcoderdataPython |
139349 | <reponame>park-sungmoo/odqa_baseline_code
import random
import unittest
import wandb
from transformers import set_seed, DataCollatorWithPadding
from utils.tools import get_args
from utils.tools import update_args, run_test
from utils.trainer_qa import QuestionAnsweringTrainer
from utils.prepare import prepare_dataset, preprocess_dataset, get_reader_model, compute_metrics
# Shared test fixtures: parsed CLI/config args, the strategy list, and a
# single randomly chosen seed used across all tests in this run.
args = get_args()
strategies = args.strategies
SEED = random.choice(args.seeds)  # fix run_cnt 1
@run_test
class TestReader(unittest.TestCase):
    """Smoke tests for the reader (MRC) training pipeline: strategies,
    datasets, models, and a one-epoch end-to-end training run.

    Note: ``args=args`` default parameters bind the module-level args
    object at class-definition time.
    """

    def test_strategy_is_not_none(self, args=args):
        """At least one strategy must have been passed in."""
        self.assertIsNotNone(strategies, "전달받은 전략이 없습니다.")

    def test_valid_strategy(self, args=args):
        """Every strategy name must resolve to an existing config file."""
        for strategy in strategies:
            try:
                update_args(args, strategy)
            except FileNotFoundError:
                assert False, "전략명이 맞는지 확인해주세요. "

    def test_valid_dataset(self, args=args):
        """The dataset of each strategy must be preparable."""
        for seed, strategy in [(SEED, strategy) for strategy in strategies]:
            args = update_args(args, strategy)
            args.strategy, args.seed = strategy, seed
            set_seed(seed)
            try:
                prepare_dataset(args, is_train=True)
            except KeyError:
                assert False, "존재하지 않는 dataset입니다. "

    def test_valid_model(self, args=args):
        """The reader model of each strategy must load successfully."""
        for seed, strategy in [(SEED, strategy) for strategy in strategies]:
            args = update_args(args, strategy)
            args.strategy, args.seed = strategy, seed
            set_seed(seed)
            try:
                get_reader_model(args)
            except Exception:
                assert False, "hugging face에 존재하지 않는 model 혹은 잘못된 경로입니다. "

    def test_strategies_with_dataset(self, args=args):
        """
        (Constraint)
        - num_train_epoch 1
        - random seed 1
        - dataset fragment (rows : 100)
        (Caution)
        If an ERROR shows up here, check the results of the smaller
        unit tests above first.
        """
        for seed, strategy in [(SEED, strategy) for strategy in strategies]:
            # Fresh wandb run per strategy so metrics do not mix.
            wandb.init(project="p-stage-3-test", reinit=True)
            args = update_args(args, strategy)
            args.strategy, args.seed = strategy, seed
            set_seed(seed)
            datasets = prepare_dataset(args, is_train=True)
            model, tokenizer = get_reader_model(args)
            train_dataset, post_processing_function = preprocess_dataset(args, datasets, tokenizer, is_train=True)
            train_dataset = train_dataset.select(range(100))  # select 100
            data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8 if args.train.fp16 else None)
            args.train.do_train = True
            args.train.run_name = "_".join([strategy, args.alias, str(seed), "test"])
            wandb.run.name = args.train.run_name
            # TRAIN MRC
            args.train.num_train_epochs = 1.0  # fix epoch 1
            trainer = QuestionAnsweringTrainer(
                model=model,
                args=args.train,  # training_args
                custom_args=args,
                train_dataset=train_dataset,
                tokenizer=tokenizer,
                data_collator=data_collator,
                post_process_function=post_processing_function,
                compute_metrics=compute_metrics,
            )
            trainer.train()
| StarcoderdataPython |
4964001 | """
mqlalchemy.tests.__init__
~~~~~~~~~~~~~~~~~~~~~~~~~
Tests for our new query syntax.
"""
# :copyright: (c) 2016-2020 by <NAME> and contributors.
# See AUTHORS for more details.
# :license: MIT - See LICENSE for more details.
from __future__ import unicode_literals
import unittest
import os
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker, configure_mappers
from sqlalchemy.types import (
String, Integer, Boolean,
Date, DateTime, Float, Time)
import mqlalchemy
from tests.models import (
Album, Artist, Customer, Employee, Genre, Invoice, InvoiceLine,
MediaType, Playlist, Track)
from mqlalchemy import (
apply_mql_filters, convert_to_alchemy_type, InvalidMqlException)
import datetime
# Makes sure backref relationship attrs are attached to models
# e.g. Album.tracks doesn't work without either this or accessing
# Track.album first.
configure_mappers()
class MQLAlchemyTests(unittest.TestCase):
    """A collection of MQLAlchemy tests."""

    def setUp(self):
        """Configure a db session for the chinook database."""
        # The chinook sample db ships alongside the test module.
        connect_string = "sqlite+pysqlite:///" + os.path.join(
            os.path.dirname(os.path.abspath(__file__)), "chinook.sqlite")
        self.db_engine = create_engine(connect_string)
        self.DBSession = sessionmaker(bind=self.db_engine)
        self.db_session = self.DBSession()

    def test_db(self):
        """Make sure our test db is functional."""
        # Album 1 belongs to artist 1 in the canonical chinook data.
        result = self.db_session.query(Album).filter(
            Album.album_id == 1).all()
        self.assertTrue(len(result) == 1)
        self.assertTrue(result[0].artist_id == 1)
def test_simple_query(self):
"""Test a very simple mqlalchemy query."""
query = apply_mql_filters(
self.db_session,
Album,
{"album_id": 2}
)
result = query.all()
self.assertTrue(len(result) == 1)
self.assertTrue(result[0].title == "Balls to the Wall")
def test_simple_prior_query(self):
"""Test a simple mqlalchemy query using a preformed query."""
query = self.db_session.query(Album).filter(
Album.artist_id == 2)
query = apply_mql_filters(
query,
Album,
{"album_id": 2}
)
result = query.all()
self.assertTrue(len(result) == 1)
self.assertTrue(result[0].title == "Balls to the Wall")
def test_no_match(self):
"""Test that a query that should have no match works."""
query = apply_mql_filters(
self.db_session,
Playlist,
{"tracks.track_id": 7,
"playlist_id": 4}
)
result = query.all()
self.assertTrue(len(result) == 0)
def test_list_relation(self):
"""Test that a list relation .any query works."""
query = apply_mql_filters(
self.db_session,
Playlist,
{"tracks.track_id": 7}
)
result = query.all()
self.assertTrue(len(result) == 2)
self.assertTrue(
(result[0].playlist_id == 1 and result[1].playlist_id == 8) or
(result[0].playlist_id == 8 and result[1].playlist_id == 1))
def test_complex_list_relation(self):
"""Test that a multi-level list relation query works."""
query = apply_mql_filters(
self.db_session,
Album,
{"tracks.playlists.playlist_id": 18}
)
result = query.all()
self.assertTrue(len(result) == 1)
self.assertTrue(result[0].album_id == 48)
def test_more_complex_list_relation(self):
"""Test that a complex list relation query works."""
query = apply_mql_filters(
self.db_session,
Album,
{"tracks": {
"$elemMatch": {
"playlists.playlist_id": 18
}
}}
)
result = query.all()
self.assertTrue(len(result) == 1)
self.assertTrue(result[0].album_id == 48)
def test_complex_convert_name(self):
"""Test that converting from camelCase to underscore works."""
query = apply_mql_filters(
self.db_session,
Album,
{"TRACKS.PLAYLISTS.PLAYLIST_ID": 18},
convert_key_names_func=lambda txt: txt.lower()
)
result = query.all()
self.assertTrue(len(result) == 1)
self.assertTrue(result[0].album_id == 48)
def test_explicit_elem_match(self):
"""Test that an explicit elemMatch."""
query = apply_mql_filters(
self.db_session,
Playlist,
{"tracks": {
"$elemMatch": {
"track_id": 7
}
}}
)
result = query.all()
self.assertTrue(len(result) == 2)
self.assertTrue(
(result[0].playlist_id == 1 and result[1].playlist_id == 8) or
(result[0].playlist_id == 8 and result[1].playlist_id == 1))
def test_implicit_elem_match(self):
"""Test that an implicit elemMatch works."""
query = apply_mql_filters(
self.db_session,
Playlist,
{"tracks": {"track_id": 7}}
)
result = query.all()
self.assertTrue(len(result) == 2)
self.assertTrue(
(result[0].playlist_id == 1 and result[1].playlist_id == 8) or
(result[0].playlist_id == 8 and result[1].playlist_id == 1))
def test_list_relation_eq_fail(self):
"""Make sure we can't check a relation for equality."""
self.assertRaises(
InvalidMqlException,
apply_mql_filters,
self.db_session,
Playlist,
{"tracks": 7}
)
def test_list_relation_neq_fail(self):
"""Make sure we can't check a relation for inequality."""
self.assertRaises(
InvalidMqlException,
apply_mql_filters,
self.db_session,
Playlist,
{"tracks": {"$ne": 7}}
)
def test_non_list_relation(self):
"""Test that a non-list relation .has query works."""
query = apply_mql_filters(
self.db_session,
Album,
{"artist.artist_id": 275}
)
result = query.all()
self.assertTrue(len(result) == 1)
self.assertTrue(result[0].album_id == 347)
def test_attr_exists(self):
"""Test $exists on a simple attr."""
query = apply_mql_filters(
self.db_session,
Customer,
{"company": {"$exists": True}}
)
results = query.all()
self.assertTrue(len(results) == 10)
def test_attr_not_exists(self):
"""Test not $exists on a simple attr."""
query = apply_mql_filters(
self.db_session,
Customer,
{"company": {"$exists": False}}
)
results = query.all()
self.assertTrue(len(results) == 49)
def test_child_list_not_exists(self):
"""Test a child list can be filtered for being missing."""
query = apply_mql_filters(
self.db_session,
Artist,
{"albums": {"$exists": False}}
)
results = query.all()
self.assertTrue(len(results) == 71)
def test_child_list_exists(self):
"""Test a child list can be checked for existence."""
query = apply_mql_filters(
self.db_session,
Artist,
{"albums": {"$exists": True}}
)
results = query.all()
self.assertTrue(len(results) == 204)
def test_child_non_list_not_exists(self):
"""Test a non list child can be filtered for being missing."""
query = apply_mql_filters(
self.db_session,
Employee,
{"manager": {"$exists": False}}
)
results = query.all()
self.assertTrue(len(results) == 1)
def test_child_non_list_exists(self):
"""Test a non list child can be checked for existence."""
query = apply_mql_filters(
self.db_session,
Employee,
{"manager": {"$exists": True}}
)
results = query.all()
self.assertTrue(len(results) == 7)
def test_implicit_and(self):
"""Test that an implicit and query works."""
query = apply_mql_filters(
self.db_session,
Playlist,
{"tracks.track_id": 7,
"playlist_id": 1}
)
result = query.all()
self.assertTrue(len(result) == 1)
self.assertTrue(result[0].playlist_id == 1)
def test_explicit_and(self):
"""Test that the $and operator works."""
query = apply_mql_filters(
self.db_session,
Playlist,
{"$and": [
{"tracks.track_id": 7},
{"playlist_id": 1}
]}
)
result = query.all()
self.assertTrue(len(result) == 1)
self.assertTrue(result[0].playlist_id == 1)
def test_or(self):
"""Test that the $or operator works."""
query = apply_mql_filters(
self.db_session,
Playlist,
{"$or": [
{"tracks.track_id": 999999},
{"playlist_id": 1}
]}
)
result = query.all()
self.assertTrue(len(result) == 1)
self.assertTrue(result[0].playlist_id == 1)
def test_negation(self):
"""Test that the $not operator works."""
query = apply_mql_filters(
self.db_session,
Playlist,
{"tracks.track_id": 7,
"$not": {
"playlist_id": 1
}}
)
result = query.all()
self.assertTrue(len(result) == 1)
self.assertTrue(result[0].playlist_id == 8)
def test_nor(self):
"""Test that the $nor operator works."""
query = apply_mql_filters(
self.db_session,
Playlist,
{"tracks.track_id": 7,
"$nor": [
{"playlist_id": 1},
{"playlist_id": 999}
]}
)
result = query.all()
self.assertTrue(len(result) == 1)
self.assertTrue(result[0].playlist_id == 8)
def test_neq(self):
"""Test that the $ne operator works."""
query = apply_mql_filters(
self.db_session,
Playlist,
{"tracks.track_id": 7,
"playlist_id": {
"$ne": 1
}}
)
result = query.all()
self.assertTrue(len(result) == 1)
self.assertTrue(result[0].playlist_id == 8)
def test_lt(self):
"""Test that the $lt operator works."""
query = apply_mql_filters(
self.db_session,
Playlist,
{"playlist_id": {
"$lt": 2
}}
)
result = query.all()
self.assertTrue(len(result) == 1)
self.assertTrue(result[0].playlist_id == 1)
def test_lte(self):
"""Test that the $lte operator works."""
query = apply_mql_filters(
self.db_session,
Playlist,
{"playlist_id": {
"$lte": 1
}}
)
result = query.all()
self.assertTrue(len(result) == 1)
self.assertTrue(result[0].playlist_id == 1)
def test_eq(self):
"""Test that the new $eq operator works."""
query = apply_mql_filters(
self.db_session,
Playlist,
{"playlist_id": {
"$eq": 1
}}
)
result = query.all()
self.assertTrue(len(result) == 1)
self.assertTrue(result[0].playlist_id == 1)
def test_gte(self):
"""Test that the $gte operator works."""
query = apply_mql_filters(
self.db_session,
Playlist,
{"playlist_id": {
"$gte": 18
}}
)
result = query.all()
self.assertTrue(len(result) == 1)
self.assertTrue(result[0].playlist_id == 18)
def test_gt(self):
"""Test that the $gt operator works."""
query = apply_mql_filters(
self.db_session,
Playlist,
{"playlist_id": {
"$gt": 17
}}
)
result = query.all()
self.assertTrue(len(result) == 1)
self.assertTrue(result[0].playlist_id == 18)
def test_mod(self):
"""Test that the $mod operator works."""
query = apply_mql_filters(
self.db_session,
Playlist,
{"playlist_id": {
"$mod": [18, 0]
}}
)
result = query.all()
self.assertTrue(len(result) == 1)
self.assertTrue(result[0].playlist_id == 18)
def test_mod_str_fail(self):
"""Test passing string values to $mod op fails."""
self.assertRaises(
InvalidMqlException,
apply_mql_filters,
self.db_session,
Playlist,
{"playlist_id": {
"$mod": ["test", "hey"]
}}
)
def test_mod_decimal_divisor_fails(self):
"""Test passing a decimal divisor to $mod op fails."""
self.assertRaises(
InvalidMqlException,
apply_mql_filters,
self.db_session,
Playlist,
{"playlist_id": {
"$mod": [2.2, 4]
}}
)
def test_mod_decimal_remainder_fails(self):
"""Test passing a decimal remainder to $mod op fails."""
self.assertRaises(
InvalidMqlException,
apply_mql_filters,
self.db_session,
Playlist,
{"playlist_id": {
"$mod": [2, 4.4]
}}
)
def test_mod_non_list(self):
"""Test passing a non list to $mod op rails."""
self.assertRaises(
InvalidMqlException,
apply_mql_filters,
self.db_session,
Playlist,
{"playlist_id": {
"$mod": 5
}}
)
def test_mod_non_int_field(self):
"""Test trying to $mod a non int field fails."""
self.assertRaises(
InvalidMqlException,
apply_mql_filters,
self.db_session,
Playlist,
{"name": {
"$mod": 5
}}
)
def test_in(self):
"""Test that the $in operator works."""
query = apply_mql_filters(
self.db_session,
Playlist,
{"playlist_id": {
"$in": [1, 2]
}}
)
result = query.all()
self.assertTrue(
len(result) == 2)
self.assertTrue(
(result[0].playlist_id == 1 and result[1].playlist_id == 2) or
(result[0].playlist_id == 2 and result[1].playlist_id == 1))
def test_in_nested(self):
"""Test that the $in operator works on nested objects."""
query = apply_mql_filters(
self.db_session,
Playlist,
{"tracks.track_id": {
"$in": [7, 9999]
}}
)
result = query.all()
self.assertTrue(
len(result) == 2 and
((result[0].playlist_id == 1 and result[1].playlist_id == 8) or
(result[0].playlist_id == 8 and result[1].playlist_id == 1)))
def test_in_non_list_fails(self):
"""Test that the $in op fails when not supplied with a list."""
self.assertRaises(
InvalidMqlException,
apply_mql_filters,
self.db_session,
Playlist,
{"playlist_id": {
"$in": 1
}}
)
def test_nin(self):
"""Test that the $nin operator works."""
query = apply_mql_filters(
self.db_session,
Playlist,
{"playlist_id": {
"$nin": [
2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18]
}}
)
result = query.all()
self.assertTrue(len(result) == 1 and
result[0].playlist_id == 1)
def test_nin_non_list_fails(self):
"""Test that the $nin op fails when not supplied with a list."""
self.assertRaises(
InvalidMqlException,
apply_mql_filters,
self.db_session,
Playlist,
{"playlist_id": {
"$nin": 1
}}
)
def test_like(self):
"""Test that the new $like operator works."""
query = apply_mql_filters(
self.db_session,
Employee,
{"first_name": {
"$like": "tev"
}}
)
result = query.all()
self.assertTrue(len(result) == 1)
self.assertTrue(result[0].first_name == "Steve")
def test_elemmatch_fail(self):
"""Test that the $elemMatch operator properly fails."""
self.assertRaises(
InvalidMqlException,
apply_mql_filters,
self.db_session,
Employee,
{"first_name": {
"$elemMatch": {"test": "test"}
}}
)
def test_nested_attr_query_fail(self):
"""Test that a nested attribute query fails."""
self.assertRaises(
InvalidMqlException,
apply_mql_filters,
self.db_session,
Track,
{"track_id": {
"info": 5
}}
)
def test_bad_operator_fail(self):
"""Test that a invalid operator fails."""
self.assertRaises(
InvalidMqlException,
apply_mql_filters,
self.db_session,
Track,
{"track_id": {
"$bad": 5
}},
["track_id"]
)
def test_empty_dict_fail(self):
"""Test that a nested attribute query fails."""
self.assertRaises(
InvalidMqlException,
apply_mql_filters,
self.db_session,
Playlist,
{"tracks": {}}
)
def test_whitelist(self):
"""Test that whitelisting works as expected."""
self.assertRaises(
InvalidMqlException,
apply_mql_filters,
self.db_session,
Playlist,
{"tracks.track_id": 7},
[]
)
self.assertFalse(
mqlalchemy._is_whitelisted(
Album,
"bad_attr_name",
["bad_attr_name"]
)
)
query = apply_mql_filters(
self.db_session,
Playlist,
{"tracks.track_id": 7},
["tracks.track_id"]
)
result = query.all()
self.assertTrue(
len(result) == 2)
self.assertTrue(
(result[0].playlist_id == 1 and result[1].playlist_id == 8) or
(result[0].playlist_id == 8 and result[1].playlist_id == 1))
def test_custom_whitelist_func(self):
"""Test that providing a whitelist function works."""
def whitelist(attr_name):
if attr_name == "tracks.track_id":
return True
return False
query = apply_mql_filters(
self.db_session,
Playlist,
{"tracks.track_id": 7},
whitelist
)
result = query.all()
self.assertTrue(
len(result) == 2)
self.assertTrue(
(result[0].playlist_id == 1 and result[1].playlist_id == 8) or
(result[0].playlist_id == 8 and result[1].playlist_id == 1))
self.assertRaises(
InvalidMqlException,
apply_mql_filters,
self.db_session,
Playlist,
{"tracks.name": "Test"},
whitelist
)
def test_convert_to_int(self):
"""Test that we can convert a string to integer."""
self.assertTrue(convert_to_alchemy_type("1", Integer) == 1)
self.assertTrue(convert_to_alchemy_type(1, Integer) == 1)
def test_convert_to_float(self):
"""Test that we can convert a string to a float."""
self.assertTrue(convert_to_alchemy_type("1.1", Float) == 1.1)
self.assertTrue(convert_to_alchemy_type(1.1, Float) == 1.1)
def test_convert_to_bool(self):
"""Test that we can convert a value to a boolean."""
self.assertFalse(convert_to_alchemy_type("0", Boolean))
self.assertFalse(convert_to_alchemy_type("FaLSE", Boolean))
self.assertFalse(convert_to_alchemy_type(False, Boolean))
self.assertTrue(convert_to_alchemy_type("1", Boolean))
self.assertTrue(convert_to_alchemy_type("True", Boolean))
def test_convert_to_datetime(self):
"""Test that we can convert a value to a datetime."""
self.assertTrue(
convert_to_alchemy_type(
"2015-03-11 01:45:14", DateTime) ==
datetime.datetime(2015, 3, 11, 1, 45, 14))
self.assertTrue(
convert_to_alchemy_type(
datetime.datetime(2015, 3, 11, 1, 45, 14), DateTime) ==
datetime.datetime(2015, 3, 11, 1, 45, 14))
def test_convert_to_date(self):
"""Test that we can convert a value to a date."""
self.assertTrue(
convert_to_alchemy_type(
"2015-03-11", Date) == datetime.date(2015, 3, 11))
self.assertTrue(
convert_to_alchemy_type(
datetime.date(2015, 3, 1), Date) == datetime.date(2015, 3, 1))
def test_convert_to_time(self):
"""Test that we can convert a value to a time."""
self.assertTrue(
convert_to_alchemy_type(
"01:45:14", Time) == datetime.time(1, 45, 14))
self.assertTrue(
convert_to_alchemy_type(
datetime.time(1, 45, 14), Time) == datetime.time(1, 45, 14))
def test_convert_to_string(self):
"""Test that we can convert a string to integer."""
self.assertTrue(convert_to_alchemy_type(1, String) == "1")
self.assertTrue(convert_to_alchemy_type("Hello", String) == "Hello")
def test_convert_to_null(self):
"""Test that convert_to_alchemy_type properly returns None."""
self.assertTrue(convert_to_alchemy_type("null", String) is None)
self.assertTrue(convert_to_alchemy_type(None, String) is None)
def test_convert_fail(self):
"""Test that convert_to_alchemy_type properly fails."""
self.assertRaises(
TypeError,
convert_to_alchemy_type,
"blah",
None)
def test_get_attr_class_attributes(self):
"""Test that _get_class_attributes works."""
class_attrs = mqlalchemy._get_class_attributes(
Album,
"tracks.0.track_id")
self.assertTrue(len(class_attrs) == 4)
def test_stack_size_limit(self):
"""Make sure that limiting the stack size works as expected."""
query = apply_mql_filters(
self.db_session,
Album,
filters={
"album_id": 1,
"title": "For Those About To Rock We Salute You"},
stack_size_limit=10
)
result = query.all()
self.assertTrue(len(result) == 1)
self.assertTrue(result[0].album_id == 1)
def test_stack_size_limit_fail(self):
"""Make sure that limiting the stack size fails as expected."""
self.assertRaises(
InvalidMqlException,
apply_mql_filters,
self.db_session,
Album,
filters={
"album_id": 1,
"title": "For Those About To Rock We Salute You"},
stack_size_limit=1
)
def test_type_conversion_fail(self):
"""Make sure we can't check a relation for equality."""
self.assertRaises(
InvalidMqlException,
apply_mql_filters,
self.db_session,
Playlist,
{"playlist_id": "test"}
)
def test_self_referential_relation(self):
"""Test relationship chain leading to the same model."""
query = apply_mql_filters(
self.db_session,
Album,
{"tracks.album.album_id": 18}
)
result = query.all()
self.assertTrue(len(result) == 1)
self.assertTrue(result[0].album_id == 18)
def test_required_filters(self):
"""Test nested conditions are applied properly."""
nested_condition_log = {}
def nested_conditions(key):
"""Return required filters for a relation based on key name.
:param str key: Dot separated data key, relative to the
root model.
:return: Any required filters to be applied to the child
relationship.
"""
# Do some logging of how many times each key is hit
nested_condition_log[key] = (nested_condition_log.get(key) or 0) + 1
if key == "tracks":
return Track.album.has(Album.album_id != 18)
# Search playlist for a track that is explicitly excluded
# via required_filters
query = apply_mql_filters(
self.db_session,
Playlist,
{"tracks.track_id": 166},
nested_conditions=nested_conditions
)
self.assertTrue(nested_condition_log.get("tracks") == 1)
result = query.all()
self.assertTrue(len(result) == 0)
def test_required_filters_tuple(self):
"""Test nested conditions are applied properly as a tuple."""
nested_condition_log = {}
def nested_conditions(key):
"""Return required filters for a relation based on key name.
:param str key: Dot separated data key, relative to the
root model.
:return: Any required filters to be applied to the child
relationship.
:rtype: tuple
"""
# Do some logging of how many times each key is hit
nested_condition_log[key] = (nested_condition_log.get(key) or 0) + 1
if key == "tracks":
return tuple([Track.album.has(Album.album_id != 18)])
# Search playlist for a track that is explicitly excluded
# via required_filters
query = apply_mql_filters(
self.db_session,
Playlist,
{"tracks.track_id": 166},
nested_conditions=nested_conditions
)
self.assertTrue(nested_condition_log.get("tracks") == 1)
result = query.all()
self.assertTrue(len(result) == 0)
def test_nested_conditions_dict(self):
"""Test dict nested conditions are applied properly."""
nested_conditions = {
"tracks": (
Track.album.has(Album.album_id != 18))}
# Search playlist for a track that is explicitly excluded
# via required_filters
query = apply_mql_filters(
self.db_session,
Playlist,
{"tracks.track_id": 166},
nested_conditions=nested_conditions
)
result = query.all()
self.assertTrue(len(result) == 0)
if __name__ == '__main__': # pragma no cover
unittest.main()
| StarcoderdataPython |
6617588 | <gh_stars>1-10
__version__="0.1.0.1"
__usage__="""
_______ __ __ __ _ _______ __ __ ______ __ _ __ __
| || | | || | | || || | | || | | | | || |_| |
| _____|| |_| || |_| ||_ _|| |_| || _ || |_| || |
| |_____ | || | | | | || | | || || |
|_____ ||_ _|| _ | | | | || |_| || _ || |
_____| | | | | | | | | | | _ || || | | || ||_|| |
|_______| |___| |_| |__| |___| |__| |__||______| |_| |__||_| |_|
Version {} Authors: <NAME>, <NAME>, <NAME>
Contact: <EMAIL>
---------------------------------------------------------------------------------
synthdnm-classify -f <in.fam> -d <in.features.txt>
necessary arguments:
-f, --fam PATH PLINK pedigree (.fam/.ped) file
-d, --features PATH feature file
optional arguments:
-s, --snp_classifier PATH path to snp classifier joblib file
-l, --indel_classifier PATH path to indel classifier joblib file
-p, --keep_all_putative_dnms flag that retains all putative dnms (and their scores) in the output files
-h, --help show this message and exit
""".format(__version__)
import pandas as pd
# from sklearn.externals import joblib
import joblib
import os,sys
import numpy as np
def classify_dataframe(df = None, clf = None, ofh = None, mode = "a", keep_fp = False, features = None):
    """Score candidate de novo mutations with *clf* and append to *ofh*.

    :param df: DataFrame of putative DNMs plus their feature columns.
    :param clf: fitted classifier exposing predict/predict_proba.
    :param ofh: path of the tab-separated output predictions file.
    :param mode: open mode for the output; "a" appends below the header
        written by classify().
    :param keep_fp: if True, keep predicted false positives in the output.
    :param features: ordered list of feature column names fed to *clf*.
    :return: 0 when *df* has no scoreable rows, else None.
    """
    # Silences pandas chained-assignment warnings; note this mutates a
    # process-wide pandas option, not just this call.
    pd.options.mode.chained_assignment = None
    df = df.replace([np.inf, -np.inf], np.nan)
    # Drop rows with missing feature values.
    # NOTE(review): assumes feature columns occupy positions 12..35 of
    # the table -- confirm against the feature-file layout.
    df = df.dropna(axis=0,subset=df.columns[12:36])
    # ClippingRankSum (temporary solution)
    df["ClippingRankSum"] = 0
    if df.empty:
        # print("Empty dataframe.")
        return 0
    X = df[features].to_numpy()
    df["pred"] = clf.predict(X)
    # Probability of the positive (true DNM) class.
    df["prob"] = clf.predict_proba(X)[:,1]
    if keep_fp == False:
        # Keep only variants predicted to be real de novo mutations.
        df = df.loc[df["pred"] == 1]
    with open(ofh, mode) as f:
        df.to_csv(f, sep="\t", header = False, index=False)
def get_sex(fam_fh):
    """Parse a PLINK pedigree (.fam) file into an IID-to-sex DataFrame.

    :param fam_fh: path to a tab-separated pedigree file whose columns
        are FID, IID, father, mother, sex, phenotype.
    :return: DataFrame with columns ``["sex", "iid"]`` (matching the
        original column order) and a default integer index.
    """
    fam_dict = {}
    # The file handle was previously opened and never closed; use a
    # context manager so it is released even on parse errors.
    with open(fam_fh, "r") as fam:
        for line in fam:
            fields = line.rstrip().split("\t")
            # Column 1 = individual ID, column 4 = sex code ("1"/"2").
            fam_dict[fields[1]] = fields[4]
    df = pd.Series(fam_dict).to_frame("sex")
    df["iid"] = df.index
    # drop=True replaces the old pattern of materializing an "index"
    # column and immediately dropping it.
    df.reset_index(drop=True, inplace=True)
    return df
def classify(feature_table=None,keep_fp=False,pseud=None,fam_fh=None,clf_snv="snp_100-12-10-2-1-0.0-100.joblib",clf_indel="indel_1000-12-25-2-1-0.0-100.joblib"):
    """Classify putative de novo SNVs/indels and write prediction files.

    Writes ``<feature_table stem>.preds.txt`` (tab-separated predictions)
    and ``<feature_table stem>.preds.bed`` next to the input table.

    :param feature_table: path to the tab-separated feature file.
    :param keep_fp: keep predicted false positives in the output.
    :param pseud: pseudoautosomal-region intervals; currently unused --
        only referenced by the disabled sex-chromosome code noted below.
    :param fam_fh: path to the PLINK pedigree (.fam) file.
    :param clf_snv: joblib file holding the SNV classifier.
    :param clf_indel: joblib file holding the indel classifier.
    """
    # Get classifiers
    clf = joblib.load(clf_snv)
    clf_indels = joblib.load(clf_indel)
    # Make dataframe from input pydnm file
    df = pd.read_csv(feature_table,sep="\t",dtype={"chrom": str})
    # Feature columns = everything that is not a fixed annotation column.
    columns = list(df.columns)
    non_features = ['chrom', 'pos', 'ID', 'ref', 'alt', 'iid', 'offspring_gt', 'father_gt', 'mother_gt', 'nalt', 'filter', 'qual']
    features = [elem for elem in columns if elem not in non_features]
    df_fam = get_sex(fam_fh)
    # (Disabled) extraction of chrX/chrY pseudoautosomal-region intervals
    # from `pseud` lived here; sex-chromosome variants are currently not
    # classified (see the disabled selections further down).
    # Derive the output paths from the feature-table location.
    from pathlib import Path
    feature_filename = feature_table
    feature_file_stem = Path(feature_filename).stem
    feature_file_parent = str(Path(feature_filename).parent) + "/"
    feature_file_parent_stem = feature_file_parent + feature_file_stem
    ofh = feature_file_parent_stem + ".preds.txt"
    # Write the output header: all input columns plus sex/pred/prob.
    with open(feature_filename) as f:
        header_list = f.readline().rstrip().split("\t")
    header_list = header_list + ["sex","pred","prob"]
    df_out = pd.DataFrame(columns = header_list)
    df_out.to_csv(ofh, sep = "\t", index = False)
    # Annotate each variant with the carrier's sex from the pedigree.
    df['iid']=df['iid'].astype(str)
    df_fam['iid']=df_fam['iid'].astype(str)
    df = pd.merge(df, df_fam, on="iid")
    # Drop unplaced GL* contigs.
    # NOTE(review): str.contains treats "GL*" as a regex ("G" followed by
    # zero or more "L"), i.e. any chrom containing "G" is dropped --
    # confirm whether a literal "GL" prefix match was intended.
    df=df[~df["chrom"].str.contains("GL*")]
    df["chrom"]=df["chrom"].astype(str)
    # Normalize chromosome names to the "chr" prefix convention.
    df["chrom"] = df["chrom"].apply(lambda s: "chr" + s if not s.startswith("chr") else s)
    # Autosomal heterozygous candidates with homozygous-reference parents,
    # split into SNVs (single-base ref and alt) and indels (anything else).
    df_autosomal_SNV = df.loc[(df["chrom"] != "chrY") & (df["chrom"] != "chrX") &(df["offspring_gt"]=='0/1') & (df["mother_gt"]=='0/0') &(df["father_gt"]=='0/0') & (df["ref"].str.len() == 1) & (df["alt"].str.len() == 1)]
    df_autosomal_indel = df.loc[(df["chrom"] != "chrY") & (df["chrom"] != "chrX") &(df["offspring_gt"]=='0/1') & (df["mother_gt"]=='0/0') &(df["father_gt"]=='0/0')& ((df["ref"].str.len() != 1) | (df["alt"].str.len() != 1))]
    # (Disabled) chrX/chrY candidate selection for female X, male
    # non-PAR X/Y, and male PAR X/Y variants lived here.
    classify_dataframe(df = df_autosomal_SNV, clf = clf, ofh = ofh, features = features, keep_fp = keep_fp)
    classify_dataframe(df = df_autosomal_indel,clf = clf_indels, ofh = ofh, features = features, keep_fp = keep_fp)
    # (Disabled) classify_dataframe calls for the sex-chromosome subsets.
    # Convert the predictions table to BED format.
    ofb = feature_file_parent_stem + ".preds.bed"
    # NOTE(review): these handles are never closed; consider `with`.
    fout = open(ofb,"w")
    f = open(ofh,"r")
    make_output_bed(f = f, fout = fout)
def make_output_bed(f = None, fout = None):
    """Convert a tab-separated predictions table into 4-column BED lines.

    F is an open, readable file-like object whose first line is a header;
    each data line carries chrom, pos, (skipped), ref, alt, iid as its
    first columns and prediction/probability as its last two.  FOUT is an
    open, writable file-like object that receives one BED row per input
    line: chrom, 0-based start, end, and a colon-joined ID column.
    """
    next(f, None)  # discard the header line
    for record in f:
        fields = record.rstrip().split("\t")
        chrom, pos, ref, alt, iid = fields[0], fields[1], fields[3], fields[4], fields[5]
        pred, prob = fields[-2], fields[-1]
        # BED is half-open and 0-based: start = pos-1, end spans len(ref) bases
        start = int(pos) - 1
        end = int(pos) + len(ref) - 1
        id_column = ":".join([chrom, pos, ref, alt, iid, pred, prob])
        fout.write("{}\t{}\t{}\t{}\n".format(chrom, start, end, id_column))
if __name__ == "__main__":
    import warnings
    warnings.filterwarnings("ignore")
    import argparse
    parser = argparse.ArgumentParser(usage=__usage__)
    # Required arguments
    parser.add_argument("-d", "--features", required=True)
    parser.add_argument("-f", "--fam", required=True)
    # Optional arguments
    parser.add_argument("-s", "--snp_classifier", required=False)
    parser.add_argument("-i", "--indel_classifier", required=False)
    parser.add_argument('-p', "--keep_all_putative_dnms", action='store_true')
    args = parser.parse_args()
    feature_filename = args.features
    ped_filename = args.fam
    # Bugfix: argparse stores long options under the long name, so these
    # are args.snp_classifier / args.indel_classifier; the original
    # args.s / args.i raised AttributeError whenever the flag was given.
    if args.snp_classifier:
        snv_clf_filename = args.snp_classifier
    else:
        snv_clf_filename = "snp_100-12-10-2-1-0.0-100.joblib"
    if args.indel_classifier:
        indel_clf_filename = args.indel_classifier
    else:
        indel_clf_filename = "indel_1000-12-25-2-1-0.0-100.joblib"
    keep_fp = args.keep_all_putative_dnms
    classify(feature_table=feature_filename, fam_fh=ped_filename,
             clf_snv=snv_clf_filename, clf_indel=indel_clf_filename,
             keep_fp=keep_fp)
| StarcoderdataPython |
3523207 | <reponame>b0k0n0n/Hasami_Shogi_Python
A_CONSTANT = 97  # ASCII value of 'a'; maps row letters 'a'-'i' to indices 0-8
NUMBER_OF_PIECES = 9  # pieces per side at game start; also the board dimension


class HasamiShogiGame:
    """A playable game of Hasami Shogi with custodian and corner captures.

    Squares are addressed as a row letter ('a'-'i', top to bottom) plus a
    column digit ('1'-'9'), e.g. 'b7'.  RED starts on row 'a', BLACK on
    row 'i', and BLACK moves first.  A side loses once it has fewer than
    two pieces remaining.
    """

    def __init__(self):
        """Set up the data members and start a new game.

        Data members:
        _gameboard   -- 9x9 list of lists holding "R", "B", or "*" (empty)
        _turn        -- color whose turn it is ("BLACK" or "RED")
        _black_count -- number of black pieces still on the board
        _red_count   -- number of red pieces still on the board
        """
        self._gameboard = []
        self._turn = ""
        self._black_count = 0
        self._red_count = 0
        self.init_game()  # fills the board and sets the starting turn

    def square_to_coordinates(self, square):
        """Convert a square label like 'b7' to 0-based (row, column)."""
        # ord() gives the ASCII value of the row letter; columns are 1-based
        return ord(square[0]) - A_CONSTANT, int(square[1]) - 1

    def coordinates_to_square(self, row, column):
        """Convert 0-based (row, column) back to a square label like 'b7'."""
        return chr(row + A_CONSTANT) + str(column + 1)

    def init_game(self):
        """Reset piece counts, give BLACK the first turn, and fill the board."""
        self._red_count = NUMBER_OF_PIECES
        self._black_count = NUMBER_OF_PIECES
        self._turn = "BLACK"
        for row in range(NUMBER_OF_PIECES):
            self._gameboard.append([])
            for col in range(NUMBER_OF_PIECES):
                # RED fills the top row, BLACK the bottom row, rest empty
                piece = "R" if row == 0 else "B" if row == NUMBER_OF_PIECES - 1 else "*"
                self._gameboard[row].append(piece)

    def get_active_player(self):
        """Return the color whose turn it is ("BLACK" or "RED")."""
        return self._turn

    def get_enemy_player(self):
        """Return the non-active color; used when checking for captures."""
        return "BLACK" if self._turn == "RED" else "RED"

    def toggle_players(self):
        """Switch the active player after a completed turn."""
        self._turn = "BLACK" if self._turn == "RED" else "RED"

    def get_square_occupant(self, square):
        """Return "RED", "BLACK", or "NONE" for the given square label."""
        row, column = self.square_to_coordinates(square)
        piece = self._gameboard[row][column]
        return "RED" if piece == "R" else "BLACK" if piece == "B" else "NONE"

    def set_square_occupant(self, square, value):
        """Set the board cell for SQUARE to VALUE ("R", "B", or "*")."""
        row, column = self.square_to_coordinates(square)
        self._gameboard[row][column] = value

    def get_game_state(self):
        """Return "RED_WON", "BLACK_WON", or "UNFINISHED".

        A side loses when it is reduced to fewer than two pieces.
        """
        return "RED_WON" if self._black_count < 2 else "BLACK_WON" if self._red_count < 2 else "UNFINISHED"

    def get_num_captured_pieces(self, color):
        """Return how many pieces of COLOR ("BLACK"/"RED") have been captured."""
        if color == "BLACK":
            return NUMBER_OF_PIECES - self._black_count
        elif color == "RED":
            return NUMBER_OF_PIECES - self._red_count

    def _scan_captures(self, row, column, drow, dcol, active, enemy):
        """Scan one direction from (row, column) for a custodian capture.

        Walks step (drow, dcol) collecting consecutive ENEMY pieces.  The
        run is captured only if it ends on one of ACTIVE's pieces; hitting
        an empty square or running off the board captures nothing.
        Returns the (possibly empty) list of captured square labels.
        """
        run = []
        r, c = row + drow, column + dcol
        while 0 <= r < NUMBER_OF_PIECES and 0 <= c < NUMBER_OF_PIECES:
            occupant = self.get_square_occupant(self.coordinates_to_square(r, c))
            if occupant == enemy:
                run.append(self.coordinates_to_square(r, c))
                r, c = r + drow, c + dcol
            elif occupant == active:
                return run  # flanked on both ends: capture the whole run
            else:
                return []  # an empty square breaks the sandwich
        return []  # ran off the board edge: no capture

    def check_captures(self, square):
        """Remove any pieces captured by the piece just moved to SQUARE.

        Handles corner captures and ordinary custodian captures in all
        four directions, and decrements the captured side's piece count.
        """
        # keys are corners; values are the two squares that flank them --
        # moving onto either flanking square may complete a corner capture
        corner_dict = {"a1": ["b1", "a2"], "a9": ["a8", "b9"],
                       "i1": ["h1", "i2"], "i9": ["i8", "h9"]}
        active, enemy = self.get_active_player(), self.get_enemy_player()
        captured = []
        for corner, flanks in corner_dict.items():
            if square in flanks:
                other_flank = flanks[0] if flanks[1] == square else flanks[1]
                # corner piece is taken when both flanking squares are active
                if (self.get_square_occupant(corner) == enemy and
                        self.get_square_occupant(other_flank) == active):
                    captured.append(corner)
        # custodian captures along the row and column of the moved piece:
        # up, down, left, right
        row, column = self.square_to_coordinates(square)
        for drow, dcol in ((-1, 0), (1, 0), (0, -1), (0, 1)):
            captured.extend(self._scan_captures(row, column, drow, dcol,
                                                active, enemy))
        for piece in captured:
            self.set_square_occupant(piece, "*")
        if enemy == "BLACK":
            self._black_count -= len(captured)
        else:
            self._red_count -= len(captured)

    def make_move(self, move_from, move_to):
        """Move the active player's piece if the move is legal.

        A legal move slides one piece any distance along its row or column
        through empty squares onto an empty square.  On success, resolves
        captures, toggles the turn, and returns True; otherwise returns
        False without changing state.
        """
        if self.get_game_state() != "UNFINISHED":
            return False
        if move_from == move_to:
            return False
        active = self.get_active_player()
        # must move your own piece onto an empty square
        if (self.get_square_occupant(move_from) != active or
                self.get_square_occupant(move_to) != "NONE"):
            return False
        from_row, from_column = self.square_to_coordinates(move_from)
        to_row, to_column = self.square_to_coordinates(move_to)
        # moves are rook-like: same row or same column only
        if from_row != to_row and from_column != to_column:
            return False
        # every square strictly between origin and destination must be empty
        if from_row == to_row:
            step = 1 if from_column < to_column else -1
            for col in range(from_column + step, to_column, step):
                if self._gameboard[from_row][col] != "*":
                    return False
        else:
            step = 1 if from_row < to_row else -1
            for row in range(from_row + step, to_row, step):
                if self._gameboard[row][from_column] != "*":
                    return False
        self.set_square_occupant(move_from, "*")
        self.set_square_occupant(move_to, active[0])
        self.check_captures(move_to)
        self.toggle_players()
        return True
import minehost
# Ensure that the latest are installed
# pip install -U setuptools wheel
# Build package
# python3 setup.py sdist bdist_wheel
# Upload to Test PyPi
# twine upload -r testpypi dist/*
# Upload to PyPi
# twine upload dist/*
# Install package from Test PyPi
# pip install --index-url https://test.pypi.org/simple/ <package-name>
def main():
    """Log in to minehost, print the account's server info, and cache the session id.

    A previously cached PHPSESSID is reused when it is still valid;
    otherwise a fresh login is performed and the new id is written back.
    """
    session = minehost.Session()
    cache_path = "session-id.ignore.txt"
    try:
        with open(cache_path, "r") as fh:
            session.cookies.set("PHPSESSID", fh.read())
        if not session.isValid():
            raise minehost.InvalidSessionException
    except (IOError, minehost.InvalidSessionException):
        # no cached id, or the cached one expired: authenticate from scratch
        session.login("<EMAIL>", "5<PASSWORD>")
    account = minehost.Account(session=session)
    print(account.getServer().getInfo())
    # persist the session id for the next run
    with open(cache_path, "w") as fh:
        fh.write(session.cookies.get("PHPSESSID"))
# Run only when executed as a script, not when imported.
if __name__ == "__main__":
    main()
# -*- coding: utf-8 -*-
"""Core classes and utilities supporting Bible references.
For parsing human readable references, see parse.py.
>>> from biblelib.reference import core
# construct a reference from individual elements
>>> vref = core.Verseref(book=62, chapter=3, verse=4)
>>> vref
Verseref('bible.62.3.4')
>>> vref.userstring()
'Mk 3:4'
>>> vref.refly_url()
'https://ref.ly/logosref/Bible.Mk3.4'
# construct from a bible data type reference string
>>> vref = core.makeBiblerefFromDTR('bible.62.4.2')
>>> vref
Verseref('bible.62.4.2')
# checks for valid chapter/verse combinations (somewhat)
>>> core.Verseref(book=62, chapter=33, verse=1)
biblelib.ReferenceValidationError: Invalid chapter index: 33
>>> core.Verseref(book=62, chapter=3, verse=99)
biblelib.ReferenceValidationError: Invalid verse index 99 for chapter=3
See test_reference.py for detailed usage examples.
This doesn't handle:
* chapter-verse ranges (Matt 4-5:12)
TODO:
- clean up properties vs methods
- VerseRef.cmp seems broken
- add todict() methods
"""
import re
import sys
import warnings
from .books import Book
from .biblebooks import Abbreviations
# datatypes for internal-style references
# probably not complete
# maps human-readable datatype names to the machine datatype strings used
# in reference ids like 'bible+esv.62.3.4'
HUMAN_BIBLE_DATATYPES = {'Bible': 'bible',
                         'BibleNRSV': 'bible+nrsv',
                         'BibleBHS': 'bible+bhs',
                         'BibleLXX': 'bible+lxx',
                         'BibleLXX2': 'bible+lxx2',
                         'BibleESV': 'bible+esv',
                         'BibleNA27': 'bible+na27',
                         'BibleSBLGNT': 'bible+sblgnt',
                         'BibleLEB': 'bible+leb2',
                         }
# inverse mapping: machine datatype string -> human-readable name
MACHINE_BIBLE_DATATYPES = {v: k for k, v in HUMAN_BIBLE_DATATYPES.items()}
# view of the valid machine datatype strings (used for membership tests)
BIBLE_DATATYPES = HUMAN_BIBLE_DATATYPES.values()
class BiblelibError(Exception):
    """Base class for all errors raised by biblelib."""
    pass
class ReferenceValidationError(BiblelibError):
    """Raised when validating a reference fails (bad chapter/verse index)."""
    pass
# class hierarchy:
# GenericBibleref
# - Bibleref
# - Bookref
# - Chapterref
# - Verseref
# - RangeChapterref
# - RangeVerseref
class GenericBibleref(object):
    """Abstract base class for all Bible references, simple and range."""
    # class-level symbol table for interned Biblerefs
    _cache = {}
    level = None
    # tuples of start/end indices to simplify subsumption checking; kept at
    # class level for backward compatibility, but shadowed per-instance in
    # __init__ (see the bugfix note there)
    _rangeindices = dict()
    # incomplete but covers the most important cases
    canon_traditions = ['Catholic', 'Jewish', 'Protestant']
    abbreviations = Abbreviations()

    def __init__(self, bibletype='bible'):
        """Create an instance of a Bibleref object.

        BIBLETYPE is a bible datatype string in BIBLE_DATATYPES.
        """
        assert self.__class__ != GenericBibleref, \
            "Bibleref is an interface, and can't be instantiated"
        assert bibletype in BIBLE_DATATYPES, "Invalid bible datatype: {}".format(bibletype)
        self.bibletype = bibletype
        # bugfix: _rangeindices used to live only on the class, so every
        # instance (and every subclass __init__) wrote into one shared
        # dict, clobbering other references' start/end indices.  Give each
        # instance its own mapping of level -> (start, end).
        self._rangeindices = dict()

    def __repr__(self):
        return f"{type(self).__name__}('{self.refid}')"

    __str__ = __repr__

    def __len__(self):
        raise NotImplementedError

    def __hash__(self):
        return hash(self.refid)

    def _compatible_args(self, other):
        """Return a pair of ((start, end) index tuples) for self and other.

        For non-range references, start and end are the same object.
        """
        assert (isinstance(self, GenericBibleref) and
                isinstance(other, GenericBibleref)), \
            f'order is only defined between Bibleref instances: {self}, {other}'
        return ((self.start.indices(), self.end.indices()),
                (other.start.indices(), other.end.indices()))

    def __eq__(self, other):
        selfindices, otherindices = self._compatible_args(other)
        return selfindices == otherindices

    def __lt__(self, other):
        selfindices, otherindices = self._compatible_args(other)
        return selfindices < otherindices

    def __le__(self, other):
        return self < other or self == other

    def datatypestring(self):
        """Return a data type string reference like 'bible.11.16.34'."""
        return self.refid

    def leveleq(self, other):
        """True iff both SELF and OTHER are Bibleref objects at the same level."""
        return isinstance(self, GenericBibleref) and \
            isinstance(other, GenericBibleref) and \
            self.level == other.level
class Bibleref(GenericBibleref):
    """Abstract base for simple (non-range) Bible references.

    Do not instantiate directly; use Bookref, Chapterref, or Verseref.
    """

    def __init__(self, *args, **kwargs):
        assert self.__class__ != Bibleref, \
            "Bibleref is an interface, and can't be instantiated"
        GenericBibleref.__init__(self, *args, **kwargs)
        # ordered names of the index attributes that make up the refid;
        # subclasses append their own level ('book', 'chapter', 'verse')
        self.params = ['bibletype']
        self.refid = self._makerefid()  # sep. function so subclasses can override

    def _makerefid(self):
        """Return a dotted datatype reference id like 'bible.62.3.4'."""
        paramlist = [getattr(self, x) for x in self.params]
        assert all(paramlist), \
            'Null element in paramlist: {}'.format(paramlist)
        # reuse paramlist instead of re-fetching every attribute
        return '.'.join(str(p) for p in paramlist)

    def indices(self):
        """Return a tuple of all available level indices."""
        return tuple(getattr(self, x) for x in self.params)

    # I'm not sure this method makes sense throughout: YAGNI?
    def sublevel_length(self, canon_tradition='Protestant'):
        """Return how many subunits (books) the canon contains.

        For non-Protestant canons, this is counting books in books.py,
        not the traditional canon organizations.
        """
        assert canon_tradition in self.canon_traditions, \
            'Invalid canon_tradition {} should be in {}'.format(canon_tradition, self.canon_traditions)
        if canon_tradition == 'Protestant': return 66
        elif canon_tradition == 'Catholic': return 87  # correct?
        elif canon_tradition == 'Jewish': return 39

    def __len__(self):
        return 1  # by definition these are single references
class Bookref(Bibleref):
    """
    Reference to BIBLE and BOOK, without chapter and verse.
    """
    def __init__(self, book=0, *args, **kwargs):
        """Book is a numeric index """
        Bibleref.__init__(self, *args, **kwargs)
        self.book = int(book)
        self.level = 'book'
        self.params.append(self.level)
        self.refid = self._makerefid()
        self._rangeindices[self.level] = (self.book, self.book)
        # per-book metadata (chapter/verse layout, names); see books.Book
        self._bookdata = Book(self.book)
    def sublevel_length(self):
        """Assuming canon_tradition='Protestant' here. """
        return self._bookdata.get_finalchapter()
    def refdict(self, withbibletype=False, withbook=True, withchapter=True, withverse=True):
        """Return a dict with keys bible, book, chapter, verse.
        Book is the abbreviation. Chapter and verse are integers. You
        can leave out book for contextual end references.
        """
        refdict = {}
        if hasattr(self, 'book') and withbook:
            refdict['book'] = self._bookdata.ldlsrefname
        if hasattr(self, 'bibletype') and withbibletype:
            # map machine datatype string back to its human-readable name
            refdict['bibletype'] = MACHINE_BIBLE_DATATYPES.get(self.bibletype)
        # chapter/verse only exist on Chapterref/Verseref subclasses
        if hasattr(self, 'chapter') and withchapter:
            refdict['chapter'] = self.chapter
        if hasattr(self, 'verse') and withverse:
            refdict['verse'] = self.verse
        return refdict
    def userstring(self, language="en", **kwargs):
        """Return a user-readable string reference with book abbreviations.
        Specifies book as well as bible, chapter and verse unless
        withbibletype/chapter/verse is False.
        """
        refdict = self.refdict(**kwargs)
        ref = ''
        if 'bibletype' in refdict:
            ref += "{bibletype}:".format(**refdict)
        if 'book' in refdict:
            book = self.abbreviations.abbreviation_for_en(refdict['book'], language=language)
            ref += "{0}".format(book)
        if 'chapter' in refdict:
            ref += " {chapter}".format(**refdict)
        if 'verse' in refdict:
            ref += ":{verse}".format(**refdict)
        return ref
    def _make_uri(self):
        "Common code for making URI strings. "
        refdict = self.refdict(withbibletype=True)
        ref = "{bibletype}.{book}".format(**refdict)
        # NOTE: the chapter is appended directly to the book abbreviation
        # (e.g. 'Bible.Mk3.4'), so there is deliberately no '.' before it
        if 'chapter' in refdict:
            ref += str(refdict.get('chapter'))
        if 'verse' in refdict:
            ref += '.{}'.format(refdict.get('verse'))
        return ref
    def refly_url(self):
        """Return a ref.ly URL for self. """
        return "https://ref.ly/logosref/{}".format(self._make_uri())
    def logosref_uri(self):
        """Return a string for self under the Logos URI Protocol.
        See https://wiki.lrscorp.net/logosref_Protocol. """
        return "logosref:{}".format(self._make_uri())
    def get_chapters(self):
        # valid chapter indices for this book
        return self._bookdata.get_chapters()
    def has_chapter(self, index):
        # True iff INDEX is a valid chapter in this book
        return self._bookdata.has_chapter(index)
class Chapterref(Bookref):
    """A reference to Book and Chapter, without verse."""

    def __init__(self, chapter=0, *args, **kwargs):
        Bookref.__init__(self, *args, **kwargs)
        self.chapter = int(chapter)
        self.level = 'chapter'
        self.params.append(self.level)
        self.refid = self._makerefid()
        self._rangeindices[self.level] = (self.chapter, self.chapter)
        if not self._bookdata.has_chapter(self.chapter):
            raise ReferenceValidationError("Invalid chapter index: %d" % self.chapter)
        # for consistency with ranges
        self.start = self
        self.end = self

    def _subcheck(self, other):
        """Assert that SELF and OTHER refer to the same book."""
        # bugfix: previously delegated to Bookref._subcheck, which does not
        # exist and raised AttributeError on every call.
        assert self.book == other.book, \
            "Subtraction undefined for different books: %s and %s" % (self, other)

    def sublevel_length(self):
        """Return the number of verses in this chapter."""
        return self._bookdata.get_finalverse(self.chapter)

    def subsumes(self, other):
        """
        True if OTHER's level is at or below SELF's, and OTHER has the
        same indexes as each of SELF's. This hierarchical inclusion is
        perhaps subtly different from range subsumption.
        """
        # other's level is below if it's not in self's params
        if self.level in other.params:
            if (isinstance(self, Chapterref) and
                    isinstance(other, RangeVerseref)):
                # other has no chapter attrs (start and end might differ)
                return (self.book == other.book and
                        self.chapter == other.start.chapter and
                        self.chapter == other.end.chapter)
            else:
                # bugfix: all() takes a single iterable; the original
                # passed a lambda plus a sequence, which raised TypeError.
                return all(getattr(other, p) == getattr(self, p)
                           for p in self.params)
        return False

    def get_finalverse(self):
        """Return the index of the last verse in this chapter."""
        return self._bookdata.get_finalverse(self.chapter)

    # hack! assumes the first verse is 1, which isn't always true
    def toVerseref(self):
        """Return a Verseref to verse 1 in the chapter."""
        return Verseref(book=self.book, chapter=self.chapter, verse=1)
class Verseref(Chapterref):
    """A simple reference to BOOK, CHAPTER, and VERSE.

    Assumes chapters whose first verse has index=1.
    """

    def __init__(self, verse=0, **kwargs):
        Chapterref.__init__(self, **kwargs)
        self.verse = verse
        self.level = 'verse'
        # position of this verse within the whole book; used for ordering
        # and for enumerating verse ranges
        self.verseindex = self._bookdata.get_vindex(self.chapter, self.verse)
        self.params.append(self.level)
        self._makerefid()
        self._rangeindices[self.level] = (self.verse, self.verse)
        if not self._bookdata.has_chapterandverse(self.chapter, self.verse):
            errmsg = "Invalid verse index {} for chapter={}".format(self.verse, self.chapter)
            raise ReferenceValidationError(errmsg)
        # for consistency with RangeVerseref
        self.start = self
        self.end = self

    # override of Bibleref method to handle Ps titles
    def _makerefid(self):
        """Build the refid, rendering verse 0 as 'title' (Psalm titles)."""
        paramvals = [str(getattr(self, x)) for x in self.params]
        # tinker if the verse value is '0'
        if paramvals[-1] == '0':
            paramvals[-1] = 'title'
        self.refid = '.'.join(paramvals)
        # NOTE(review): for verse 0 ('title') this assert sees a falsy 0
        # and would fire -- confirm how title references are constructed
        assert all([getattr(self, x) for x in self.params]), \
            'Null element in %s' % self.refid
        return self.refid

    # this signals a problem with my class model :-/
    def sublevel_length(self):
        raise NotImplementedError("Verserefs don't have sub levels")

    # this may seem silly, but enables easier intersection checking for RangeVerseref
    def enumerateverses(self):
        """Return a list of self, a degenerate case of enumeration."""
        return [self]

    def intersection(self, other, sort=False):
        """Return the common verses between SELF and OTHER as a list.

        Vacuous case for consistency: [self] when OTHER equals or
        contains SELF, otherwise the empty list.
        """
        assert (isinstance(other, GenericBibleref) and other.level == 'verse'), \
            "intersection not defined for %s" % other
        if self == other:
            return [self]
        elif isinstance(other, RangeVerseref) and self in other.enumerateverses():
            # bugfix: was other.EnumerateVerses(), a method that does not
            # exist (RangeVerseref defines enumerateverses).
            return [self]
        else:
            return []
# # added coverage for this, but this really ought to be integrated further with
# # RangeChapterref
# class RangeBookref(GenericBibleref):
# """
# A range format is a composite of start and end Bookref objects.
# Some functionality isn't provided here
# """
# def __init__(self, start, end, force=False, validate=False):
# """With FORCE, make it a range even if it isn't."""
# GenericBibleref.__init__(self)
# assert (isinstance(start, Bibleref) and isinstance(end, Bibleref)), \
# "start %s and end %s must both be Bibleref objects" % (start, end)
# (self.start, self.end) = (start, end)
# assert (start.bibletype == end.bibletype), \
# "start %s and end %s must be in the same bible" % (start, end)
# self.bibletype = self.start.bibletype
# self.book = self.start.book
# assert start.leveleq(end), \
# "start %s and end %s must be at the same level" % (start, end)
# self.level = self.start.level
# self.params = self.start.params
# assert self.start.index <= self.end.index, \
# "start %s must precede end %s" % (start, end)
# self._rangeindices['book'] = (getattr(self.start, 'book'),
# getattr(self.end, 'book'))
# # like the Lbx parser, the end part includes book, chapter,
# # and verse, even if redundant with the start
# shortid = self.end.id[len(self.end.bibletype)+1:]
# self.id = "%s-%s" % (self.start.id, shortid)
# def userstring(self, withbibletype=False):
# """Return a string reference in traditional format using the
# LDLS book abbreviations, like '1 Ki 16:34'.
# If WITHBIBLETYPE is True, include the bible datatype.
# """
# # return Unicode with an emdash
# return u"{}–{}".format(self.start.userstring(withbibletype=withbibletype),
# self.end.userstring())
# def logos_bible_url(self, domain='http://bible.logos.com',
# passage='passage', version='NIV'):
# """
# Return a URL for this reference at bible.logos.com.
# """
# return "%s/%s/%s/%s" % (domain, passage, version,
# self.userstring().replace(':', '.'))
# def Validate(self):
# """Raise a BiblerefValidationError if SELF isn't a well-formed range
# (vacuous ranges are allowed). Checks the component start and
# end as well."""
# self.start.Validate()
# self.end.Validate()
# return True
# def indices(self):
# """Return a start/end tuple of tuples of indices for start and
# end."""
# return (self.start.indices, self.end.indices)
# def GetBookname(self, nametype='userstringname'):
# return self.start.GetBookname(nametype=nametype)
# def _levelsubsumes(self, other, level):
# """
# True iff SELF's indices at LEVEL are the same as OTHER's or
# subsume it.
# """
# (selfir, otherir) = (self._rangeindices, other._rangeindices)
# if level in selfir and level in otherir:
# return (otherir.get(level)[0] >= selfir.get(level)[0] and
# otherir.get(level)[1] <= selfir.get(level)[1])
# elif hasattr(self, level) and hasattr(other, level):
# # both have this level: values must be the same
# return getattr(self, level) == getattr(other, level)
# def Subsumes(self, other):
# """True if OTHER's level is at or below SELF's, and OTHER's
# range is within SELF's (inclusive, including the case of a
# single verse reference). This also means any range subsumes
# itself. May give bogus results for bogus ranges."""
# if self.level in other.params:
# # all common levels must have the same values or subsuming ones
# return all(lambda p: self._levelsubsumes(other, p),
# self.params)
# # be nice to have intersection, but that's hard to do right for chapters
# # def intersection(self, other):
# # ToDo:
# # enumeration (more generally) and indexing and iteration
# def __len__(self):
# """The number of items at self.level between start and end,
# inclusive. So 1:1-1:2 is size 2, not 1, and the smallest range
# length is 1."""
# return (self.end - self.start) + 1
# # "rich" comparison is only partiall defined for RangeChapterref and subs
# # i'm not sure what the semantics of lt/gt would be in general:
# # Subsumes and Overlaps are clearer
# # does this fully replace __cmp__() ?
# def __eq__(self, other):
# return (self.__class__ == other.__class__ and
# self.start == other.start and
# self.end == other.end)
# def __ne__(self, other):
# return ((self.__class__ != other.__class__) or
# (self.start != other.start) or
# (self.end != other.end))
class RangeChapterref(GenericBibleref):
    """Range of chapters within one book, e.g. Mark 1-4.

    A composite of start and end Chapterref objects at the same level.
    Cross-bible and cross-book ranges are not allowed.
    """

    def __init__(self, start, end, force=False, validate=False):
        """With FORCE, make it a range even if it isn't."""
        GenericBibleref.__init__(self)
        assert (isinstance(start, Chapterref) and isinstance(end, Chapterref)), \
            "start %s and end %s must both be Chapterref objects" % (start, end)
        assert ((start.bibletype == end.bibletype) and (start.book == end.book)), \
            "start %s and end %s must be in the same bible and book" % (start, end)
        assert start.leveleq(end), \
            "start %s and end %s must be at the same level" % (start, end)
        (self.start, self.end) = (start, end)
        self.bibletype = self.start.bibletype
        self.book = self.start.book
        self._bookdata = self.start._bookdata
        self.level = self.start.level
        self.params = self.start.params
        assert self.start.chapter <= self.end.chapter, \
            "start %s must precede end %s" % (start, end)
        self._rangeindices['chapter'] = (getattr(self.start, 'chapter'),
                                         getattr(self.end, 'chapter'))
        # like the Lbx parser, the end part repeats book, chapter, and
        # verse after the hyphen, even if redundant with the start
        shortrefid = self.end.refid[len(self.end.bibletype)+1:]
        self.refid = "%s-%s" % (self.start.refid, shortrefid)

    def userstring(self, language="en", withbibletype=False):
        """Return a string reference in traditional format using the
        LDLS book abbreviations, like '1 Ki 16'."""
        start_refdict = self.start.refdict()
        ref = "{0} {1}".format(self.abbreviations.abbreviation_for_en(start_refdict['book'], language=language),
                               start_refdict['chapter'])
        if self.end.chapter != self.start.chapter:
            ref += u"–{}".format(self.end.chapter)
            # verse branches only apply to the RangeVerseref subclass
            if self.level == 'verse':
                ref += ":{}".format(self.end.verse)
        elif self.level == 'verse':
            ref += u"–{}".format(self.end.verse)
        return ref

    def refly_url(self):
        """Return a ref.ly URL for self."""
        return "https://ref.ly/logosref/{}-{}".format(self.start._make_uri(),
                                                      self.end.chapter)

    def logosref_uri(self):
        """Return a string for self under the Logos URI Protocol.
        See https://wiki.lrscorp.net/logosref_Protocol."""
        return "logosref:{}-{}".format(self.start._make_uri(), self.end.chapter)

    def indices(self):
        """Return a start/end tuple of tuples of indices for start and
        end."""
        return (self.start.indices(), self.end.indices())

    def _levelsubsumes(self, other, level):
        """
        True iff SELF's indices at LEVEL are the same as OTHER's or
        subsume it.
        """
        (selfir, otherir) = (self._rangeindices, other._rangeindices)
        if level in selfir and level in otherir:
            return (otherir.get(level)[0] >= selfir.get(level)[0] and
                    otherir.get(level)[1] <= selfir.get(level)[1])
        elif hasattr(self, level) and hasattr(other, level):
            # both have this level: values must be the same
            return getattr(self, level) == getattr(other, level)

    def sublevel_length(self):
        # counting verses across the component chapters was never finished
        raise NotImplementedError("RangeChapterrefs don't have sub levels")

    def subsumes(self, other):
        # range subsumption was never finished; see _levelsubsumes
        raise NotImplementedError

    def __len__(self):
        """The number of items at self.level between start and end,
        inclusive. So 3-4 has length 2, not 1, and the smallest range
        length is 1."""
        return (int(self.end.chapter) - int(self.start.chapter)) + 1

    def __eq__(self, other):
        return (self.__class__ == other.__class__ and
                self.start == other.start and
                self.end == other.end)

    def __ne__(self, other):
        # bugfix: the original called self.eq(other), a method that does
        # not exist and raised AttributeError; negate __eq__ instead.
        return not self.__eq__(other)
class RangeVerseref(RangeChapterref):
    """
    A composite of start and end Verseref objects, representing a
    contiguous run of verses (possibly crossing a chapter boundary).
    """

    def __init__(self, start, end, **kwargs):
        """START and END are Verseref instances; START must not come
        after END in whole-Bible verse order."""
        RangeChapterref.__init__(self, start=start, end=end, **kwargs)
        assert self.start.verseindex <= self.end.verseindex, \
            "start %s must precede end %s" % (start, end)
        # not sure how these are used, so could be wrong
        self._rangeindices['verse'] = (self.start.verseindex, self.end.verseindex)

    # potentially inefficient use of VerserefFromIndex
    def enumerateverses(self):
        """Return an ordered list of Simpleref instances for
        the individual verses in SELF."""
        verses = []
        cursor = self.start.verseindex
        try:
            while (cursor <= self.end.verseindex):
                verses.append(VerserefFromIndex(book=self.book, index=cursor))
                cursor += 1
        except Exception as exc:
            # Chain the underlying failure instead of hiding it behind the
            # original bare ``except:``.
            raise ValueError("EnumerateVerses failed on %s" % self) from exc
        return verses

    # this signals a problem with my class model :-/
    def sublevel_length(self):
        """Unsupported: verse ranges have no sub level to count."""
        raise NotImplementedError("RangeVerserefs don't have sub levels")

    def userstring(self, language="en", withbibletype=False):
        """Return a string reference in traditional format using the
        LDLS book abbreviations, like '1 Ki 16:33-34'. This is how
        reference attributes in data elements are formatted."""
        if self.start.chapter != self.end.chapter:
            # Cross-chapter range: spell out the end chapter and verse.
            return "{0}–{1}:{2}".format(self.start.userstring(language),
                                        self.end.chapter, self.end.verse)
        return "{0}–{1}".format(self.start.userstring(language), self.end.verse)

    def refly_url(self):
        """Return a ref.ly URL for self. """
        if self.start.chapter != self.end.chapter:
            return "https://ref.ly/logosref/{}-{}:{}".format(self.start._make_uri(),
                                                             self.end.chapter, self.end.verse)
        return "https://ref.ly/logosref/{}-{}".format(self.start._make_uri(),
                                                      self.end.verse)

    def logosref_uri(self):
        """Return a string for self under the Logos URI Protocol.
        See https://wiki.lrscorp.net/logosref_Protocol. """
        return "logosref:{}-{}".format(self.start._make_uri(), self.end.verse)

    def intersection(self, other, sort=False):
        """Return the common verses between SELF and OTHER.
        The order is undefined unless SORT=True.
        """
        assert (isinstance(other, GenericBibleref) and other.level == 'verse'), \
            "intersection not defined for %s" % other
        # Fixed: the original called ``EnumerateVerses`` (wrong
        # capitalization); no such method is defined on this class, so
        # every call raised AttributeError. Assumes no inherited
        # ``EnumerateVerses`` exists elsewhere in the hierarchy.
        isection = list(set(self.enumerateverses())
                        .intersection(set(other.enumerateverses())))
        if sort:
            isection.sort()
        return isection

    def __len__(self):
        """The number of items at self.level between start and end,
        inclusive. So 3-4 has length 2, not 1, and the smallest range
        length is 1."""
        return (self.end.verseindex - self.start.verseindex) + 1
# ##### Utilities for constructing Bibleref objects
# # verse=0 is now valid for Psalm titles
# # ToDo: something useful with errors
def makeBibleref(bibletype='bible', book=0, chapter=0, verse=-1, errors='strict'):
    """
    Factory that builds a Bookref/Chapterref/Verseref and caches it by
    its refid, so a repeated request returns the existing object rather
    than recreating it.
    """
    if isinstance(verse, str):
        # strip trailing sub-verse letters like '16a'
        verse = re.sub('(?<=[0-9])[a-z]+$', '', str(verse)) #KLUDGE!!!!!!!
    # bad hack: Psalm titles are encoded as verse 0
    if verse == 'title':
        verse = 0
    book, chapter, verse = int(book), int(chapter), int(verse)
    # Assemble the cache key from whichever components are present.
    params = [bibletype]
    if book:
        params.append(book)
    if chapter:
        params.append(chapter)
    if verse > -1:
        params.append(verse)
    refid = '.'.join(str(part) for part in params)
    if refid in GenericBibleref._cache:
        return GenericBibleref._cache.get(refid)
    if not chapter:
        obj = Bookref(bibletype=bibletype, book=book)
    elif verse > -1:
        obj = Verseref(bibletype=bibletype, book=book, chapter=chapter, verse=verse)
    else:
        obj = Chapterref(bibletype=bibletype, book=book, chapter=chapter)
    GenericBibleref._cache[obj.refid] = obj
    return obj
# # ToDo: something useful with errors
def makeRangeref(start=None, end=None, bibletype='bible', errors='strict'):
    """Factory for range references.

    Picks the right range class from the types of START and END and
    caches the result by a combined refid.
    """
    # fragile shortcut: strip the "<bibletype>." prefix from END's refid
    shortrefid = end.refid[len(end.bibletype)+1:]
    refid = "%s-%s" % (start.refid, shortrefid)
    if refid in GenericBibleref._cache:
        return GenericBibleref._cache.get(refid)
    # gotcha: isinstance(<Verseref instance>, Chapterref) is True, so the
    # Verseref checks must always come before the Chapterref ones.
    if isinstance(start, Verseref) and isinstance(end, Verseref):
        rangeref = RangeVerseref(start=start, end=end)
    elif isinstance(start, Verseref) and isinstance(end, Chapterref):
        # handles bible.1.1.12-1.23: convert the end to a verse ref as well
        rangeref = RangeVerseref(start=start, end=end.toVerseref())
    elif isinstance(end, Verseref) and isinstance(start, Chapterref):
        # handles bible.1.12-1.13.3: convert the start to a verse ref as well
        rangeref = RangeVerseref(start=start.toVerseref(), end=end)
    elif isinstance(start, Chapterref) and isinstance(end, Chapterref):
        rangeref = RangeChapterref(start=start, end=end)
    elif isinstance(start, Bookref) and isinstance(end, Bookref):
        rangeref = RangeBookref(start=start, end=end)
    else:
        # mixed book/non-book combinations are not handled
        raise ValueError('Invalid input to makeRangeref: {}, {}'.format(start, end))
    GenericBibleref._cache[rangeref.refid] = rangeref
    return rangeref
def VerserefFromIndex(bibletype='bible', book=0, index=0):
    """
    Given BOOK and a zero-based INDEX into its verses, return the
    corresponding Verseref object. Minimal range checking on INDEX.
    """
    # NOTE(review): `book` is rebound here from the numeric id to a Book
    # instance, and that instance (not the int id) is what gets passed to
    # Verseref below -- unlike makeBibleref, which passes an int. Confirm
    # Verseref accepts both forms.
    book = Book(int(book))
    chapter, verse = book.get_vindex_chapter_verse(index)
    return Verseref(bibletype=bibletype, book=book, chapter=chapter, verse=verse)
def makeBiblerefFromDTR(ref, errors='strict'):
    """Return a Bibleref object for a data type reference like u'bible.64.3.16'.

    :param ref: dotted data-type reference, optionally a range joined by
        '-' or an en dash, e.g. 'bible.64.3.16-3.18'.
    :param errors: 'strict' re-raises any parsing failure (default);
        'filter' warns and returns None; 'ignore' warns and returns the
        (possibly invalid) input unconverted.
    """
    def fullmatch(regexp, string):
        # re's fullmatch is Python3 only; emulate it on Python2.
        if sys.version_info.major < 3:
            match = regexp.match(string)
            if match and match.start() == 0 and match.end() == len(string):
                return match
            return None
        else:
            return regexp.fullmatch(string)
    # Fixed: the original assert message was "'...'.errors" -- an attribute
    # access on a str that raised AttributeError whenever the assert fired.
    assert errors in ['strict', 'ignore', 'filter'], \
        'Invalid errors value: {}'.format(errors)
    # A range is two dotted references joined by '-' or an en dash.
    range_regexp = re.compile(r"(?P<start>.+)[-|–](?P<end>.+)")
    try:
        range_match = fullmatch(range_regexp, ref)  # match once, reuse below
        if range_match:
            (start, end) = range_match.groups()
            # The end half omits the bibletype prefix; borrow it from start.
            (bible, startref) = start.split('.', 1)
            end = "%s.%s" % (bible, end)
            startref = makeBibleref(**dict(zip(['bibletype', 'book', 'chapter', 'verse'],
                                               start.split('.')),
                                           errors=errors))
            endref = makeBibleref(**dict(zip(['bibletype', 'book', 'chapter', 'verse'],
                                             end.split('.')),
                                         errors=errors))
            return makeRangeref(start=startref, end=endref, errors=errors)
        else:
            # Psalm titles appear as '.title'; normalize to verse 0.
            ref = re.sub(r'\.title$', '.0', ref)
            return makeBibleref(**dict(zip(['bibletype', 'book', 'chapter', 'verse'],
                                           ref.split('.')),
                                       errors=errors))
    except Exception as e:
        if errors == 'strict':
            raise  # bare raise preserves the original traceback
        if errors == 'filter':
            # Fixed: the original passed three arguments to a two-placeholder
            # format string; the stray argument is dropped.
            warnings.warn('{} calling makeBiblerefFromDTR:\n{}\nReturning None'.format(
                type(e).__name__, e))
            return None
        elif errors == 'ignore':
            warnings.warn('{} calling makeBiblerefFromDTR:\n{}\nIgnoring error and returning {} as string'.format(
                type(e).__name__, e, ref))
            return ref
# # convenience function so i can apply makeBiblerefFromDTR to lots of data and return bad data unchanged
# def UserrefFromDTR(ref):
# """Return a user-readable reference, or the input string if not processable"""
# result = makeBiblerefFromDTR(ref, errors='ignore')
# if isinstance(result, GenericBibleref):
# return result.userstring()
# else:
# return ref
# # wish i could just do this in a lambda
# def protect_plus(string):
# if '+' in string: return string.replace('+', '\+')
# else: return string
# # just testing bible and book
# MACHINEREF_REGEXP = re.compile('(?P<bible>%s)\.(?P<book>\d+)' %
# '|'.join(map(protect_plus,
# MACHINE_BIBLE_DATATYPES.keys())))
# def humanrefFromLdlsref(string):
# """If STRING can be 'parsed' as an Ldlsref (bible.3.4.5), return
# the human readable reference: otherwise return STRING
# unchanged. Does no validation.
# """
# m = MACHINEREF_REGEXP.match(string)
# if m:
# try:
# br = makeBiblerefFromLdlsref(string)
# return br.userstring(withbibletype=True)
# except LdlsrefError:
# return string
# else:
# return string
# global _LDLS, _refcache
# _LDLS = None
# _refcache = {}
# def Biblia2Bibleref(result):
# """
# Given a valid result from calling Biblia's parse service, return a
# list of bibleref objects (may be empty).
# """
# biblerefs = []
# for r in result:
# parts = r.get('parts')
# bb = Book(parts['book'])
# if not bb:
# raise ValueError('Biblia2Bibleref: failed on %s' % r.get('passage'))
# parts.update({'book': int(bb.refid)})
# # filter out extraneous keys
# startparts = dict([(k, v) for k, v in parts.items()
# if k in ['book', 'chapter', 'verse']])
# startref = makeBibleref(**startparts)
# if parts.get('endVerse'):
# # not handling cross-book ranges
# endparts = {
# 'book': parts['book'],
# 'chapter': parts.get('endChapter') or parts.get('chapter'),
# 'verse': parts.get('endVerse'),
# }
# endref = makeBibleref(**endparts)
# biblerefs.append(makeRangeref(start=startref, end=endref))
# else:
# biblerefs.append(startref)
# return biblerefs
# def GetBibliaLink(reference='', bible='nrsv'):
# """
# Pass in reference from RenderBibleReference using English/full=False
# """
# ref = re.sub(r' *', r'', reference)
# ref = re.sub(r':', r'.', ref)
# return 'http://biblia.com/bible/%s/%s' % (bible, ref)
# def LdlsParseBibleref(string, bibletype='bible', lang='en'):
# """
# Given a string like 'Jn 3:16', uses Libronix to return a Lbx-style
# reference like u'bible.64.3.16', or u'' if none can be
# parsed. Uses English book names and abbreviations unless a
# different LANG is supplied. Does not validate verse or chapter
# indices to ensure they're valid, and makes guesses about
# over-abbreviated book names like 'J'. If prefixed with a Bible
# datatype like 'Bible:', 'BibleBHS:' or 'BibleLXX:', maps that to
# the datatype: otherwise assumes 'bible'.
# """
# raise NotImplementedError('LdlsParseBibleref is deprecated: use logos_pyutil.clients.ReferenceUtility')
# # global _LDLS
# # # only start it once
# # if not _LDLS:
# # raise SystemError, \
# # 'This COM interface has been deprecated'
# # _LDLS = LbxApplication()
# # cachekey = (string, bibletype, lang)
# # if cachekey in _refcache:
# # # if it's in the cache, return it directly
# # return _refcache.get(cachekey)
# # else:
# # hbr = isHumanBibleref(string)
# # if hbr:
# # (bibletype, string) = hbr
# # ref = _LDLS.Application.DataTypeManager.Parse(datatype, string, lang).reference
# # # print "%s + %s -> %s" % (bibletype, string, ref)
# # if not ref:
# # raise BiblerefParserError, "LDLS parser returned empty string"
# # else:
# # _refcache[cachekey] = makeBiblerefFromLdlsref(ref)
# # return _refcache[cachekey]
# # this doesn't handle alternate bible datatypes yet
# def LdlsMultiParseBibleref(string, lang='en'):
# """
# Given a string containing multiple Bible references like 'Jn
# 3:16', uses Libronix to return a list of Lbx-style reference like
# u'bible.64.3.16', or the empty list if none can be parsed. Same
# caveats as LdlsParseBibleref. There doesn't seem to be any
# tolerance of non-reference stuff in the string, so keep it
# clean.
# Issues:
# - Returns Chapterrefs for single-chapter books like 'Let Jer 11',
# which probably isn't what you want.
# - Ez is Ezra, not Ezekiel, which may surprise you
# - () and [] cause misbehavior
# - context is carried over: if you have references that change book
# context inside a (), and that fails (it will), then you'll get
# completions of underspecified references that you don't expect
# """
# global _LDLS
# # only start it once
# if not _LDLS:
# raise SystemError('This COM interface has been deprecated')
# _LDLS = LbxApplication()
# biblerefs = []
# cleanstr = string.strip(' ;,\r')
# refs = list(_LDLS.Application.DataTypeManager.MultiParse("bible", string, lang).references)
# if refs:
# return [makeBiblerefFromLdlsref(x) for x in refs]
# else:
# return []
# ##### matching logic and weights
# def matchweight(self, other,
# functions = {'rangestart': RangeChapterref.rangeedge,
# 'rangeend': RangeChapterref.rangeedge,
# 'range': RangeChapterref.rangeweight,
# 'verseinchapter': Chapterref.verseweight,
# 'rangeverseinchapter': RangeChapterref.verseweight,
# }):
# """
# Return a weight for the match between SELF and OTHER. Identity =
# 1, and no match = 0, otherwise the weight is derived by the value
# in FUNCTIONS for the match key as follows:
# 'rangestart', 'rangeend' (self.rangeedge, defaults to 2 * rangeweight)
# 'range' (range subsuming same-level ref, either single or range) = self.rangeweight(other),
# defaults to len(other)/len(self)
# '[range]verseinchapter' ([Range]Chapterref subsuming [Range]Verseref) = self.verseweight(other),
# defaults to len(other)/self.sublevel_length()
# You can supply your own dictionaries of FUNCTIONS, but the keys
# listed above are required.
# """
# if self == other:
# return 1
# elif self.Subsumes(other):
# try:
# if isinstance(self, RangeChapterref):
# if self.leveleq(other):
# # default if nothing better fits
# fn = functions.get('range')
# if isinstance(other, Chapterref):
# if self.start.indices == other.indices:
# fn = functions.get('rangestart')
# if self.end.indices == other.indices:
# fn = functions.get('rangeend')
# else: # other must be Verseref or RangeVerseref
# fn = functions.get('rangeverseinchapter')
# elif (isinstance(self, Chapterref) and isinstance(other, Verseref)):
# fn = functions.get('verseinchapter')
# return fn(self, other)
# except:
# print("matchweight failed on %s and %s" % (self, other))
# else: # 0 if nothing else fits
# return 0
| StarcoderdataPython |
3400531 | #!/bin/bash
# Usage: <script> SWEEP_PARAMS_FILE CASES_LIST_FILE
# Positional arguments: the sweep-parameter definition file and the
# cases-list file handed to mexdex/prepinputs.py.
sweepParamsFile=$1
casesListFile=$2
# Run the case generator with space-delimited values, newline-delimited
# parameters, no parameter tags, and '=' between parameter and value.
python2 mexdex/prepinputs.py --SR_valueDelimiter " " --SR_paramsDelimiter "\n" --noParamTag --CL_paramValueDelimiter = $sweepParamsFile $casesListFile
| StarcoderdataPython |
8009487 | <reponame>adoreblvnk/code_solutions<gh_stars>0
from typing import List
from collections import defaultdict
# Runtime: 101 ms, faster than 90.91% of Python3 online submissions for Group Anagrams.
# Memory Usage: 18.5 MB, less than 37.03% of Python3 online submissions for Group Anagrams.
# https://leetcode.com/submissions/detail/710152215/
class Solution:
    def groupAnagrams(self, strs: List[str]) -> List[List[str]]:
        """Group the words in ``strs`` into lists of anagrams.

        Words sharing the same multiset of letters (i.e. equal once their
        letters are sorted) land in the same group; the sorted-letter tuple
        serves as the hash key.

        Fixed: return an actual ``list`` as the annotation promises -- the
        original returned a ``dict_values`` view.
        """
        groups = defaultdict(list)  # sorted-letter key -> words sharing it
        for word in strs:
            groups[tuple(sorted(word))].append(word)
        return list(groups.values())
# Ad-hoc demo: prints the grouped anagrams when the module is run directly.
print(Solution().groupAnagrams(["eat", "tea", "tan", "ate", "nat", "bat"]))
| StarcoderdataPython |
3436428 | <reponame>Dreem-Organization/bender-api
from __future__ import unicode_literals
from django.contrib.postgres.fields import JSONField
from .algo import Algo
from django.db import models
class Parameter(models.Model):
    """One tunable parameter of an Algo, together with its search space."""

    # Valid values for `category`: how the parameter is sampled/interpreted.
    DESCRIPTIVE = "descriptive"
    CATEGORICAL = "categorical"
    UNIFORM = "uniform"
    NORMAL = "normal"
    LOGNORMAL = "lognormal"
    LOGUNIFORM = "loguniform"
    PARAMETER_TYPE = (
        (DESCRIPTIVE, "Descriptive parameter"),
        (CATEGORICAL, "Categorical parameter"),
        (UNIFORM, "Uniformly distributed parameter"),
        (NORMAL, "Normally distributed parameter"),
        (LOGNORMAL, "Log-Normally distributed parameter"),
        (LOGUNIFORM, "Log-Uniformly distributed parameter"),
    )
    # NOTE(review): no on_delete argument is given, so this model targets
    # Django < 2.0 (where it defaulted to CASCADE) -- confirm before upgrading.
    algo = models.ForeignKey(Algo, related_name="parameters")
    name = models.CharField(max_length=150)
    category = models.CharField(choices=PARAMETER_TYPE, max_length=50, blank=True, null=True)
    # Distribution-specific settings (choices, bounds, mu/sigma, ...);
    # presumably its shape depends on `category` -- verify against producers.
    search_space = JSONField(blank=True, null=True)

    class Meta:
        # A parameter name is unique within its algo.
        unique_together = (("algo", "name"),)

    def __str__(self):
        return self.name
| StarcoderdataPython |
1906867 | <reponame>hfchong/dvc
import signal
import threading
from dvc.logger import Logger
class SignalHandler(object):
    """Context manager that ignores SIGINT while a critical section runs.

    On entry (main thread only, since signal handlers may only be
    installed there) it swaps the SIGINT handler for a no-op logger; on
    exit it restores the previous handler.
    """

    def __enter__(self):
        # Use the public threading.main_thread() API instead of the
        # private threading._MainThread class the original relied on.
        if threading.current_thread() is threading.main_thread():
            self.old_handler = signal.signal(signal.SIGINT, self.handler)

    def handler(self, sig, frame):
        """Installed SIGINT handler: log the signal and otherwise swallow it."""
        Logger.debug('Ignoring SIGINT during critical parts of code...')

    def __exit__(self, type, value, traceback):
        # Only the main thread installed a handler, so only it restores one.
        if threading.current_thread() is threading.main_thread():
            signal.signal(signal.SIGINT, self.old_handler)
| StarcoderdataPython |
3424579 | <reponame>RuijieYu/algo<filename>data/matrix.py
#!/usr/bin/env -S python3 -i
# pylint: disable=invalid-name
import typing;
import io;
from .. import iters;
from ..funcs import keyDispatch as kd;
from ..funcs import typeDispatch as td;
class Matrix:
    """A simple 2-D integer matrix backed by a list of row lists.

    Construction is dispatched either on the type of ``source`` (file-like
    object, tuple) or on the ``width``/``height`` keywords, via the
    project's keyword-priority and type-dispatch decorators.
    """
    __slots__: typing.Tuple[str] = (
        '_matrix',
        # '__dict__',
        #'__weakref__',
    );
    def __init__(
            self: 'Matrix', source: typing.Any = None,
            *args, **kwargs) -> None:
        '''
        Initialize a Matrix depending on the type of `source` or on
        keywords.

        With keywords `width` and `height` (ints): build a zero-filled
        matrix (optionally filled with `value`).
        With `source` an `io.IOBase` (file): read one row per line,
        splitting on `delim` (default comma).
        With `source` a `Tuple[int, int]`: initialize with zeros,
        interpreting the tuple as (row, col) size.
        '''
        self._matrix: typing.List[typing.List[int]] = [];
        # Delegate to the dispatching constructor below.
        Matrix.__construct(self, source, *args, **kwargs);
    @kd.keywordPriorityDispatch
    # default func is methodDispatch
    @td.methodDispatch
    def __construct(self: 'Matrix', *_, **__) -> None:
        # Fallback when no registered overload matches: leave matrix empty.
        pass
    @__construct.__wrapped__.register
    def _(self, source: io.IOBase, *_, delim: str = ',', **__) -> None:
        # File-like source: each line is a row of `delim`-separated ints.
        self.__init__();
        for line in source:
            self._matrix.append(list(int(num) for num in line.split(delim)));
    @__construct.__wrapped__.register
    def _(self, source: tuple, *_, **__) -> None:
        'Initialize with zeros, given width and height'
        if len(source) < 2:
            raise ValueError;
        if not all(isinstance(elem, int) for elem in source):
            raise TypeError;
        # initialize with keywords
        self.__init__(width=source[0], height=source[1]);
    @__construct.register('width', 'height')
    def _(
            self, *_,
            width: int, height: int,
            value: int = 0, **__) -> None:
        '''
        Initialize an empty matrix with given width and height;
        optionally take a value to initialize with
        '''
        # default init
        self.__init__();
        if not all(isinstance(elem, int) for elem in (width, height)):
            raise TypeError;
        # row
        for _ in range(width):
            # append
            self._matrix.append([]);
            # col
            for _ in range(height):
                self._matrix[-1].append(value);
    def __repr__(self: 'Matrix') -> str:
        'Formal representation of matrix'
        # print(f'object: {object.__repr__(self)}');
        return (
            '<'
            f'{self.__class__.__module__}.'
            f'{self.__class__.__qualname__}'
            f' object at {hex(id(self))}'
            f'; width={self.width}, height={self.height}>'
        );
    def __str__(self: 'Matrix') -> str:
        'Informal representation of a Matrix object'
        # Rows are comma-joined, then joined with semicolons.
        def _row(lst: typing.List[int]) -> str:
            return f'[{",".join(str(num) for num in lst)}]';
        return f'[{";".join(_row(row) for row in self._matrix)}]';
    @property
    def prettyStr(self: 'Matrix') -> str:
        'A descriptor returning a somewhat prettier string representation'
        return str(self).replace(';', '\n ').replace(',', ',\t');
    def pretty(self: 'Matrix') -> None:
        'Print the prettyStr'
        print(self.prettyStr);
    @td.methodDispatch
    def __getitem__(self: 'Matrix', index: typing.Any) -> int:
        '''
        Return item indicated by `index`.
        If `index` is `tuple[int row, int col]`: return the item at that
        (row, col) position. Integer indexing is not implemented.
        '''
        raise NotImplementedError;
    @__getitem__.register
    def _(self, index: tuple) -> int:
        # Tuple overload: (row, col) lookup.
        assert len(index) >= 2, f'Insufficient length (at least 2): {index}';
        assert all(isinstance(elem, int) for elem in index), \
            'All elements of `index` should be of index type';
        return self._matrix[index[0]][index[1]];
    @td.methodDispatch
    def __setitem__(self: 'Matrix', index: typing.Any, value: int) -> None:
        '''
        Set value of item indicated by `index`.
        If `index` is `tuple[int row, int col]`: set the value at that
        (row, col) position. Integer indexing is not implemented.
        '''
        raise NotImplementedError;
    @__setitem__.register
    def _(self, index: tuple, value: int) -> None:
        # Tuple overload: (row, col) assignment.
        assert len(index) >= 2, f'Insufficient length (at least 2): {index}';
        assert all(isinstance(elem, int) for elem in index), \
            'All elements of `index` should be of index type';
        self._matrix[index[0]][index[1]] = value;
    @property
    def height(self: 'Matrix') -> int:
        'Return the height of the matrix'
        return len(self._matrix);
    @property
    def width(self: 'Matrix') -> int:
        'Return the width of the matrix; if uneven, raise error'
        if not self.height:
            return 0;
        wid: int = len(self._matrix[0]);
        # All rows must agree on the width, otherwise the matrix is ragged.
        if all(wid == len(row) for row in self._matrix):
            return wid;
        raise ValueError('Uneven matrix encountered.');
    def __iter__(self: 'Matrix') -> typing.Iterator[int]:
        '''
        Return a flat iterator for all elements of the matrix;
        order of elements is not necessarily retained
        '''
        return iters.iterAppend(
            iter(row) for row in self._matrix
        );
def debug() -> None:
    """Manual smoke test: build a zero matrix and a one-filled matrix."""
    # pylint: disable=unused-variable
    zeros = Matrix(width=3, height=3)
    ones = Matrix(width=3, height=3, value=1)
__all__: typing.Tuple[str, ...] = (
'Matrix',
);
| StarcoderdataPython |
3445570 | from bs4 import BeautifulSoup
from ._helpers import text_values, lists2dict, MetaList
def docket(oscn_html):
    """Parse the docket table out of an OSCN case HTML page.

    Returns a MetaList of per-row dicts keyed by the lower-cased column
    headers, plus an ``html`` key holding the raw row markup. The list's
    ``text`` attribute carries the whole table's text. Rows with a blank
    date inherit the most recent preceding row's date.
    """
    soup = BeautifulSoup(oscn_html, "html.parser")
    docket_table = soup.find("table", "docketlist")
    thead = docket_table.find("thead").find_all("th")
    rows = docket_table.find("tbody").find_all("tr")
    minutes = MetaList()
    minutes.text = docket_table.text
    # make a lower case list of column headers
    columns = [hdr.lower() for hdr in text_values(thead)]
    for row in rows:
        cells = row.find_all("td")
        values = text_values(cells)
        minute = lists2dict(columns, values)
        minute["html"] = row.decode()
        minutes.append(minute)
    # Carry the last seen date forward into blank-date rows.
    # Fixed: guard against a docket with no rows -- the original indexed
    # minutes[0] unconditionally and raised IndexError on an empty table.
    if minutes:
        saved_date = minutes[0]["date"]
        for entry in minutes:
            if entry["date"]:
                saved_date = entry["date"]
            else:
                entry["date"] = saved_date
    return minutes
setattr(docket, "target", ["Case"])
setattr(docket, "_default_value", [])
| StarcoderdataPython |
9688795 | <reponame>hhding/fio-plot<filename>fio_plot/fiolib/dataimport.py
import os
import sys
import csv
import pprint as pprint
import statistics
import fiolib.supporting as supporting
def list_fio_log_files(directory):
    """Lists all .log files in a directory. Exits with an error if no files are found.

    Returns absolute paths so downstream open() calls do not depend on
    the current working directory. Note: this terminates the whole
    process via sys.exit(1) when nothing matches.
    """
    absolute_dir = os.path.abspath(directory)
    files = os.listdir(absolute_dir)
    fiologfiles = []
    for f in files:
        if f.endswith(".log"):
            fiologfiles.append(os.path.join(absolute_dir, f))
    if len(fiologfiles) == 0:
        print("Could not find any log \
            files in the specified directory " + str(absolute_dir))
        sys.exit(1)
    return fiologfiles
def return_folder_name(filename):
    """Name of the directory that immediately contains ``filename``."""
    parent = os.path.dirname(filename)
    return os.path.basename(parent)
def return_filename_filter_string(settings):
    """Build one search-string descriptor per (type, iodepth, numjobs) combo.

    Each descriptor is a dict with the individual attributes plus a
    'searchstring' key formatted the way the benchmark names its log
    files, used later to filter candidate files.
    """
    descriptors = []
    rw = settings['rw']
    for benchtype in settings['type']:
        for iodepth in settings['iodepth']:
            for numjobs in settings['numjobs']:
                token = f"{rw}-iodepth-{iodepth}-numjobs-{numjobs}_{benchtype}"
                descriptors.append({
                    'rw': rw,
                    'iodepth': iodepth,
                    'numjobs': numjobs,
                    'type': benchtype,
                    'searchstring': token,
                })
    return descriptors
def filterLogFiles(settings, file_list):
    """Returns a list of log files that matches the supplied filter string(s).

    Each match is a dict combining the filename, its parent directory
    name, and the matching filter attributes. Exits the process when
    nothing matches.
    """
    descriptors = return_filename_filter_string(settings)
    matches = []
    for filename in file_list:
        for descriptor in descriptors:
            if descriptor['searchstring'] in filename:
                record = {'filename': filename}
                record.update(descriptor)
                record['directory'] = return_folder_name(filename)
                matches.append(record)
    if matches:
        return matches
    print(f"\nNo log files found that matches the specified parameter {settings['rw']}\n")
    exit(1)
def getMergeOperation(datatype):
    """Pick the reduction used to fold per-job log values into one series.

    fio writes one log file per job thread; throughput figures (iops, bw)
    are summed across jobs, while latency figures and timestamps are
    averaged. Raises KeyError for an unknown datatype, matching the
    original dict lookup.
    """
    if datatype in ('iops', 'bw'):
        return sum
    if datatype in ('lat', 'clat', 'slat', 'timestamp'):
        return statistics.mean
    raise KeyError(datatype)
def mergeSingleDataSet(data, datatype):
    """In this function we merge all data for one particular set of files.
    For example, iodepth = 1 and numjobs = 8. The function returns one single
    dataset containing the summed/averaged data.

    Returns {'read': [...], 'write': [...]}, each a list of
    (merged timestamp, merged value) tuples.
    """
    mergedSet = {'read': [], 'write': []}
    # fio encodes the direction in the 'rwt' CSV column: 0 = read, 1 = write.
    lookup = {'read': 0, 'write': 1}
    for rw in ['read', 'write']:
        for column in ['timestamp', 'value']:
            # One series per job file, restricted to the current direction.
            unmergedSet = []
            for record in data:
                templist = []
                for row in record['data']:
                    if int(row['rwt']) == lookup[rw]:
                        templist.append(int(row[column]))
                unmergedSet.append(templist)
            # Values use the datatype-specific reduction (sum for iops/bw,
            # mean for latencies); timestamps are always averaged.
            if column == 'value':
                oper = getMergeOperation(datatype)
            else:
                oper = getMergeOperation(column)
            # zip(*...) truncates to the shortest job series, so trailing
            # samples of longer-running jobs are silently dropped.
            merged = [oper(x) for x in zip(*unmergedSet)]
            mergedSet[rw].append(merged)
        # Pair each averaged timestamp with its merged value.
        mergedSet[rw] = list(zip(*mergedSet[rw]))
    return mergedSet
def get_unique_directories(dataset):
    """Ordered list of distinct 'directory' values found in ``dataset``."""
    seen = []
    for record in dataset:
        directory = record['directory']
        if directory not in seen:
            seen.append(directory)
    return seen
def mergeDataSet(settings, dataset):
    """Merge job-level datasets per (iodepth, numjobs, type) combination.

    Produces one merged record per filter string per source directory, so
    benchmark results from different runs (folders) can be compared.
    """
    mergedSets = []
    filterstrings = return_filename_filter_string(settings)
    for directory in get_unique_directories(dataset):
        for filterstring in filterstrings:
            # Pick the job files belonging to this combination and folder.
            selected = [
                item for item in dataset
                if filterstring['searchstring'] in item['searchstring']
                and item['directory'] == directory
            ]
            mergedSets.append({
                'type': filterstring['type'],
                'iodepth': filterstring['iodepth'],
                'numjobs': filterstring['numjobs'],
                'directory': directory,
                'data': mergeSingleDataSet(selected, filterstring['type']),
            })
    return mergedSets
def parse_raw_cvs_data(dataset):
    """Interpolate fio log records that span more than one second.

    Very slow devices can't keep up with the configured I/O size, so a
    single log record may cover several seconds, skewing the graphs. When
    the mean gap between records exceeds 1000 ms, each record's value is
    averaged over its interval and expanded into one synthetic record per
    second. Otherwise the dataset is returned unchanged.
    """
    new_set = []
    distance_list = []
    for index, item in enumerate(dataset):
        if index == 0:
            continue
        distance = int(item['timestamp']) - int(dataset[index - 1]['timestamp'])
        distance_list.append(distance)
    # Fixed: with zero or one record there are no gaps to measure and
    # statistics.mean([]) raised StatisticsError; such datasets need no
    # interpolation, so return them untouched.
    if not distance_list:
        return dataset
    mean = statistics.mean(distance_list)
    if mean > 1000:
        print(f"{supporting.bcolors.WARNING} WARNING: the storage could not "
              f"keep up with the configured I/O request size. Data is interpolated.{supporting.bcolors.ENDC}")
        for index, item in enumerate(dataset):
            if index == 0:
                # NOTE(review): this average is computed but never appended,
                # so the very first record is effectively dropped -- confirm
                # whether that is intended.
                average_value = int(item['value']) / \
                    int(item['timestamp']) * 1000
            else:
                previous_timestamp = int(dataset[index - 1]['timestamp'])
                distance = int(item['timestamp']) - previous_timestamp
                number_of_seconds = int(distance / 1000)
                average_value = int(item['value']) / distance * 1000
                for x in range(number_of_seconds):
                    temp_dict = dict(item)
                    temp_dict['value'] = average_value
                    # NOTE(review): timestamps look like milliseconds, but
                    # the synthetic records advance by 1 per second -- confirm.
                    temp_dict['timestamp'] = previous_timestamp + x
                    new_set.append(temp_dict)
        return new_set
    else:
        return dataset
def readLogData(inputfile):
    """FIO log data is imported as CSV data. The scope is the import of a
    single file.

    Each CSV row becomes a dict keyed by the fio log columns; the result
    is then passed through parse_raw_cvs_data() for interpolation.
    """
    dataset = []
    if os.path.exists(inputfile):
        with open(inputfile) as csv_file:
            # skipinitialspace: fio pads its CSV fields with a space.
            csv.register_dialect('CustomDialect', skipinitialspace=True,
                                 strict=True)
            csv_reader = csv.DictReader(
                csv_file, dialect='CustomDialect', delimiter=',',
                fieldnames=['timestamp', 'value', 'rwt', 'blocksize', 'offset'])
            for item in csv_reader:
                dataset.append(item)
    # NOTE(review): this also runs when the file is missing, handing an
    # empty list to parse_raw_cvs_data -- confirm that path is handled.
    dataset = parse_raw_cvs_data(dataset)
    return dataset
def readLogDataFromFiles(settings, inputfiles):
    """Returns a list of imported datasets based on the input files.

    Each returned dict carries the original descriptor's keys plus the
    parsed rows under 'data'. ``settings`` is unused here but kept for
    interface symmetry with the other import helpers.
    """
    imported = []
    for descriptor in inputfiles:
        record = {"data": readLogData(descriptor['filename'])}
        record.update(descriptor)
        imported.append(record)
    return imported
| StarcoderdataPython |
11286281 | import io
from PIL import Image
Image.preinit()
class ContentResponseMixin(object):
    """Mixin returning the raw body bytes of an HTTP response."""

    def process_response(self, response):
        """Hand back ``response.content`` untouched."""
        return response.content
class JSONResponseMixin(object):
    """Mixin decoding the response body as JSON."""

    def process_response(self, response):
        """Delegate to the response object's own ``json()`` decoder."""
        return response.json()
class ImageResponseMixin(object):
    """Mixin decoding the response body into a PIL Image."""

    def process_response(self, response):
        """Wrap the body bytes in a BytesIO and let PIL open them."""
        buffer = io.BytesIO(response.content)
        return Image.open(buffer)
class MagicResponseMixin(object):
    """Mixin that picks a processing strategy from the Content-Type header.

    JSON bodies are parsed, any image MIME type known to PIL is decoded
    into an Image, and everything else falls back to the raw bytes.
    """

    _content_types_mixins = dict({
        "application/json": JSONResponseMixin,
    }, **dict.fromkeys(Image.MIME.values(), ImageResponseMixin))

    def process_response(self, response):
        """Dispatch on the media type (parameters stripped) of ``response``."""
        media_type = response.headers.get("Content-Type").split(';')[0]
        handler = self._content_types_mixins.get(media_type, ContentResponseMixin)
        return handler.process_response(self, response)
| StarcoderdataPython |
6449976 | <reponame>Willyoung2017/doc-qa
from typing import List, Optional
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import pairwise_distances
from docqa.utils import flatten_iterable
from docqa.data_processing.text_utils import NltkPlusStopWords, ParagraphWithInverse
from docqa.configurable import Configurable
from docqa.triviaqa.evidence_corpus import TriviaQaEvidenceCorpusTxt
"""
Splits a document into paragraphs
"""
class ExtractedParagraph(object):
    """A span of merged source paragraphs within a document.

    ``text`` holds the merged paragraphs as lists of tokens; ``start``
    and ``end`` are the span's token offsets in the source document.
    """
    __slots__ = ["text", "start", "end"]

    def __init__(self, text: List[List[str]], start: int, end: int):
        """
        :param text: List of source paragraphs that have been merged to form `self`
        :param start: start token of this text in the source document
        :param end: end token of this text in the source document
        """
        self.text = text
        self.start = start
        self.end = end

    @property
    def n_context_words(self):
        """Total token count across all merged sentences."""
        total = 0
        for sentence in self.text:
            total += len(sentence)
        return total
class ExtractedParagraphWithAnswers(ExtractedParagraph):
    """Extracted paragraph that also records answer-span token offsets."""
    __slots__ = ["answer_spans"]

    def __init__(self, text: List[List[str]], start: int, end: int, answer_spans: np.ndarray):
        super().__init__(text, start, end)
        self.answer_spans = answer_spans
class DocParagraphWithAnswers(ExtractedParagraphWithAnswers):
    """Answer-bearing paragraph that also remembers its source document id."""
    __slots__ = ["doc_id"]

    def __init__(self, text: List[List[str]], start: int, end: int, answer_spans: np.ndarray,
                 doc_id):
        super().__init__(text, start, end, answer_spans)
        self.doc_id = doc_id
class ParagraphFilter(Configurable):
    """ Selects and ranks paragraphs """

    def prune(self, question, paragraphs: List[ExtractedParagraph]) -> List[ExtractedParagraph]:
        """Subclasses return the kept paragraphs, possibly reordered."""
        raise NotImplementedError()
class FirstN(ParagraphFilter):
    """Keep the ``n`` paragraphs appearing earliest in the document."""

    def __init__(self, n):
        self.n = n

    def prune(self, question, paragraphs: List[ExtractedParagraphWithAnswers]):
        """Sort by document position and truncate to the first ``n``."""
        by_position = sorted(paragraphs, key=lambda paragraph: paragraph.start)
        return by_position[:self.n]
class ContainsQuestionWord(ParagraphFilter):
    """Keep paragraphs sharing at least one non-stopword with the question.

    The document's first paragraph can optionally always be kept, and the
    result can be truncated to ``n_paragraphs``.
    """

    def __init__(self, stop, allow_first=True, n_paragraphs: int=None):
        self.stop = stop
        self.allow_first = allow_first
        self.n_paragraphs = n_paragraphs

    def prune(self, question, paragraphs: List[ExtractedParagraphWithAnswers]):
        """Return paragraphs containing a lower-cased question word."""
        question_words = {token.lower() for token in question} - self.stop.words
        kept = []
        for paragraph in paragraphs:
            if self.allow_first and paragraph.start == 0:
                kept.append(paragraph)
                continue
            has_match = any(
                token.lower() in question_words
                for sentence in paragraph.text
                for token in sentence
            )
            if has_match:
                kept.append(paragraph)
        if self.n_paragraphs is not None:
            kept = kept[:self.n_paragraphs]
        return kept
class TopTfIdf(ParagraphFilter):
    """Rank paragraphs by TF-IDF cosine distance to the question and keep the closest.

    :param stop: stop-word provider; `stop.words` is passed to the vectorizer
    :param n_to_select: number of paragraphs to keep
    :param filter_dist_one: additionally drop paragraphs with no lexical overlap
                            (cosine distance >= 1)
    :param rank: stored for config compatibility; not read within this class
    """

    def __init__(self, stop, n_to_select: int, filter_dist_one: bool=False, rank=True):
        self.stop = stop
        self.rank = rank
        self.n_to_select = n_to_select
        self.filter_dist_one = filter_dist_one

    def _rank_paragraphs(self, question, paragraphs):
        """Shared TF-IDF pipeline for `prune`/`dists`.

        Returns (sorted_ix, distances), or (None, None) when vectorization fails
        (e.g. every token is a stop word, which raises ValueError).
        """
        tfidf = TfidfVectorizer(strip_accents="unicode", stop_words=self.stop.words)
        text = [" ".join(" ".join(s) for s in para.text) for para in paragraphs]
        try:
            para_features = tfidf.fit_transform(text)
            q_features = tfidf.transform([" ".join(question)])
        except ValueError:
            return None, None
        distances = pairwise_distances(q_features, para_features, "cosine").ravel()
        # in case of ties, prefer the earlier paragraph
        sorted_ix = np.lexsort(([x.start for x in paragraphs], distances))
        return sorted_ix, distances

    def prune(self, question, paragraphs: List[ExtractedParagraph]):
        """Return the `n_to_select` paragraphs closest to `question`."""
        if not self.filter_dist_one and len(paragraphs) == 1:
            return paragraphs
        sorted_ix, distances = self._rank_paragraphs(question, paragraphs)
        if sorted_ix is None:
            return []
        selected = sorted_ix[:self.n_to_select]
        if self.filter_dist_one:
            return [paragraphs[i] for i in selected if distances[i] < 1.0]
        return [paragraphs[i] for i in selected]

    def dists(self, question, paragraphs: List[ExtractedParagraph]):
        """Like `prune`, but return (paragraph, cosine_distance) pairs."""
        sorted_ix, distances = self._rank_paragraphs(question, paragraphs)
        if sorted_ix is None:
            return []
        selected = sorted_ix[:self.n_to_select]
        if self.filter_dist_one:
            return [(paragraphs[i], distances[i]) for i in selected if distances[i] < 1.0]
        return [(paragraphs[i], distances[i]) for i in selected]
class ShallowOpenWebRanker(ParagraphFilter):
    """Linear paragraph scorer combining TF-IDF distance, position and word-match features."""
    # Hard-coded weights learned from a logistic regression classifier
    TFIDF_W = 5.13365065
    LOG_WORD_START_W = 0.46022765
    FIRST_W = -0.08611607
    LOWER_WORD_W = 0.0499123
    WORD_W = -0.15537181
    def __init__(self, n_to_select):
        # number of paragraphs `prune` keeps
        self.n_to_select = n_to_select
        self._stop = NltkPlusStopWords(True).words
        # NOTE(review): this vectorizer is re-fit on every score_paragraphs call,
        # so pre-creating it here only saves constructor overhead
        self._tfidf = TfidfVectorizer(strip_accents="unicode", stop_words=self._stop)
    def get_features(self, question: List[str], paragraphs: List[List[ExtractedParagraphWithAnswers]]):
        """Return the scores as a (n_paragraphs, 1) feature column."""
        scores = self.score_paragraphs(question, flatten_iterable(paragraphs))
        return np.expand_dims(scores, 1)
    def get_feature_names(self):
        return ["Score"]
    def score_paragraphs(self, question, paragraphs: List[ExtractedParagraphWithAnswers]):
        """Compute one scalar score per paragraph; lower is better (see `prune`)."""
        tfidf = self._tfidf
        text = []
        for para in paragraphs:
            text.append(" ".join(" ".join(s) for s in para.text))
        try:
            para_features = tfidf.fit_transform(text)
            q_features = tfidf.transform([" ".join(question)])
        except ValueError:
            # vectorization failed (e.g. only stop words) -> no scores
            return []
        # question words, case-sensitive and lowercased variants (stop words removed)
        q_words = {x for x in question if x.lower() not in self._stop}
        q_words_lower = {x.lower() for x in q_words}
        # column 0: exact-case matches found, column 1: case-insensitive-only matches
        word_matches_features = np.zeros((len(paragraphs), 2))
        for para_ix, para in enumerate(paragraphs):
            found = set()
            found_lower = set()
            for sent in para.text:
                for word in sent:
                    if word in q_words:
                        found.add(word)
                    elif word.lower() in q_words_lower:
                        found_lower.add(word.lower())
            word_matches_features[para_ix, 0] = len(found)
            word_matches_features[para_ix, 1] = len(found_lower)
        # NOTE(review): `tfidf` is re-bound here from the vectorizer to the
        # cosine-distance vector — shadowing kept for byte-compatibility
        tfidf = pairwise_distances(q_features, para_features, "cosine").ravel()
        starts = np.array([p.start for p in paragraphs])
        log_word_start = np.log(starts/400.0 + 1)
        first = starts == 0
        scores = tfidf * self.TFIDF_W + self.LOG_WORD_START_W * log_word_start + self.FIRST_W * first +\
                 self.LOWER_WORD_W * word_matches_features[:, 1] + self.WORD_W * word_matches_features[:, 0]
        return scores
    def prune(self, question, paragraphs: List[ExtractedParagraphWithAnswers]):
        """Keep the `n_to_select` lowest-scoring paragraphs."""
        scores = self.score_paragraphs(question, paragraphs)
        sorted_ix = np.argsort(scores)
        return [paragraphs[i] for i in sorted_ix[:self.n_to_select]]
    def __getstate__(self):
        # only the config is pickled; the vectorizer/stop words are rebuilt on load
        return dict(n_to_select=self.n_to_select)
    def __setstate__(self, state):
        return self.__init__(state['n_to_select'])
class DocumentSplitter(Configurable):
    """ Re-organize a collection of tokenized paragraphs into `ExtractedParagraph`s """
    @property
    def max_tokens(self):
        """ max number of tokens a paragraph from this splitter can have, or None """
        return None
    @property
    def reads_first_n(self):
        """ only requires the first `n` tokens of the documents, or None """
        return None
    def split(self, doc: List[List[List[str]]]) -> List[ExtractedParagraph]:
        """
        Splits a list paragraphs->sentences->words to a list of `ExtractedParagraph`
        """
        raise NotImplementedError()
    def split_annotated(self, doc: List[List[List[str]]], spans: np.ndarray) -> List[ExtractedParagraphWithAnswers]:
        """
        Split a document and additionally splits answer_span of each paragraph
        """
        out = []
        for para in self.split(doc):
            # keep only spans that fall entirely inside this paragraph,
            # re-based to the paragraph's first token
            para_spans = spans[np.logical_and(spans[:, 0] >= para.start, spans[:, 1] < para.end)] - para.start
            out.append(ExtractedParagraphWithAnswers(para.text, para.start, para.end, para_spans))
        return out
    def split_inverse(self, paras: List[ParagraphWithInverse], delim="\n") -> List[ParagraphWithInverse]:
        """
        Split a document consisting of `ParagraphWithInverse` objects
        `delim` will be added to the original_txt of between each paragraph
        """
        full_para = ParagraphWithInverse.concat(paras, delim)
        split_docs = self.split([x.text for x in paras])
        out = []
        for para in split_docs:
            # Grab the correct inverses and convert back to the paragraph level
            inv = full_para.spans[para.start:para.end]
            text = full_para.get_original_text(para.start, para.end-1)
            # NOTE(review): in-place shift — this mutates the slice of
            # `full_para.spans`; appears intended since `full_para` is local
            inv -= inv[0][0]
            out.append(ParagraphWithInverse(para.text, text, inv))
        return out
class Truncate(DocumentSplitter):
    """ map a document to a single paragraph of the first `max_tokens` tokens """
    # Plain class attribute overrides the base class' read-only `max_tokens`
    # property so that __init__ can assign it as an ordinary instance attribute.
    # (The original defined `def max_tokens(self): return self.max_tokens` here,
    # an always-shadowed method that would recurse forever if ever invoked.)
    max_tokens = None

    def __init__(self, max_tokens: int):
        self.max_tokens = max_tokens

    @property
    def reads_first_n(self):
        # we never look past the first `max_tokens` tokens
        return self.max_tokens

    def split(self, doc: List[List[List[str]]]):
        """Collect sentences until `max_tokens` words are gathered, truncating mid-sentence."""
        output = []
        cur_tokens = 0
        for para in doc:
            for sent in para:
                if cur_tokens + len(sent) > self.max_tokens:
                    # take only what still fits, then stop immediately
                    output.append(sent[:self.max_tokens - cur_tokens])
                    return [ExtractedParagraph(output, 0, self.max_tokens)]
                else:
                    cur_tokens += len(sent)
                    output.append(sent)
        # document was shorter than the budget
        return [ExtractedParagraph(output, 0, cur_tokens)]
class MergeParagraphs(DocumentSplitter):
    """
    Merge paragraphs up to a maximum size. Paragraphs of a larger size will be truncated.
    """
    # Plain class attribute overrides the base class' read-only `max_tokens`
    # property so __init__ can assign it as an ordinary instance attribute
    # (replaces the original always-shadowed, self-recursive `def max_tokens`).
    max_tokens = None

    def __init__(self, max_tokens: int, top_n: int=None):
        self.max_tokens = max_tokens
        self.top_n = top_n  # if set, only the first `top_n` tokens of the document are used

    @property
    def reads_first_n(self):
        return self.top_n

    def split(self, doc: List[List[List[str]]]):
        all_paragraphs = []
        on_doc_token = 0  # the word in the document the current paragraph starts at
        on_paragraph = []  # text we have collected for the current paragraph
        cur_tokens = 0  # number of tokens in the current paragraph
        word_ix = 0
        for para in doc:
            para = flatten_iterable(para)
            n_words = len(para)
            # FIX: honour the `top_n` token budget. The original compared
            # `(word_ix + self.top_n) > self.top_n`, i.e. `word_ix > 0`,
            # which truncated every paragraph after the first one.
            if self.top_n is not None and word_ix + n_words > self.top_n:
                if word_ix == self.top_n:
                    break
                para = para[:self.top_n - word_ix]
                n_words = self.top_n - word_ix
            start_token = word_ix
            end_token = start_token + n_words
            word_ix = end_token
            if cur_tokens + n_words > self.max_tokens:
                if cur_tokens != 0:  # end the current paragraph
                    all_paragraphs.append(ExtractedParagraph(on_paragraph, on_doc_token, start_token))
                    on_paragraph = []
                    cur_tokens = 0
                if n_words >= self.max_tokens:  # either truncate the given paragraph, or begin a new paragraph
                    text = para[:self.max_tokens]
                    all_paragraphs.append(ExtractedParagraph([text], start_token,
                                                             start_token + self.max_tokens))
                    on_doc_token = end_token
                else:
                    on_doc_token = start_token
                    on_paragraph.append(para)
                    cur_tokens = n_words
            else:
                on_paragraph.append(para)
                cur_tokens += n_words
        if len(on_paragraph) > 0:
            all_paragraphs.append(ExtractedParagraph(on_paragraph, on_doc_token, word_ix))
        return all_paragraphs
class PreserveParagraphs(DocumentSplitter):
    """
    Convenience class that preserves the document's natural paragraph delimitation
    """
    def split(self, doc: List[List[List[str]]]):
        """One `ExtractedParagraph` per source paragraph, sentences flattened into one list."""
        out = []
        on_token = 0
        for para in doc:
            # flatten once and reuse (the original called flatten_iterable twice per paragraph)
            flattened_para = flatten_iterable(para)
            end = on_token + len(flattened_para)
            out.append(ExtractedParagraph([flattened_para], on_token, end))
            on_token = end
        return out
def extract_tokens(paragraph: List[List[str]], n_tokens) -> List[List[str]]:
    """Take sentences from `paragraph` until `n_tokens` words are collected,
    truncating the sentence that crosses the budget."""
    taken = []
    used = 0
    for sentence in paragraph:
        remaining = n_tokens - used
        if len(sentence) > remaining:
            # budget exceeded: keep the part that fits (if any) and stop
            if remaining:
                taken.append(sentence[:remaining])
            return taken
        taken.append(sentence)
        used += len(sentence)
    return taken
def test_splitter(splitter: DocumentSplitter, n_sample, n_answer_spans, seed=None):
    """Fuzz-test a splitter: plant random fake answer spans in `n_sample` TriviaQA
    documents and check the split output preserves text, bounds, the token limit
    and the planted spans. Raises ValueError on the first violation."""
    rng = np.random.RandomState(seed)
    corpus = TriviaQaEvidenceCorpusTxt()
    docs = sorted(corpus.list_documents())
    rng.shuffle(docs)
    max_tokens = splitter.max_tokens
    read_n = splitter.reads_first_n
    for doc in docs[:n_sample]:
        text = corpus.get_document(doc, read_n)
        fake_answers = []
        offset = 0
        for para in text:
            flattened = flatten_iterable(para)
            # FIX: draw from the seeded `rng`, not the global `np.random`,
            # so the `seed` argument actually makes this test reproducible
            fake_answer_starts = rng.choice(len(flattened), min(len(flattened)//2, rng.randint(5)), replace=False)
            max_answer_lens = np.minimum(len(flattened) - fake_answer_starts, 30)
            fake_answer_ends = fake_answer_starts + np.floor(rng.uniform() * max_answer_lens).astype(np.int32)
            fake_answers.append(np.concatenate([np.expand_dims(fake_answer_starts, 1), np.expand_dims(fake_answer_ends, 1)], axis=1) + offset)
            offset += len(flattened)
        fake_answers = np.concatenate(fake_answers, axis=0)
        flattened = flatten_iterable(flatten_iterable(text))
        answer_strs = set(tuple(flattened[s:e+1]) for s,e in fake_answers)
        paragraphs = splitter.split_annotated(text, fake_answers)
        for para in paragraphs:
            # renamed from `text` to avoid shadowing the document variable above
            para_tokens = flatten_iterable(para.text)
            if max_tokens is not None and len(para_tokens) > max_tokens:
                raise ValueError("Paragraph len len %d, but max tokens was %d" % (len(para_tokens), max_tokens))
            start, end = para.start, para.end
            if para_tokens != flattened[start:end]:
                raise ValueError("Paragraph is missing text, given bounds were %d-%d" % (start, end))
            for s, e in para.answer_spans:
                if tuple(para_tokens[s:e+1]) not in answer_strs:
                    print(s,e)
                    raise ValueError("Incorrect answer for paragraph %d-%d (%s)" % (start, end, " ".join(para_tokens[s:e+1])))
def show_paragraph_lengths():
    """Print what fraction of (sampled) TriviaQA paragraphs exceed several length cut-offs."""
    corpus = TriviaQaEvidenceCorpusTxt()
    docs = corpus.list_documents()
    np.random.shuffle(docs)
    para_lens = []
    for doc in docs[:5000]:
        for paragraph in corpus.get_document(doc):
            para_lens.append(sum(len(sentence) for sentence in paragraph))
    para_lens = np.array(para_lens)
    for cutoff in [400, 500, 600, 700, 800]:
        print("Over %s: %.4f" % (cutoff, (para_lens > cutoff).sum()/len(para_lens)))
if __name__ == "__main__":
    # Smoke-test MergeParagraphs(200) against 1000 sampled TriviaQA documents.
    test_splitter(MergeParagraphs(200), 1000, 20, seed=0)
    # show_paragraph_lengths()
| StarcoderdataPython |
3552547 | __author__ = 'roehrig'
import time_series_v0x_025deg_3hly
def create_time_series_from_rasters(raster_dir, lrs, layer_fieldname, time_series_filename):
    """Thin pass-through to `time_series_v0x_025deg_3hly.create_time_series_from_rasters`.

    Presumably builds a time-series file from the rasters in `raster_dir` and
    writes it to `time_series_filename` — TODO confirm against the delegate module.
    """
    time_series_v0x_025deg_3hly.create_time_series_from_rasters(raster_dir, lrs, layer_fieldname, time_series_filename)
6553081 | #!/usr/bin/env python
""" A script to build specific fasta databases """
from __future__ import print_function
import argparse
import re
import sys
class Sequence(object):
    """One FASTA record: a header line plus its (possibly line-wrapped) sequence."""

    def __init__(self, header, sequence_parts):
        self.header = header
        self.sequence_parts = sequence_parts
        self._sequence = None  # lazily joined full sequence, built on first access

    @property
    def sequence(self):
        """The full sequence with line wrapping removed (memoised)."""
        if self._sequence is None:
            self._sequence = ''.join(self.sequence_parts)
        return self._sequence

    def print(self, fh=sys.stdout):
        """Write the record back out in FASTA format, one original line per row."""
        print(self.header, file=fh)
        for part in self.sequence_parts:
            print(part, file=fh)
def FASTAReader_gen(fasta_filename):
    """Yield `Sequence` records from a FASTA file, one record at a time."""
    with open(fasta_filename) as fasta_file:
        line = fasta_file.readline()
        while line:
            assert line.startswith('>'), "FASTA headers must start with >"
            header = line.rstrip()
            body = []
            # accumulate sequence lines until the next header (or EOF)
            line = fasta_file.readline()
            while line and not line.startswith('>'):
                body.append(line.rstrip())
                line = fasta_file.readline()
            yield Sequence(header, body)
def target_match(targets, search_entry, pattern):
    """Return the ID captured by `pattern` (last capture group) from the
    upper-cased `search_entry` if it is in `targets`; otherwise None."""
    candidate = search_entry.upper()
    match = pattern.search(candidate)
    if match is None:
        print('No ID match: %s' % candidate, file=sys.stdout)
        return None
    target = match.group(len(match.groups()))
    return target if target in targets else None
def main():
    """Filter a FASTA file by ID list / header regex / length / sequence regex,
    optionally de-duplicating, and write kept entries to the output file."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', required=True, help='Path to input FASTA file')
    parser.add_argument('-o', required=True, help='Path to output FASTA file')
    parser.add_argument('-d', help='Path to discarded entries file')
    # header criteria are mutually exclusive: ID list OR header regexp
    header_criteria = parser.add_mutually_exclusive_group()
    header_criteria.add_argument('--id_list', help='Path to the ID list file')
    parser.add_argument('--pattern', help='regex search pattern for ID in FASTA entry')
    header_criteria.add_argument('--header_regexp', help='Regular expression pattern the header should match')
    # sequence criteria are mutually exclusive: length bounds OR sequence regexp
    sequence_criteria = parser.add_mutually_exclusive_group()
    sequence_criteria.add_argument('--min_length', type=int, help='Minimum sequence length')
    sequence_criteria.add_argument('--sequence_regexp', help='Regular expression pattern the sequence should match')
    parser.add_argument('--max_length', type=int, help='Maximum sequence length')
    parser.add_argument('--dedup', action='store_true', default=False, help='Whether to remove duplicate sequences')
    options = parser.parse_args()
    if options.pattern:
        # require at least one non-(?:...) capture group so target_match can extract an ID
        if not re.match('^.*[(](?![?]:).*[)].*$', options.pattern):
            sys.exit('pattern: "%s" did not include capture group "()" in regex ' % options.pattern)
        pattern = re.compile(options.pattern)
    # NOTE(review): if --id_list is given without --pattern, `pattern` is never
    # bound and target_match below raises NameError — confirm intended usage
    if options.min_length is not None and options.max_length is None:
        options.max_length = sys.maxsize
    if options.header_regexp:
        header_regexp = re.compile(options.header_regexp)
    if options.sequence_regexp:
        sequence_regexp = re.compile(options.sequence_regexp)
    # running counters reported at the end
    work_summary = {'found': 0, 'discarded': 0}
    if options.dedup:
        used_sequences = set()
        work_summary['duplicates'] = 0
    if options.id_list:
        # IDs are stored upper-cased to match target_match's upper-cased search
        targets = set()
        with open(options.id_list) as f_target:
            for line in f_target:
                targets.add(line.strip().upper())
        work_summary['wanted'] = len(targets)
    homd_db = FASTAReader_gen(options.i)
    if options.d:
        discarded = open(options.d, 'w')
    with open(options.o, "w") as output:
        for entry in homd_db:
            print_entry = True
            # header-based filtering
            if options.id_list:
                target_matched_results = target_match(targets, entry.header, pattern)
                if target_matched_results:
                    # each ID is consumed once, so duplicate headers are discarded
                    targets.remove(target_matched_results)
                else:
                    print_entry = False
            elif options.header_regexp:
                if header_regexp.search(entry.header) is None:
                    print_entry = False
            # sequence-based filtering
            if options.min_length is not None:
                sequence_length = len(entry.sequence)
                if not(options.min_length <= sequence_length <= options.max_length):
                    print_entry = False
            elif options.sequence_regexp:
                if sequence_regexp.search(entry.sequence) is None:
                    print_entry = False
            if print_entry:
                if options.dedup:
                    if entry.sequence in used_sequences:
                        work_summary['duplicates'] += 1
                        continue
                    else:
                        used_sequences.add(entry.sequence)
                work_summary['found'] += 1
                entry.print(output)
            else:
                work_summary['discarded'] += 1
                if options.d:
                    entry.print(discarded)
    if options.d:
        discarded.close()
    # report counters, e.g. "found ==> 12"
    for parm, count in work_summary.items():
        print('%s ==> %d' % (parm, count))
# Script entry point.
if __name__ == "__main__":
    main()
| StarcoderdataPython |
11375706 | from unittest.mock import ANY, Mock, call
import pytest
from rapiduino.boards.arduino import Arduino
from rapiduino.components.led.dimmable_led import DimmableLED
from rapiduino.globals.common import OUTPUT
PIN_NUM = 1
TOKEN = ANY
@pytest.fixture
def arduino() -> Mock:
    """Provide a mocked Arduino board so no hardware/serial is touched.

    Note: returns a Mock(spec=Arduino), so call signatures are spec-checked.
    """
    arduino = Mock(spec=Arduino)
    return arduino
@pytest.fixture
def led(arduino: Arduino) -> DimmableLED:
    """A DimmableLED wired to the mocked board on PIN_NUM.

    (The annotation says Arduino, but the fixture actually supplies a Mock.)
    """
    return DimmableLED(arduino, PIN_NUM)
def test_setup(arduino: Mock, led: DimmableLED) -> None:
    """Constructing the LED configures its pin as OUTPUT and writes brightness 0."""
    expected_modes = [call(PIN_NUM, OUTPUT, TOKEN)]
    expected_writes = [call(PIN_NUM, 0, TOKEN)]
    assert arduino.pin_mode.call_args_list == expected_modes
    assert arduino.analog_write.call_args_list == expected_writes
def test_turn_on(arduino: Mock, led: DimmableLED) -> None:
    """turn_on() writes full brightness (255) after the initial 0 from setup."""
    led.turn_on()
    expected = [
        call(PIN_NUM, 0, TOKEN),    # initial write during construction
        call(PIN_NUM, 255, TOKEN),  # turn_on
    ]
    assert arduino.analog_write.call_args_list == expected
def test_turn_off(arduino: Mock, led: DimmableLED) -> None:
    """turn_off() writes 0 again on top of the initial 0 from setup."""
    led.turn_off()
    expected = [
        call(PIN_NUM, 0, TOKEN),  # initial write during construction
        call(PIN_NUM, 0, TOKEN),  # turn_off
    ]
    assert arduino.analog_write.call_args_list == expected
def test_is_on(arduino: Mock, led: DimmableLED) -> None:
    """is_on() reflects the LED state before and after turn_on()."""
    state_before = led.is_on()
    led.turn_on()
    state_after = led.is_on()
    assert state_before is False
    assert state_after is True
def test_is_toggle(arduino: Mock, led: DimmableLED) -> None:
    """Two toggles drive the pin to 255 and back to 0."""
    led.toggle()
    led.toggle()
    expected = [
        call(PIN_NUM, 0, TOKEN),    # initial write during construction
        call(PIN_NUM, 255, TOKEN),  # first toggle: on
        call(PIN_NUM, 0, TOKEN),    # second toggle: off
    ]
    assert arduino.analog_write.call_args_list == expected
def test_brightness_when_on(arduino: Mock, led: DimmableLED) -> None:
    """Changing brightness while on writes the new value to the pin immediately."""
    led.turn_on()
    led.brightness = 100
    expected = [
        call(PIN_NUM, 0, TOKEN),    # initial write during construction
        call(PIN_NUM, 255, TOKEN),  # turn_on
        call(PIN_NUM, 100, TOKEN),  # brightness change applied while on
    ]
    assert arduino.analog_write.call_args_list == expected
    assert led.brightness == 100
def test_brightness_when_off(arduino: Mock, led: DimmableLED) -> None:
    """Changing brightness while off is stored but not written to the pin."""
    led.brightness = 100
    expected = [call(PIN_NUM, 0, TOKEN)]  # only the initial write from construction
    assert arduino.analog_write.call_args_list == expected
    assert led.brightness == 100
def test_non_default_brightness_is_set_when_turning_on(
    arduino: Mock, led: DimmableLED
) -> None:
    """A brightness set while off is the value written by a later turn_on()."""
    led.brightness = 100
    led.turn_on()
    expected = [
        call(PIN_NUM, 0, TOKEN),    # initial write during construction
        call(PIN_NUM, 100, TOKEN),  # turn_on uses the stored brightness, not 255
    ]
    assert arduino.analog_write.call_args_list == expected
    assert led.brightness == 100
| StarcoderdataPython |
9780195 | <filename>Wrappers/Python/cil/optimisation/operators/ChannelwiseOperator.py
# -*- coding: utf-8 -*-
# This work is part of the Core Imaging Library (CIL) developed by CCPi
# (Collaborative Computational Project in Tomographic Imaging), with
# substantial contributions by UKRI-STFC and University of Manchester.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from cil.framework import ImageData
from cil.optimisation.operators import LinearOperator
from cil.framework import ImageGeometry, AcquisitionGeometry, BlockGeometry
class ChannelwiseOperator(LinearOperator):
    r'''ChannelwiseOperator: takes in a single-channel operator op and the
    number of channels to be used, and creates a new multi-channel
    ChannelwiseOperator, which will apply the operator op independently on
    each channel for the number of channels specified.
    ChannelwiseOperator supports simple operators as input but not
    BlockOperators. Typically if such behaviour is desired, it can be achieved
    by creating instead a BlockOperator of ChannelwiseOperators.
    :param op: Single-channel operator
    :param channels: Number of channels
    :param dimension: 'prepend' (default) or 'append' channel dimension onto existing dimensions
    '''
    def __init__(self, op, channels, dimension='prepend'):
        dom_op = op.domain_geometry()
        ran_op = op.range_geometry()
        geom_mc = []
        # Create multi-channel domain and range geometries: Clones of the
        # input single-channel geometries but with the specified number of
        # channels and additional dimension_label 'channel'.
        for geom in [dom_op,ran_op]:
            if dimension == 'prepend':
                new_dimension_labels = ['channel']+list(geom.dimension_labels)
            elif dimension == 'append':
                new_dimension_labels = list(geom.dimension_labels)+['channel']
            else:
                raise Exception("dimension must be either 'prepend' or 'append'")
            if isinstance(geom, ImageGeometry):
                geom_channels = geom.copy()
                geom_channels.channels = channels
                geom_channels.dimension_labels = new_dimension_labels
                geom_mc.append(geom_channels)
            elif isinstance(geom, AcquisitionGeometry):
                geom_channels = geom.copy()
                # AcquisitionGeometry stores the channel count inside its config
                geom_channels.config.channels.num_channels = channels
                geom_channels.dimension_labels = new_dimension_labels
                geom_mc.append(geom_channels)
            elif isinstance(geom,BlockGeometry):
                raise Exception("ChannelwiseOperator does not support BlockOperator as input. Consider making a BlockOperator of ChannelwiseOperators instead.")
            else:
                # NOTE(review): unknown geometry types are silently skipped here,
                # which would leave geom_mc short — confirm this is intended
                pass
        super(ChannelwiseOperator, self).__init__(domain_geometry=geom_mc[0],
                                           range_geometry=geom_mc[1])
        self.op = op
        self.channels = channels
    def direct(self,x,out=None):
        '''Returns D(x)'''
        # Loop over channels, extract single-channel data, apply single-channel
        # operator's direct method and fill into multi-channel output data set.
        if out is None:
            output = self.range_geometry().allocate()
            # reusable single-channel scratch buffer
            cury = self.op.range_geometry().allocate()
            for k in range(self.channels):
                self.op.direct(x.subset(channel=k),cury)
                output.fill(cury.as_array(),channel=k)
            return output
        else:
            cury = self.op.range_geometry().allocate()
            for k in range(self.channels):
                self.op.direct(x.subset(channel=k),cury)
                out.fill(cury.as_array(),channel=k)
    def adjoint(self,x, out=None):
        '''Returns D^{*}(y)'''
        # Loop over channels, extract single-channel data, apply single-channel
        # operator's adjoint method and fill into multi-channel output data set.
        if out is None:
            output = self.domain_geometry().allocate()
            cury = self.op.domain_geometry().allocate()
            for k in range(self.channels):
                self.op.adjoint(x.subset(channel=k),cury)
                output.fill(cury.as_array(),channel=k)
            return output
        else:
            cury = self.op.domain_geometry().allocate()
            for k in range(self.channels):
                self.op.adjoint(x.subset(channel=k),cury)
                out.fill(cury.as_array(),channel=k)
    def calculate_norm(self, **kwargs):
        '''Evaluates operator norm of ChannelwiseOperator: equal to the norm of
        the wrapped single-channel operator, since channels are independent.'''
        return self.op.norm()
| StarcoderdataPython |
8048601 | <gh_stars>0
# Created by <NAME> at 5:23 PM 3/11/2020
#The modules here are inspired from the paper "Learning and Querying Fast Generative Models for Reinforcement Learning"
import torch.nn as nn, time, torch
import numpy as np
# NOTE(review): GPU index 1 is hard-coded; machines with fewer than two CUDA
# devices will fail on the next line — consider making this configurable.
device = torch.device('cuda:1')
torch.cuda.set_device(device)
print("cuda device isss ", device)
class Conv_Stack(nn.Module):
    """conv1 -> LeakyReLU -> conv2 with a residual add -> LeakyReLU -> conv3.

    For the residual add to line up, callers must pick `in_2 == c1` and a
    `k2`/`p2` pair that preserves the spatial size.
    """
    def __init__(self, in_1=16,k1=3,c1=1, in_2=16, k2=3,c2=1,k3=3,c3=1,s1=1,s2=1,s3=1,p1=0,p2=0,p3=0):
        super(Conv_Stack, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=in_1, out_channels=c1, kernel_size=k1, stride=s1, padding= p1)
        self.leakyrelu_1 = nn.LeakyReLU()
        self.conv2 = nn.Conv2d(in_channels=in_2, out_channels= c2, kernel_size=k2, stride=s2, padding=p2) #Size-Expanding convolution
        self.leakyrelu_2 = nn.LeakyReLU()
        self.conv3 = nn.Conv2d(in_channels=c2, out_channels=c3, kernel_size=k3, stride=s3, padding=p3) #op dimension not matched with anything for now

    def forward(self, input):
        hidden = self.leakyrelu_1(self.conv1(input))
        residual = self.conv2(hidden)
        merged = self.leakyrelu_2(hidden + residual)
        return self.conv3(merged)
# space_to_depth operation outputs a copy of the input tensor where values from the height and width dimensions are moved to the depth dimension.
# It transfers an input size of (None, 38,38,64) to (None, 19,19, 256).
#referred from https://discuss.pytorch.org/t/is-there-any-layer-like-tensorflows-space-to-depth-function/3487/15
''' torch.nn.functional.pixel_shuffle does exactly what tf.nn.depth_to_space does,
PyTorch doesn't have any function to do the inverse operation similar to tf.nn.space_to_depth'''
class DepthToSpace(nn.Module):
    """TF-style depth_to_space: move groups of block_size**2 channels into
    block_size x block_size spatial blocks.

    (N, C, H, W) -> (N, C // block_size**2, H * block_size, W * block_size).

    FIX: the original forward passed tensor *sizes* to `permute` (which expects
    dimension indices) and used `^` (bitwise XOR) where `**` was intended, so it
    crashed on any real input. This is the standard view/permute/view rewrite.
    """
    def __init__(self, block_size):
        super(DepthToSpace, self).__init__()
        self.blocksize = block_size

    def forward(self, input):  # input is (N, C, H, W) with C divisible by block_size**2
        n, c, h, w = input.shape
        bs = self.blocksize
        # split the channel axis into (bs, bs, C // bs**2), matching the
        # original code's intended layout
        x = input.view(n, bs, bs, c // (bs ** 2), h, w)
        # interleave each block axis with its spatial axis
        x = x.permute(0, 3, 4, 1, 5, 2).contiguous()
        return x.view(n, c // (bs ** 2), h * bs, w * bs)
class SpaceToDepth(nn.Module):
    """Fold each non-overlapping block_size x block_size spatial block into the
    channel axis: (N, C, H, W) -> (N, C * block_size**2, H // block_size, W // block_size).
    A common trick for lossless spatial dimensionality reduction."""

    def __init__(self, block_size):
        super(SpaceToDepth, self).__init__()
        self.block_size = block_size

    def forward(self, x):
        batch, channels, height, width = x.shape
        bs = self.block_size
        # unfold cuts the image into non-overlapping bs x bs patches,
        # one column per patch
        patches = torch.nn.functional.unfold(x, bs, stride=bs)
        return patches.view(batch, channels * bs * bs, height // bs, width // bs)
class Observation_Encoder(nn.Module):
    """Embed a single 150x150 grayscale frame: two space-to-depth reductions,
    each followed by a conv stack, ending in a LeakyReLU."""

    def __init__(self):
        super(Observation_Encoder, self).__init__()
        self.space_to_depth_1 = SpaceToDepth(4)
        self.conv_stack_1 = Conv_Stack(in_1=16,in_2=16, k1=3, c1=16, k2=5, c2=16, k3=3, c3=64, s1=1, s2=1, s3=1, p1=0,p2=2,p3=0)
        self.space_to_depth_2 = SpaceToDepth(2)
        self.conv_stack_2 = Conv_Stack(in_1 = 256, in_2=32 ,k1=3, c1=32, k2=5, c2=32, k3=3, c3=64, s1=1, s2=1, s3=1, p1=0, p2=2, p3=0)
        self.leaky_relu = nn.LeakyReLU()

    def forward(self, input):
        # only a single (1, 1, 150, 150) frame is supported
        assert input.shape[0] == 1
        assert input.shape[1] == 1
        assert input.shape[2] == 150
        x = self.conv_stack_1(self.space_to_depth_1(input))
        x = self.conv_stack_2(self.space_to_depth_2(x))
        return self.leaky_relu(x)
class Residual_Conv_Stack(nn.Module):
    """Four-conv stack with a skip connection from the input to the output.

    With the default paddings each conv preserves H and W; `conv4` projects to
    `final_op` channels so the skip-add lines up (or broadcasts when the input
    has a single channel)."""

    def __init__(self, in_channelss = 1, k1=3, c1=32, k2=5, c2=32, k3=3, c3=64, s1=1, s2=1, s3=1, p1=1, p2=2, p3=1,final_op=118):
        super(Residual_Conv_Stack, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=in_channelss, out_channels= c1, kernel_size=k1, stride= s1, padding= p1)
        self.leakyrelu_1 = nn.LeakyReLU()
        self.conv2 = nn.Conv2d(in_channels=c1, out_channels= c2, kernel_size=k2, stride= s2, padding= p2)
        self.leakyrelu_2 = nn.LeakyReLU()
        self.conv3 = nn.Conv2d(in_channels=c2, out_channels= c3, kernel_size=k3, stride= s3, padding= p3)
        # extra projection so the residual add has matching channel counts
        self.conv4 = nn.Conv2d(in_channels=c3, out_channels= final_op, kernel_size=3, stride=1, padding=1)

    def forward(self, input):
        out = self.leakyrelu_1(self.conv1(input))
        out = self.leakyrelu_2(self.conv2(out))
        out = self.conv3(out)
        out = self.conv4(out)
        return input + out
class Initial_State_Module(nn.Module):
    """Build the initial latent state s0 from the three most recent frame embeddings."""

    def __init__(self):
        super(Initial_State_Module, self).__init__()
        # in_1=192 because three 64-channel embeddings are concatenated below
        self.conv_stack = Conv_Stack(in_1=192,k1=1,c1=64,in_2=64,k2=3,c2=64,k3=3,c3=64,p1=0,p2=1,p3=0)

    def forward(self, obs_t_minus_2, obs_t_minus_1, obs_t_0):
        # concatenate along the channel axis: 3 x 64 -> 192 channels
        stacked = torch.cat((obs_t_minus_2, obs_t_minus_1, obs_t_0), dim=1)
        return self.conv_stack(stacked)
class Pool_and_Inject(nn.Module):
    """Pool the feature map to a coarse summary, tile it back to full size and
    concatenate it with the input ("pool-and-inject"), then project to 64 channels.
    Spatial sizes are hard-coded for 10x10 inputs (pooled 2x2 tiled by 5)."""

    def __init__(self):
        super(Pool_and_Inject, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=86, out_channels= 32, kernel_size=3, stride= 1, padding= 0)
        self.maxpool = nn.MaxPool2d(kernel_size=3)
        # projection back to 64 channels after the concat (86 + 32 = 118 in)
        self.added_conv = nn.Conv2d(in_channels=118,out_channels=64, kernel_size=3, stride=1,padding=1)

    def forward(self, input):
        pooled = self.maxpool(self.conv1(input))  # (N, 32, 2, 2) for 10x10 input
        # tile the pooled summary back up to the input's spatial size
        tiled = torch.repeat_interleave(pooled, repeats=5, dim=2)
        tiled = torch.repeat_interleave(tiled, repeats=5, dim=3)
        combined = torch.cat((input, tiled), dim=1)
        return self.added_conv(combined)
class State_Transition_Module(nn.Module):
    """Predict the next latent state from (last_state, last_action).

    The 22-dim action vector is broadcast over the state's spatial grid and
    concatenated as extra channels (64 + 22 = 86) before the conv stacks.
    Note: the reshape below hard-codes batch size 1."""

    def __init__(self):
        super(State_Transition_Module, self).__init__()
        self.res_conv_1 = Residual_Conv_Stack(in_channelss= 86, k1=3, c1=32, k2=5, c2=32, k3=3, c3=64, s1=1, s2=1, s3=1, p1=1, p2=2, p3=1, final_op=86)
        self.leaky_relu = nn.LeakyReLU()
        self.pool_and_inject = Pool_and_Inject()
        self.res_conv_2 = Residual_Conv_Stack(in_channelss=64, k1=3, c1=32, k2=5, c2=32, k3=3, c3=64, s1=1, s2=1, s3=1, p1=1, p2=0, p3=3, final_op=64)

    def forward(self, last_state, not_reshaped_last_action):
        # action vector -> (1, 22, 1, 1) so it can be tiled over the state grid
        action_planes = not_reshaped_last_action.reshape((1,22,1,1))
        action_planes = torch.repeat_interleave(action_planes, repeats=last_state.shape[2], dim=2)
        action_planes = torch.repeat_interleave(action_planes, repeats=last_state.shape[3], dim=3)
        x = torch.cat((last_state, action_planes), dim=1)  # (1, 86, H, W)
        x = self.leaky_relu(self.res_conv_1(x))
        x = self.pool_and_inject(x)
        return self.res_conv_2(x)
class Decoder_Module(nn.Module):
    """Decode a latent state into (predicted reward scalar, predicted 150x150 observation).

    NOTE(review): the fixed `in_features` plus `.flatten()` calls hard-code a
    batch size of 1 — confirm before feeding batched states.
    """
    def __init__(self):
        super(Decoder_Module, self).__init__()
        # LHS: reward head
        self.conv1 = nn.Conv2d(in_channels=64, out_channels=24, kernel_size=3)
        self.leaky_relu = nn.LeakyReLU()
        # will flatten after this to feed to a linear layer
        self.linear_1 = nn.Linear(in_features=1*24*8*8, out_features=128) #input shape is 1, 24, 8, 8
        ################For Predicted Reward#############################
        self.linear_2 = nn.Linear(in_features=128, out_features=1)
        #RHS: observation head
        self.conv_stack_1 = Conv_Stack(in_1=64,in_2=32, k1=1, c1=32, k2=5, c2=32, k3=3, c3=64, s1=1, s2=1, s3=1, p1=0,p2=2,p3=0)
        self.leaky_relu_1 = nn.LeakyReLU()
        self.conv_stack_2 = Conv_Stack(in_1=16,in_2=64, k1=3, c1=64, k2=3, c2=64, k3=1, c3=48, s1=1, s2=1, s3=1, p1=0,p2=1,p3=0)
        self.linear_added = nn.Linear(in_features=1*3*56*56, out_features=150*150)
        ################For Predicted Observation########################
    def forward(self, input): # NOTE(review): conv1 expects 64 channels, so the state is presumably (1, 64, 10, 10) — the original comment said 192
        #LHS: reward head -> scalar
        input_1 = self.conv1(input)
        input_1 = self.leaky_relu(input_1) #output shape is 1, 24, 8, 8
        input_1 = self.linear_1(input_1.flatten())
        input_1 = self.linear_2(input_1)
        #RHS: observation head, upsampled twice via pixel_shuffle
        input_2 = self.conv_stack_1(input)
        input_2 = self.leaky_relu_1(input_2)
        input_2 = torch.nn.functional.pixel_shuffle(input_2,2) #output is 1,16,16,16
        input_2 = self.conv_stack_2(input_2)
        input_2 = torch.nn.functional.pixel_shuffle(input_2,2) #output is [1, 3, 56, 56], so flattening gives 1*3*56*56
        input_2 = (self.linear_added(input_2.flatten())) #output is 150*150
        return input_1, input_2
'''
The .view() operation gives you a new view on the tensor without copying any data.
This means view is cheap to call, and you don't need to think too much about potential
performance issues when using it.
'''
6510403 | import bpy
import mathutils
import random
import sys
import json
# This enables the necessary 'impulse.py' plugin (installed from the current
# working directory, then enabled and persisted in the user preferences).
import os
path = os.getcwd()
impulse_path = os.path.join(path, 'impulse.py')
bpy.ops.wm.addon_install(filepath=impulse_path)
bpy.ops.wm.addon_enable(module='impulse')
bpy.ops.wm.save_userpref()
print("Reading config.json file ...")
# NOTE(review): the file handle from open() is never closed here — harmless in
# a short-lived script, but a `with` block would be cleaner.
config = json.load(open('config.json'))
# set default values if not present in config
for k in ['angular_velocity_std', 'linear_velocity_std', 'angular_velocity_mean', 'linear_velocity_mean']:
    if not k in config:
        config[k] = 0.0
# set default values for uniform rotation setting
if not 'uniformly_rotate' in config:
    config['uniformly_rotate'] = True
scn = bpy.context.scene
# A rectangular grid of coins is created by
# copying one coin located at (0.0, 0.0, 50.0)
# point in world coordinate system.
coin_thickness = config['coin_thickness'] # in cm
coin_diameter = config['coin_diameter'] # in cm
n = config['coin_grid_size'] # size of coin grid in every dimension
grid_step = config['coin_grid_step'] # distance between every coin in a grid, cm
angular_velocity_std = config['angular_velocity_std'] # cm/s, standard deviation of normal distribution of angular speeds
linear_velocity_std = config['linear_velocity_std'] # cm/s, standard deviation of normal distribution of linear speeds
angular_velocity_mean = config['angular_velocity_mean'] # cm/s, standard deviation of normal distribution of angular speeds
linear_velocity_mean = config['linear_velocity_mean'] # cm/s, standard deviation of normal distribution of linear speeds
coin_density = config['coin_density'] # grams / cm^3
coin_friction = config['coin_friction'] # friction of coin
coin_restitution = config['coin_restitution'] # bounciness/restitution of coin
table_friction = config['table_friction'] # friction of coin
table_restitution = config['table_restitution'] # bounciness/restitution of coin
uniformly_rotate = config['uniformly_rotate'] # whether to rotate uniformly the coin or not
# Some data sources
# material densities: http://www.semicore.com/reference/density-reference
# friction coefficients: https://en.wikipedia.org/wiki/Friction#Approximate_coefficients_of_friction
# restitution coefficients: https://hypertextbook.com/facts/2006/restitution.shtml
# Configuration of simulation
exit_when_done = config['exit_when_done']
# minimum x and y coordinates of coins in a grid
min_coord = -n * 0.5 * grid_step
pi = 3.1415926535
# names of objects
coin_name = 'Coin'
table_name = 'Table'
print("Creating a grid of %s coins ..." % (n*n))
# set the thickness of a coin
bpy.data.objects[coin_name].dimensions[0] = coin_diameter
bpy.data.objects[coin_name].dimensions[1] = coin_diameter
bpy.data.objects[coin_name].dimensions[2] = coin_thickness
# deselect all the objects
for obj in bpy.data.objects:
obj.select = False
# select the original cylinder
coin_orig = bpy.data.objects[coin_name]
table = bpy.data.objects[table_name]
coin_orig.select = True
# set the rigid body properties
coin_orig.rigid_body.friction = coin_friction
coin_orig.rigid_body.restitution = coin_restitution
# mass = volume * density; density in miligram / cm^3
coin_radius = coin_diameter / 2.0
coin_orig.rigid_body.mass = (pi * coin_thickness * coin_radius * coin_radius) * coin_density * 1000.0
table.rigid_body.friction = table_friction
table.rigid_body.restitution = table_restitution
# create n*n copies; 1 cylinder is already there
for i in range(n*n-1):
bpy.ops.object.duplicate(linked=0,mode='TRANSLATION')
coin_orig.select = False
coins = []
def normal_v_vector(mean=0.0, sigma=1.0, sz=3):
    """Return a tuple of independent normally distributed samples.

    Args:
        mean: a scalar (expanded to ``sz`` components) or a list of means.
        sigma: a scalar (expanded to ``sz`` components) or a list of
            standard deviations.
        sz: number of components when ``mean``/``sigma`` are scalars.
            (The original code accepted ``sz`` but ignored it and always
            expanded scalars to 3 components; default behaviour unchanged.)

    Returns:
        A tuple of ``min(len(mean), len(sigma))`` samples drawn with
        ``random.normalvariate``.
    """
    # if mean is a number, expand it to a vector of length sz
    if not isinstance(mean, list):
        mean = [mean] * sz
    # same for sigma
    if not isinstance(sigma, list):
        sigma = [sigma] * sz
    return tuple(random.normalvariate(m, s) for m, s in zip(mean, sigma))
# code that does uniform rotation of the object
def rotate_uniform(s):
    """Apply rotation such that every orientation of
    object is equally likely. See misc/check_uniform_rotations.blend
    for verification."""
    # Direction drawn from an isotropic 3D Gaussian, so o/|o| is uniform
    # on the sphere. NOTE(review): to_track_quat fixes the roll about the
    # tracked axis, so uniformity over full SO(3) rests on the external
    # .blend verification mentioned above — confirm. A near-zero vector o
    # would also make the direction ill-defined (vanishingly unlikely).
    o = normal_v_vector()
    DirectionVector = mathutils.Vector(o)
    # Temporarily switch to quaternion mode, point local +Z along the
    # random direction (with +Y as up hint), then restore Euler mode.
    s.rotation_mode = 'QUATERNION'
    s.rotation_quaternion = DirectionVector.to_track_quat('Z','Y')
    s.rotation_mode = 'XYZ'
# get all coins
for obj in bpy.data.objects:
if obj.name.startswith(coin_name):
coins.append(obj)
# move the cyllinders
idx = 0
for i in range(n):
for j in range(n):
obj = coins[idx]
idx += 1
# add initial location and orientation
loc = obj.location
obj.location = loc + mathutils.Vector((min_coord + i * grid_step, min_coord + j * grid_step, 0.0))
if uniformly_rotate:
rotate_uniform(obj)
# add object to impulse
bpy.context.scene.objects.active = obj
bpy.ops.rigidbody.impulse_add_object()
# All speeds are in cm/s; The values in Impulse plugin do not take into
# account the scaling of blender units.
obj.impulse_props.v = normal_v_vector(linear_velocity_mean, linear_velocity_std)
obj.impulse_props.av = normal_v_vector(angular_velocity_mean, angular_velocity_std)
# for some reason double call to the function is needed to make it work :/
bpy.ops.rigidbody.impulse_execute()
bpy.ops.rigidbody.impulse_execute()
# calculate necessary physics
print('Simulating falling coins ...')
bpy.ops.ptcache.bake_all(bake=True)
print('Counting the coin orientation ...')
bpy.context.scene.frame_set(1000)
edge_count = 0.0
heads_count = 0.0
tails_count = 0.0
for c in coins:
mesh = c.data
mat = c.matrix_world
# Coordinates of points on heads and tails edge.
# Let coordinate system be located in the center
# of the cylinder of height h. Then two points are
# considered:
# Tails=(0.0, 1.0, -0.5h), Heads=(0.0, 1.0, 0.5h)
# Both points are converted into world coordinate
# system, and thus represent orientation of cylinder.
# relative to the table, which is a flat surface with
# constant z value.
# Tails
tx, ty, tz = mat * mesh.vertices[0].co
# Heads
hx, hy, hz = mat * mesh.vertices[1].co
# if the cylinder is laying, then bz ~ tz
if abs(tz - hz) < 0.01:
edge_count += 1.0
# if the x and y coordinates coincide, the cylinder has
# fallen either on heads or tails
if abs(tx - hx) < 0.01 and abs(ty - hy) < 0.01:
if hz < tz: # coin settled on tails
tails_count += 1.0
else:
heads_count += 1.0
result = {
'Edge': edge_count,
'Tails': tails_count,
'Heads': heads_count,
}
json.dump(
result,
open('result.json', 'w')
)
print(result)
print('Done!')
if exit_when_done:
sys.exit()
| StarcoderdataPython |
273998 | <gh_stars>0
import numpy as np
import pytest
from eski import atoms, drivers, md
class TestDriver:
    """Tests for eski driver construction and a single integration step."""
    @pytest.mark.parametrize(
        "driver_type,parameters",
        [
            (drivers.Driver, []),
            pytest.param(
                drivers.Driver, [0],
                marks=pytest.mark.raises(exception=ValueError)
            ),
            (drivers.EulerIntegrator, [0.1]),
        ]
    )
    def test_create(self, driver_type, parameters, file_regression):
        """Constructing a driver yields a stable repr (base Driver rejects
        any parameters, hence the expected ValueError case)."""
        driver = driver_type(parameters)
        file_regression.check(repr(driver))
    @pytest.mark.parametrize(
        "driver_type,parameters",
        [
            (drivers.Driver, {}),
            (drivers.EulerIntegrator, {"dt": 0.1}),
        ]
    )
    def test_create_from_mapping(self, driver_type, parameters):
        """from_mapping builds the same driver from keyword parameters."""
        driver = driver_type.from_mapping(parameters)
        assert isinstance(driver, driver_type)
    @pytest.mark.parametrize(
        "driver_type,parameters",
        [
            (drivers.EulerIntegrator, [0.1]),
        ]
    )
    def test_update(self, driver_type, parameters, num_regression):
        """One Euler step on a 4-atom system matches the regression data."""
        driver = driver_type(parameters)
        configuration = np.array(
            [[0, 0, 0],
            [1, 0, 0],
            [0, 1, 0],
            [0, 0, 1]], order="c", dtype=float
        )
        velocities = np.array(
            [[0, 0, 0],
            [0, 0, 0],
            [1, 0, 0],
            [0, 0, 1]], order="c", dtype=float
        ).reshape(-1)
        # NOTE(review): `forces` is built but never handed to the system or
        # driver — dead setup or missing argument? Confirm against md.System.
        forces = np.array(
            [[0, 0, 0],
            [1, 0, 0],
            [0, 0, 0],
            [0, 0, -1]], order="c", dtype=float
        ).reshape(-1)
        system = md.System(
            configuration,
            velocities=velocities,
            atoms=[atoms.Atom(mass=1) for _ in range(configuration.shape[0])]
        )
        driver.update(system)
        num_regression.check({
            "configuration": system.configuration,
            "velocities": system.velocities,
        })
| StarcoderdataPython |
class PAMValidationError(Exception):
    """Base exception for Activity Plan (PAM) validation errors."""
class PAMSequenceValidationError(PAMValidationError):
    """Raised when an Activity Plan's activity sequence is invalid."""
class PAMTimesValidationError(PAMValidationError):
    """Raised when an Activity Plan's times are invalid."""
class PAMValidationLocationsError(PAMValidationError):
    """Raised when an Activity Plan's locations are invalid."""
| StarcoderdataPython |
## The source code is from: https://github.com/OniroAI/MonoDepth-PyTorch
from __future__ import absolute_import, division, print_function
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as tf
from .modules_camconv import CamConvModule
class conv(nn.Module):
    """Conv2d -> BatchNorm2d -> ELU, with manual symmetric zero padding of
    (kernel_size - 1) // 2 so a stride-1 convolution preserves spatial size."""
    def __init__(self, num_in_layers, num_out_layers, kernel_size, stride):
        super(conv, self).__init__()
        self.kernel_size = kernel_size
        self.conv_base = nn.Conv2d(num_in_layers, num_out_layers, kernel_size=kernel_size, stride=stride)
        self.normalize = nn.BatchNorm2d(num_out_layers)
    def forward(self, x):
        # pad left/right/top/bottom equally before the unpadded convolution
        pad = (self.kernel_size - 1) // 2
        padded = tf.pad(x, (pad, pad, pad, pad))
        normalized = self.normalize(self.conv_base(padded))
        return tf.elu(normalized, inplace=True)
class convblock(nn.Module):
    """Two stacked `conv` units: a stride-1 convolution followed by a
    stride-2 one, so the block halves the spatial resolution."""
    def __init__(self, num_in_layers, num_out_layers, kernel_size):
        super(convblock, self).__init__()
        self.conv1 = conv(num_in_layers, num_out_layers, kernel_size, 1)
        self.conv2 = conv(num_out_layers, num_out_layers, kernel_size, 2)
    def forward(self, x):
        return self.conv2(self.conv1(x))
class maxpool(nn.Module):
    """Stride-2 max pooling with manual symmetric zero padding of
    (kernel_size - 1) // 2, mirroring the padding scheme used by `conv`."""
    def __init__(self, kernel_size):
        super(maxpool, self).__init__()
        self.kernel_size = kernel_size
    def forward(self, x):
        pad = (self.kernel_size - 1) // 2
        padded = tf.pad(x, (pad, pad, pad, pad))
        return tf.max_pool2d(padded, self.kernel_size, stride=2)
class resconv_basic(nn.Module):
    """Basic ResNet-18 residual block: two 3x3 `conv` units on the main path
    plus a 1x1 projection shortcut, combined with BatchNorm + ELU.

    The original code gated the projection behind a hard-coded
    ``do_proj = True``, leaving an unreachable identity branch; the dead
    code has been removed — the projection is always applied, as before.
    """
    # for resnet18
    def __init__(self, num_in_layers, num_out_layers, stride):
        super(resconv_basic, self).__init__()
        self.num_out_layers = num_out_layers
        self.stride = stride
        self.conv1 = conv(num_in_layers, num_out_layers, 3, stride)
        self.conv2 = conv(num_out_layers, num_out_layers, 3, 1)
        # 1x1 projection so the shortcut matches channels and stride
        self.conv3 = nn.Conv2d(num_in_layers, num_out_layers, kernel_size=1, stride=stride)
        self.normalize = nn.BatchNorm2d(num_out_layers)
    def forward(self, x):
        # main path: downsampling conv (if stride > 1) then stride-1 conv
        x_out = self.conv2(self.conv1(x))
        # projection shortcut (always applied)
        shortcut = self.conv3(x)
        return tf.elu(self.normalize(x_out + shortcut), inplace=True)
def resblock_basic(num_in_layers, num_out_layers, num_blocks, stride):
    """Stack `num_blocks` basic residual blocks: the first applies `stride`
    and changes channel count, the rest are stride-1 with fixed channels."""
    blocks = [resconv_basic(num_in_layers, num_out_layers, stride)]
    blocks.extend(
        resconv_basic(num_out_layers, num_out_layers, 1)
        for _ in range(num_blocks - 1)
    )
    return nn.Sequential(*blocks)
class upconv(nn.Module):
    """Bilinear upsampling by `scale` followed by a stride-1 `conv` unit."""
    def __init__(self, num_in_layers, num_out_layers, kernel_size, scale):
        super(upconv, self).__init__()
        self.scale = scale
        self.conv1 = conv(num_in_layers, num_out_layers, kernel_size, 1)
    def forward(self, x):
        upsampled = nn.functional.interpolate(
            x, scale_factor=self.scale, mode='bilinear', align_corners=True)
        return self.conv1(upsampled)
class get_disp_1ch(nn.Module):
    """Predict a single-channel disparity map with values in (0, 0.3):
    3x3 conv (manually padded to preserve size) -> BatchNorm -> scaled sigmoid."""
    def __init__(self, num_in_layers):
        super(get_disp_1ch, self).__init__()
        self.conv1 = nn.Conv2d(num_in_layers, 1, kernel_size=3, stride=1)
        self.normalize = nn.BatchNorm2d(1)
        self.sigmoid = torch.nn.Sigmoid()
    def forward(self, x):
        padded = tf.pad(x, (1, 1, 1, 1))
        activated = self.sigmoid(self.normalize(self.conv1(padded)))
        # scale sigmoid output into the (0, 0.3) disparity range
        return 0.3 * activated
class Resnet18_MonoDepth_Single(nn.Module):
    """ResNet-18 encoder/decoder for monocular depth, emitting four
    single-channel disparity maps at 1/1, 1/2, 1/4 and 1/8 resolution.

    NOTE(review): forward() stores disp1..disp4 and udisp2..udisp4 as
    instance attributes as a side effect — not thread/reentrancy safe;
    confirm no external code relies on reading these attributes.
    """
    def __init__(self):
        super(Resnet18_MonoDepth_Single, self).__init__()
        # encoder: standard ResNet-18 stem + four basic residual stages
        self.conv1 = conv(3, 64, 7, 2) # H/2 - 64D
        self.pool1 = maxpool(3) # H/4 - 64D
        self.conv2 = resblock_basic(64, 64, 2, 2) # H/8 - 64D
        self.conv3 = resblock_basic(64, 128, 2, 2) # H/16 - 128D
        self.conv4 = resblock_basic(128, 256, 2, 2) # H/32 - 256D
        self.conv5 = resblock_basic(256, 512, 2, 2) # H/64 - 512D
        # decoder: each stage upsamples x2, concatenates the matching encoder
        # skip (and, from stage 3 down, the upsampled coarser disparity),
        # then fuses with a stride-1 conv
        self.upconv6 = upconv(512, 512, 3, 2)
        self.iconv6 = conv(256+512, 512, 3, 1)
        self.upconv5 = upconv(512, 256, 3, 2)
        self.iconv5 = conv(128+256, 256, 3, 1)
        self.upconv4 = upconv(256, 128, 3, 2)
        self.iconv4 = conv(64+128, 128, 3, 1)
        self.disp4_layer = get_disp_1ch(128)
        self.upconv3 = upconv(128, 64, 3, 2)
        self.iconv3 = conv(64+64 + 1, 64, 3, 1)
        self.disp3_layer = get_disp_1ch(64)
        self.upconv2 = upconv(64, 32, 3, 2)
        self.iconv2 = conv(64+32 + 1, 32, 3, 1)
        self.disp2_layer = get_disp_1ch(32)
        self.upconv1 = upconv(32, 16, 3, 2)
        self.iconv1 = conv(16+1, 16, 3, 1)
        self.disp1_layer = get_disp_1ch(16)
        # Xavier init for every Conv2d (including those inside sub-modules)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.xavier_uniform_(m.weight)
    def forward(self, x):
        """Return (disp1, disp2, disp3, disp4), finest first."""
        # encoder
        x1 = self.conv1(x)
        x_pool1 = self.pool1(x1)
        x2 = self.conv2(x_pool1)
        x3 = self.conv3(x2)
        x4 = self.conv4(x3)
        x5 = self.conv5(x4)
        # skips: encoder activations reused at matching decoder resolutions
        skip1 = x1
        skip2 = x_pool1
        skip3 = x2
        skip4 = x3
        skip5 = x4
        # decoder
        upconv6 = self.upconv6(x5)
        concat6 = torch.cat((upconv6, skip5), 1)
        iconv6 = self.iconv6(concat6)
        upconv5 = self.upconv5(iconv6)
        concat5 = torch.cat((upconv5, skip4), 1)
        iconv5 = self.iconv5(concat5)
        upconv4 = self.upconv4(iconv5)
        concat4 = torch.cat((upconv4, skip3), 1)
        iconv4 = self.iconv4(concat4)
        # coarsest disparity; its x2 upsampling feeds the next stage
        self.disp4 = self.disp4_layer(iconv4)
        self.udisp4 = nn.functional.interpolate(self.disp4, scale_factor=2, mode='bilinear', align_corners=True)
        upconv3 = self.upconv3(iconv4)
        concat3 = torch.cat((upconv3, skip2, self.udisp4), 1)
        iconv3 = self.iconv3(concat3)
        self.disp3 = self.disp3_layer(iconv3)
        self.udisp3 = nn.functional.interpolate(self.disp3, scale_factor=2, mode='bilinear', align_corners=True)
        upconv2 = self.upconv2(iconv3)
        concat2 = torch.cat((upconv2, skip1, self.udisp3), 1)
        iconv2 = self.iconv2(concat2)
        self.disp2 = self.disp2_layer(iconv2)
        self.udisp2 = nn.functional.interpolate(self.disp2, scale_factor=2, mode='bilinear', align_corners=True)
        upconv1 = self.upconv1(iconv2)
        concat1 = torch.cat((upconv1, self.udisp2), 1)
        iconv1 = self.iconv1(concat1)
        self.disp1 = self.disp1_layer(iconv1)
        return self.disp1, self.disp2, self.disp3, self.disp4
class Resnet18_MonoDepth_Single_CamConv(nn.Module):
    """Variant of Resnet18_MonoDepth_Single that appends 6 CamConv channels
    (camera-intrinsics coordinate maps) to each skip connection and to the
    bottleneck; the '+ 6' terms in the decoder channel counts account for them.

    NOTE(review): only the first camconv call passes (features, image,
    intrinsic); later calls pass the features alone — presumably
    CamConvModule caches the image/intrinsics from the first call. Confirm
    against CamConvModule.
    """
    def __init__(self):
        super(Resnet18_MonoDepth_Single_CamConv, self).__init__()
        # encoder: same ResNet-18 backbone as the plain model
        self.conv1 = conv(3, 64, 7, 2) # H/2 - 64D
        self.pool1 = maxpool(3) # H/4 - 64D
        self.conv2 = resblock_basic(64, 64, 2, 2) # H/8 - 64D
        self.conv3 = resblock_basic(64, 128, 2, 2) # H/16 - 128D
        self.conv4 = resblock_basic(128, 256, 2, 2) # H/32 - 256D
        self.conv5 = resblock_basic(256, 512, 2, 2) # H/64 - 512D
        # decoder: channel counts widened by +6 wherever a CamConv-augmented
        # tensor is consumed
        self.upconv6 = upconv(512 + 6, 512, 3, 2)
        self.iconv6 = conv(256+512 + 6, 512, 3, 1)
        self.upconv5 = upconv(512, 256, 3, 2)
        self.iconv5 = conv(128+256 + 6, 256, 3, 1)
        self.upconv4 = upconv(256, 128, 3, 2)
        self.iconv4 = conv(64+128 + 6, 128, 3, 1)
        self.disp4_layer = get_disp_1ch(128)
        self.upconv3 = upconv(128, 64, 3, 2)
        self.iconv3 = conv(64+64 + 1 + 6, 64, 3, 1)
        self.disp3_layer = get_disp_1ch(64)
        self.upconv2 = upconv(64, 32, 3, 2)
        self.iconv2 = conv(64+32 + 1 + 6, 32, 3, 1)
        self.disp2_layer = get_disp_1ch(32)
        self.upconv1 = upconv(32, 16, 3, 2)
        self.iconv1 = conv(16+1, 16, 3, 1)
        self.disp1_layer = get_disp_1ch(16)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.xavier_uniform_(m.weight)
        self.camconv = CamConvModule()
    def forward(self, x, intrinsic):
        """Return (disp1, disp2, disp3, disp4) for image `x` with camera
        intrinsics `intrinsic`, finest first."""
        # encoder
        x1 = self.conv1(x)
        x_pool1 = self.pool1(x1)
        x2 = self.conv2(x_pool1)
        x3 = self.conv3(x2)
        x4 = self.conv4(x3)
        x5 = self.conv5(x4)
        # skips, each augmented with 6 CamConv channels
        skip1 = self.camconv(x1, x, intrinsic)
        skip2 = self.camconv(x_pool1)
        skip3 = self.camconv(x2)
        skip4 = self.camconv(x3)
        skip5 = self.camconv(x4)
        # decoder (same cascade as the plain model)
        upconv6 = self.upconv6(self.camconv(x5))
        concat6 = torch.cat((upconv6, skip5), 1)
        iconv6 = self.iconv6(concat6)
        upconv5 = self.upconv5(iconv6)
        concat5 = torch.cat((upconv5, skip4), 1)
        iconv5 = self.iconv5(concat5)
        upconv4 = self.upconv4(iconv5)
        concat4 = torch.cat((upconv4, skip3), 1)
        iconv4 = self.iconv4(concat4)
        self.disp4 = self.disp4_layer(iconv4)
        self.udisp4 = nn.functional.interpolate(self.disp4, scale_factor=2, mode='bilinear', align_corners=True)
        upconv3 = self.upconv3(iconv4)
        concat3 = torch.cat((upconv3, skip2, self.udisp4), 1)
        iconv3 = self.iconv3(concat3)
        self.disp3 = self.disp3_layer(iconv3)
        self.udisp3 = nn.functional.interpolate(self.disp3, scale_factor=2, mode='bilinear', align_corners=True)
        upconv2 = self.upconv2(iconv3)
        concat2 = torch.cat((upconv2, skip1, self.udisp3), 1)
        iconv2 = self.iconv2(concat2)
        self.disp2 = self.disp2_layer(iconv2)
        self.udisp2 = nn.functional.interpolate(self.disp2, scale_factor=2, mode='bilinear', align_corners=True)
        upconv1 = self.upconv1(iconv2)
        concat1 = torch.cat((upconv1, self.udisp2), 1)
        iconv1 = self.iconv1(concat1)
        self.disp1 = self.disp1_layer(iconv1)
        return self.disp1, self.disp2, self.disp3, self.disp4
5039711 | <reponame>clcert/beacon-verifier
# Real time script that verifies that the data extracted by the
# beacon collector is the same public data that this host can observe, or
# has a tolerable delay.
import datetime
import queue
import time
import requests
import binascii
import json
import argparse
from requests.exceptions import ConnectionError
from json.decoder import JSONDecodeError
from bs4 import BeautifulSoup
import threading
CLCERT_BEACON_URL = "https://beacon.clcert.cl/"
PULSE_PREFIX = "beacon/1.0/pulse/"
RAW_PREFIX = "beacon/1.0/raw/"
class BeaconServerError(Exception):
    """Raised when the beacon server cannot be reached (connection error).

    The original defined an empty ``__init__`` that silently discarded the
    Exception machinery; the default constructor is sufficient and also
    allows an optional message.
    """
class BeaconPulseError(Exception):
    """Raised when a beacon pulse response cannot be decoded as JSON.

    The original defined an empty ``__init__``; the default constructor is
    sufficient and also allows an optional message.
    """
class SourceCollector:
    """Base class for external-event collectors.

    Subclasses override collect_event() to fetch one event and put a
    ('<source name>', value, ...) tuple on the shared queue. process() is
    the thread entry point: it deliberately swallows any exception and
    queues a 'timeout error' marker instead, so one failing source never
    kills the verification round.
    """
    def __init__(self, que, s):
        # que: shared queue.Queue; s: one-letter source code
        # ('r'adio / 'e'arthquake / 't'witter / 'b'lockchain)
        self.que = que
        self.source = s
    def process(self):
        try:
            self.collect_event()
        except Exception:
            # best-effort: report the failure as a timeout marker
            # NOTE(review): source 'b' (ethereum) has no branch here, so a
            # failing blockchain collector queues nothing and its event
            # stays '' — check_eth_event treats '' as a verifier error;
            # confirm this asymmetry is intended.
            if self.source == 'r':
                self.que.put(('radio', 'timeout error'))
            elif self.source == 'e':
                self.que.put(('earthquake', 'timeout error'))
            elif self.source == 't':
                self.que.put(('twitter', 'timeout error'))
            return
    def collect_event(self):
        # default: no-op; subclasses implement the actual collection
        pass
class RadioStream(SourceCollector):
    """Collects ~3 seconds of a live AAC radio stream as a hex string and
    queues it as ('radio', <hex>)."""
    def __init__(self, que, s):
        super().__init__(que, s)
    def collect_event(self):
        now_utc = datetime.datetime.utcnow()
        stream_url = "http://stream3.rbm.cl:8010/playerweb.aac"
        raw_result = ''
        raw_block = b''
        # NOTE(review): 5 - now_utc.second is negative for most of the
        # minute, making time.sleep raise ValueError, which process()
        # swallows as a 'timeout error' — confirm this is intended.
        time.sleep(5 - now_utc.second)
        r = requests.get(stream_url, stream=True, timeout=1)
        # Get 25 blocks of 1024 bytes each (25 kB, approx. 3 seconds of audio)
        i = c = 0
        delimiter = '00' * 45 # 45 bytes of 0s (hex-encoded), used as a frame anchor
        for block in r.iter_content(1024):
            # i limits how many blocks are scanned before the anchor is
            # found; c counts the blocks actually captured
            if i < 260 and c < 25:
                i += 1
                result = binascii.b2a_hex(block).decode()
                # start capturing at the first block containing the
                # zero-run anchor, then keep the next 24 blocks
                if delimiter in result or (0 < c < 25):
                    raw_block += block
                    raw_result += result
                    c += 1
                    i -= 1
            else:
                self.que.put(('radio', raw_result))
                return
        self.que.put(('radio', raw_result))
        return
def truncate_data(data):
    """Normalize *data* to exactly 80 characters: a 79-character payload
    (truncated, or right-padded with '#') plus a trailing newline.

    The original compared ``len(data) > 80``, so an input of exactly 80
    characters fell through to the padding branch and came back 81
    characters long; the boundary is now ``> 79``.
    """
    if len(data) > 79:
        return data[:79] + '\n'
    return data + ('#' * (79 - len(data))) + '\n'
class EarthquakeWeb(SourceCollector):
    """Scrapes sismologia.cl's daily listing and queues the most recent
    earthquake of magnitude >= 3 as ('earthquake', '<date> <lat> <lon>
    <depth> <magnitude>')."""
    def __init__(self, que, s):
        super().__init__(que, s)
    def collect_event(self):
        # Chile local time (fixed UTC-3 here) selects the daily listing page
        now_cl = datetime.datetime.now(tz=datetime.timezone(-datetime.timedelta(hours=3)))
        current_year = str(now_cl.year)
        current_month = str(now_cl.month) if len(str(now_cl.month)) == 2 else '0' + str(now_cl.month)
        current_day = str(now_cl.day) if len(str(now_cl.day)) == 2 else '0' + str(now_cl.day)
        url = 'http://sismologia.cl/events/listados/' + current_year + '/' + current_month + \
              '/' + current_year + current_month + current_day + '.html'
        # Wait until reach half a minute
        # NOTE(review): 30 - second is negative in the second half of each
        # minute; time.sleep then raises ValueError, which process()
        # swallows as a 'timeout error' — confirm intended.
        time.sleep(30 - datetime.datetime.utcnow().second)
        # Gets the last earthquake produced over degree 2.5 informed by sismologia.cl
        web = requests.get(url, timeout=5).content
        soup = BeautifulSoup(web, "html.parser")
        # rows with class 'impar'/'par'; [1:] skips the header row
        earthquakes_text = soup.findAll("tr", {'class': ['impar', 'par']})[1:]
        earthquakes = []
        for earthquake_text in earthquakes_text:
            params = []
            for parameter in earthquake_text.contents:
                params.append(parameter.text)
            date = params[0]
            latitude = params[2]
            longitude = params[3]
            depth = params[4]
            magnitude = params[5]
            reference = params[6]
            # NOTE(review): the adjust() option labels are swapped for depth
            # and magnitude; both options strip the unit suffix identically,
            # so the output is unaffected — still worth fixing for clarity.
            single_earthquake = "%s %s %s %s %s" % (adjust(date, 'date'), adjust(latitude, 'coord'),
                                                    adjust(longitude, 'coord'), adjust(depth, 'magnitude'),
                                                    adjust(magnitude, 'depth'))
            earthquakes.append(single_earthquake)
        raw_result = ''
        # keep the first (most recent) entry with magnitude >= 3
        for earthquake in earthquakes:
            raw_result = earthquake
            if float(raw_result.split(' ')[-1]) >= 3:
                break
        self.que.put(('earthquake', raw_result))
def adjust(data, option):
    """Normalize one scraped earthquake field.

    'coord'              -> round to one decimal, canonical float string.
    'magnitude'/'depth'  -> strip the unit suffix (keep the first token).
    anything else        -> returned unchanged.
    """
    if option == 'coord':
        rounded = '%.1f' % float(data)
        return str(float(rounded))
    if option in ('magnitude', 'depth'):
        return data.split(' ')[0]
    return data
def get_json(url):
    """GET *url* and decode the response body as JSON.

    Raises:
        BeaconServerError: the beacon server could not be reached.
        BeaconPulseError: the response body was not valid JSON.
    """
    time.sleep(0.05) # Prevent 'Too Many Requests' response from server
    try:
        return json.loads(requests.get(url).content) # TODO: change for not self signed certificate
    except ConnectionError:
        raise BeaconServerError
    except JSONDecodeError:
        raise BeaconPulseError
class EthereumBlockchain(SourceCollector):
    """Fetches the latest Ethereum block from BlockCypher and queues
    ('ethereum', hash, previous_hash, height)."""
    def __init__(self, que, s):
        super().__init__(que, s)
    def collect_event(self):
        # NOTE(review): 25 - second is negative after second 25 of each
        # minute; time.sleep then raises ValueError, which process()
        # swallows silently (source 'b' queues no timeout marker) — confirm.
        time.sleep(25 - datetime.datetime.utcnow().second)
        eth_api = 'https://api.blockcypher.com/v1/eth/main'
        request = requests.get(eth_api, timeout=5)
        content = json.loads(request.content)
        last_block_hash = content["hash"]
        last_block_prev_hash = content["previous_hash"]
        last_block_height = content["height"]
        self.que.put(('ethereum', last_block_hash, last_block_prev_hash, last_block_height))
class TrendingTwitter(SourceCollector):
    """Twitter trending-topics collector.

    NOTE(review): this is a stub — collect_event() is not overridden, so
    the inherited no-op runs and no ('twitter', ...) tuple is ever queued;
    twitter_event in the main loop therefore always stays ''. Confirm
    whether the implementation was removed intentionally.
    """
    def __init__(self, que, s):
        super().__init__(que, s)
def check_eq_event(collected, reported, event_status, current_time):
    """Print a diagnostic when the locally collected earthquake event
    differs from the one reported by the beacon (or when the beacon itself
    flagged the event). Prints nothing on a match; returns None."""
    if event_status != 0:
        print('BEACON EVENT ERROR!\t' + current_time)
        return
    if collected != reported:
        print('EARTHQUAKE ERROR!\t' + current_time)
        print('col: ' + collected)
        print('rep: ' + reported)
# Validates if the ethereum event received by the verifier is the "same" as the one reported by the Beacon.
# The are three conditions under the check will be valid:
# 1. The hash of the blocks are the same.
# 2. The hash of the previous block collected is the same as the one reported.
# 3. There are, at most, 2 blocks of difference between the collected and the reported one.
# In any other case, the validation will not be valid.
def check_eth_event(collected, reported, event_status, curr_time):
    """Validate the collected Ethereum block against the beacon's report.

    `collected` is ('' on collector failure, otherwise a
    (hash, previous_hash, height) tuple); `reported` is "<hash> <height>".
    A match requires any of: same hash, the reported hash equals the
    collected block's previous hash, or the heights differ by at most 2.

    Returns: 0 valid, 1 invalid, 2 verifier error, 3 beacon service error.
    """
    if event_status != 0:
        print('BEACON EVENT ERROR!\t' + curr_time)
        return 3  # beacon service error
    if collected == '':
        print('ETHEREUM COLLECTED EVENT ERROR!\t' + curr_time)
        return 2  # verifier error
    col_hash = collected[0]
    col_prev_hash = collected[1]
    col_height = collected[2]
    tokens = reported.split()
    rep_hash = tokens[0]
    rep_height = int(tokens[1])
    matches = (col_hash == rep_hash
               or col_prev_hash == rep_hash
               or abs(col_height - rep_height) <= 2)
    if matches:
        return 0  # valid verification
    print('ETHEREUM ERROR!\t' + curr_time)
    print('col height: ' + str(col_height))
    print('rep height: ' + str(rep_height))
    print('col hash: ' + col_hash)
    print('rep hash: ' + rep_hash)
    return 1  # invalid verification
# PARSE OPTIONS
parser = argparse.ArgumentParser(description="Real-Time Script for External Events collected by CLCERT Random Beacon")
parser.add_argument("-w", "--beacon-web",
action="store", dest="beacon_web", default="", type=str,
help="beacon server web host")
parser.add_argument("-a", "--all-sources",
action="store_true", dest="all_sources", default=False,
help="check all sources")
parser.add_argument("-e", "--earthquake",
action="store_true", dest="eq_check", default=False,
help="check earthquake collector")
parser.add_argument("-r", "--radio",
action="store_true", dest="radio_check", default=False,
help="check radio collector")
parser.add_argument("-t", "--twitter",
action="store_true", dest="tw_check", default=False,
help="check twitter collector")
parser.add_argument("-b", "--blockchain",
action="store_true", dest="block_check", default=False,
help="check blockchain (ethereum) collector")
options = parser.parse_args()
print("CLCERT Random Beacon - Real-Time Verifier")
# CHECK BEACON HOST OPTION
if options.beacon_web != "":
CLCERT_BEACON_URL = options.beacon_web
# CHECK WHICH SOURCES TO VERIFY
if options.all_sources:
options.eq_check = True
options.radio_check = True
options.tw_check = True
options.block_check = True
# Wait for the current minute to end
second_mark_init = 0
while 1:
if datetime.datetime.now().second == second_mark_init:
break
else:
now_utc = datetime.datetime.now()
time_to_wait = (60 - now_utc.second - 1) + second_mark_init + ((1000000 - now_utc.microsecond) / 1000000)
time.sleep(time_to_wait)
# Execute the main function at the beginning of each minute
while 1:
# Queue return value to a Queue object
q = queue.Queue()
# Process each collector as a separate thread
threads = []
if options.eq_check:
t0 = threading.Thread(target=EarthquakeWeb(q, 'e').process)
threads.append(t0)
earthquake_event = ''
if options.tw_check:
t1 = threading.Thread(target=TrendingTwitter(q, 't').process)
threads.append(t1)
twitter_event = ''
if options.radio_check:
t2 = threading.Thread(target=RadioStream(q, 'r').process)
threads.append(t2)
radio_event = ''
if options.block_check:
t3 = threading.Thread(target=EthereumBlockchain(q, 'b').process)
threads.append(t3)
ethereum_event = ''
# Start all threads
for t in threads:
t.start()
# Wait all threads to finish
for t in threads:
t.join()
while not q.empty():
element = q.get()
if element[0] == 'radio':
radio_event = element[1]
elif element[0] == 'earthquake':
earthquake_event = element[1]
elif element[0] == 'twitter':
twitter_event = element[1]
elif element[0] == 'ethereum':
ethereum_event = element[1:]
# Wait until 35 second mark to retrieve external events values
now_utc = datetime.datetime.utcnow()
wait_time = 35 - now_utc.second
if wait_time < 0:
print(str(now_utc.minute) + ':' + str(now_utc.second))
print('Error in waiting time')
time.sleep(wait_time + 35)
else:
time.sleep(wait_time)
try:
last_pulse = get_json(CLCERT_BEACON_URL + PULSE_PREFIX + "last")
except (BeaconServerError, BeaconPulseError):
print('ERROR IN SERVER\t\t ' + str(now_utc.replace(second=0, microsecond=0)))
now_utc = datetime.datetime.now()
time_to_wait = (60 - now_utc.second - 1) + second_mark_init + ((1000000 - now_utc.microsecond) / 1000000)
time.sleep(time_to_wait)
continue
last_pulse_id = int(last_pulse["id"])
try:
last_pulse_raw_events = get_json(CLCERT_BEACON_URL + RAW_PREFIX + "id/" + str(last_pulse_id + 1))
except (BeaconServerError, BeaconPulseError):
print('ERROR IN SERVER\t\t ' + str(now_utc.replace(second=0, microsecond=0)))
now_utc = datetime.datetime.now()
time_to_wait = (60 - now_utc.second - 1) + second_mark_init + ((1000000 - now_utc.microsecond) / 1000000)
time.sleep(time_to_wait)
continue
last_pulse_earthquake_raw_value = ''
last_pulse_earthquake_status = 0
last_pulse_radio_raw_value = ''
last_pulse_radio_status = 0
last_pulse_twitter_raw_value = ''
last_pulse_twitter_status = 0
last_pulse_ethereum_raw_value = ''
last_pulse_ethereum_status = 0
for event in last_pulse_raw_events:
if event["source_id"] == 1:
last_pulse_earthquake_raw_value = event["raw_value"]
last_pulse_earthquake_status = event["status_code"]
elif event["source_id"] == 2:
last_pulse_twitter_raw_value = event["raw_value"]
last_pulse_twitter_status = event["status_code"]
elif event["source_id"] == 3:
last_pulse_radio_raw_value = event["raw_value"]
last_pulse_radio_status = event["status_code"]
elif event["source_id"] == 4:
last_pulse_ethereum_raw_value = event["raw_value"]
last_pulse_ethereum_status = event["status_code"]
# Check differences between what verifier collected and what the beacon reported
current_time = str(now_utc.replace(second=0, microsecond=0))
if options.eq_check:
check_eq_event(earthquake_event, last_pulse_earthquake_raw_value, last_pulse_earthquake_status, current_time)
if options.radio_check:
if radio_event != last_pulse_radio_raw_value:
if radio_event != 'timeout error':
if last_pulse_radio_status == 0:
print('ERROR RADIO\t\t\t ' + current_time)
else:
print('EVENT ERROR RADIO ' + current_time)
else:
print('TIMEOUT RADIO\t\t ' + current_time)
if options.tw_check:
if twitter_event != last_pulse_twitter_raw_value:
if twitter_event != 'timeout error':
if last_pulse_twitter_status == 0:
print('ERROR TWITTER\t\t\t ' + current_time)
else:
print('EVENT ERROR TWITTER ' + current_time)
else:
print('TIMEOUT TWITTER\t\t ' + current_time)
if options.block_check:
if check_eth_event(ethereum_event, last_pulse_ethereum_raw_value, last_pulse_ethereum_status, current_time) == 0:
print("success!\t" + current_time)
# Wait until the next minute
now_utc = datetime.datetime.utcnow()
time_to_wait = (60 - now_utc.second - 1) + second_mark_init + ((1000000 - now_utc.microsecond) / 1000000)
time.sleep(time_to_wait)
| StarcoderdataPython |
8185809 | from pathlib import Path
import numpy
import pytest
from numpy import ndarray
from bio_embeddings.embed import EmbedderInterface
from bio_embeddings.embed.pipeline import embed_and_write_batched
from bio_embeddings.utilities.filemanagers import FileSystemFileManager
# noinspection PyProtectedMember
from bio_embeddings.utilities.pipeline import _process_fasta_file
class FakeEmbedder(EmbedderInterface):
    """Minimal stand-in embedder for pipeline tests: returns empty
    embeddings so tests exercise the file/remapping machinery without
    loading a real language model."""
    # class-level constants required by the EmbedderInterface contract
    embedding_dimension = 1024
    number_of_layers = 1
    def embed(self, sequence: str) -> ndarray:
        # always an empty array, regardless of the input sequence
        return numpy.asarray([])
    @staticmethod
    def reduce_per_protein(embedding: ndarray) -> ndarray:
        # identity: nothing to reduce for the fake embeddings
        return embedding
def test_simple_remapping(pytestconfig, tmp_path: Path):
    """The embed stage runs end-to-end with simple_remapping enabled.

    Regression test for https://github.com/sacdallago/bio_embeddings/issues/50
    """
    global_parameters = {
        "sequences_file": str(
            pytestconfig.rootpath.joinpath("test-data/seqwence-protein.fasta")
        ),
        "prefix": str(tmp_path),
        "simple_remapping": True,
    }
    # preprocess the fasta, then embed with the fake model; no assertion —
    # the test passes if neither step raises
    global_parameters = _process_fasta_file(**global_parameters)
    embed_and_write_batched(FakeEmbedder(), FileSystemFileManager(), global_parameters)
def test_illegal_amino_acids(caplog, pytestconfig, tmp_path: Path):
    """Lower-case amino acids trigger exactly one warning log.

    Regression test for https://github.com/sacdallago/bio_embeddings/issues/54
    """
    input_file = pytestconfig.rootpath.joinpath("test-data/illegal_amino_acids.fasta")
    _process_fasta_file(sequences_file=str(input_file), prefix=str(tmp_path))
    # the warning text is asserted verbatim, including the offending entry name
    assert caplog.messages == [
        f"The entry 'lowercase' in {input_file} contains lower "
        "case amino acids. Lower case letters are uninterpretable by most language "
        "models, and their embedding will be nonesensical. Protein LMs available "
        "through bio_embeddings have been trained on upper case, single letter code "
        "sequence representations only "
        "(https://en.wikipedia.org/wiki/Amino_acid#Table_of_standard_amino_acid_abbreviations_and_properties)."
    ]
def test_unparsable_fasta(caplog, pytestconfig, tmp_path: Path):
    """An unparsable fasta raises a ValueError naming the file, and no
    warnings are logged along the way."""
    input_file = pytestconfig.rootpath.joinpath("test-data/unparsable.fasta")
    with pytest.raises(
        ValueError,
        match=f"Could not parse '{input_file}'. Are you sure this is a valid fasta file?",
    ):
        _process_fasta_file(sequences_file=str(input_file), prefix=str(tmp_path))
    assert caplog.messages == []
def test_broken_fasta(pytestconfig, tmp_path: Path):
    """Ensure that we print a reasonable message when the user feeds in a broken fasta file.

    Unfortunately, we can't detect if the user fed in a markdown file
    instead of a fasta, because we could parse that markdown file
    as fasta:

    > Following the initial line (used for a unique description of the
    > sequence) was the actual sequence itself in standard one-letter
    > character string. Anything other than a valid character would
    > be ignored (including spaces, tabulators, asterisks, etc...).
    > It was also common to end the sequence with an "*" (asterisk)
    > character (in analogy with use in PIR formatted sequences) and,
    > for the same reason, to leave a blank line between the description
    > and the sequence.

    From https://www.wikiwand.com/en/FASTA_format

    NCBI is a bit stricter with their definition by barring blank line,
    but is otherwise still extremely lenient.
    (https://blast.ncbi.nlm.nih.gov/Blast.cgi?CMD=Web&PAGE_TYPE=BlastDocs&DOC_TYPE=BlastHelp)
    """
    # a binary .npz file stands in for an unmistakably broken "fasta" input
    input_file = pytestconfig.rootpath.joinpath("test-data/embeddings.npz")
    with pytest.raises(ValueError, match="Are you sure this is a valid fasta file?"):
        _process_fasta_file(sequences_file=input_file, prefix=str(tmp_path))
def test_missing_fasta(tmp_path: Path):
    """A nonexistent input path surfaces as FileNotFoundError, not as a
    parse error."""
    input_file = tmp_path.joinpath("non_existant.fasta")
    with pytest.raises(FileNotFoundError, match="No such file or directory"):
        _process_fasta_file(sequences_file=input_file, prefix=str(tmp_path))
| StarcoderdataPython |
3505142 | ## @defgroup Analyses-Sizing Sizing
# This is the analysis that controls vehicle sizing evaluations.
# @ingroup Analyses
from . import Sizing
| StarcoderdataPython |
1910905 | ROW = "R"
# Single-character database codes for the remaining planting methods
# (ROW = "R" is defined just above).
MOUND = "M"
POT = "P"
TRELLIS = "T"
SCATTER = "S"

# (code, human label) pairs — presumably Django `choices` tuples; verify
# against the models that reference them.
GROW_STYLE = [
    (ROW, "in rows"),
    (MOUND, "in mounds"),
    (POT, "in pots"),
    (TRELLIS, "on trellises"),
    (SCATTER, "by scattering"),
]

# Same codes with verb-phrase labels for display in running text.
GROW_STYLE_FORMATTED = [
    (ROW, "plant in rows"),
    (MOUND, "plant in mounds"),
    (POT, "plant in pots"),
    (TRELLIS, "plant along trellises"),
    (SCATTER, "scatter plant"),
]

# Which seasonal frost a planting date is anchored to.
FIRST_FROST = "F"
LAST_FROST = "L"
FROST = [
    (FIRST_FROST, "First frost"),
    (LAST_FROST, "Last frost")
]

# Where the seed is started.
INSIDE = "I"
OUTSIDE = "O"
LOCATION = [
    (INSIDE, "Start inside"),
    (OUTSIDE, "Direct sow")
]
| StarcoderdataPython |
5046903 | <filename>source/scripts/python/web/source/web.py
#!/usr/bin/python3.5
from os import path
from datetime import datetime
def index():
    """Return the contents of the index.html that sits next to this module."""
    here = path.dirname(path.abspath(__file__))
    html_file = path.join(here, 'index.html')
    with open(html_file, 'r') as handle:
        return handle.read()
def time():
    """Return the current local time formatted as 'YYYY-MM-DD HH:MM:SS'."""
    now = datetime.now()
    return now.strftime('%Y-%m-%d %H:%M:%S')
| StarcoderdataPython |
1796511 | <gh_stars>100-1000
#! ../usr/bin/python2
# Python 2 script: module imports.
import subprocess,time,random,sys
# Named ANSI colour escape codes: green, orange, blue, red, reset.
G = "\033[32m"; O = "\033[33m"; B = "\033[34m"; R = "\033[31m"; W = "\033[0m";
# Pool of ANSI codes (reset/red/green/blue/magenta/cyan) from which two
# colours are picked at random for the banner, once per run.
x = "\033["
color = (x+"0m",x+"31m",x+"32m",x+"34m",x+"35m",x+"36m")
Z = random.choice(color)
r = random.choice(color)
def logo():
    # Clear the terminal in a platform-appropriate way ("clear" on Linux,
    # "cls" on everything else), pausing briefly for effect.
    if sys.platform == 'linux' or sys.platform == 'linux2':
        subprocess.call("clear", shell=True)
        time.sleep(1)
    else:
        subprocess.call("cls", shell=True)
        time.sleep(1)
    # ASCII-art banner (Python 2 print-statement syntax).
    print r+" _ _ _ ____"
    print r+" | | | | |_ _ __ ___ | _ \ "
    print r+" | |_| | __| '_ ` _ \| | | |"
    print r+" | _ | |_| | | | | | |_| |"
    print r+" |_| |_|\__|_| |_| |_|____/"
    # Changing the author credit will not make you a PRO.
    print R+"\n 0={==> "+W+"Html Downloader"+R+" <==}=0\n"
    print R+"[+]"+O+" author : "+Z+"<NAME> "+O+"a.k.a"+Z+" XnVer404"
    print R+"[+]"+O+" thanks to "+Z+"OneSec Onz "+O+"and"+Z+" Null\n"
    # Chain straight into the interactive prompt.
    use()
def use():
    # Prompt for the target URL and the output filename (Python 2 raw_input).
    website = raw_input(B+"[?]"+G+" website : "+O)
    output = raw_input(B+"[?]"+G+" file output ("+B+"ex: hasil.htm"+G+") : "+O)
    time.sleep(1)
    print R+"[+] Curl started . . .\n"+G
    time.sleep(1)
    # NOTE(review): user input is interpolated unescaped into a shell
    # command (shell=True) — shell injection is possible with crafted input.
    subprocess.call("curl "+website+" -o "+output,shell=True)
    time.sleep(1)
    print B+"\n[*]"+G+" File save : "+O+output
    # exit() raises SystemExit, ending the script here.
    exit()
print logo()
| StarcoderdataPython |
6521844 | <reponame>NTX-McGill/NeuroTechX-McGill-2021<filename>software/speller/data_collection_platform/backend/dcp/signals/predict.py<gh_stars>1-10
from FBCCA_IT import filter_bank_cca_it
import numpy as np
import json
from scipy.signal import filtfilt, cheby1
def predict_letter(bci_data, subject_id='S08'):
    """Predict the attended speller letter from an SSVEP EEG epoch.

    Parameters
    ----------
    bci_data : array-like of shape (n_samples, 8)
        8-channel EEG sampled at 250 Hz (e.g. shape (500, 8) for a 2 s
        window) — assumed layout; confirm against the acquisition code.
    subject_id : str
        Identifier used to load the per-subject template file
        ('<subject_id>_template.npy'), a dict mapping each stimulus
        frequency to averaged previously-collected data.

    Returns
    -------
    The letter (a value of freq_letter_map.json) whose stimulus frequency
    yields the highest filter-bank CCA correlation with the epoch.
    """
    # Analysis parameters.
    sampling_rate = 250.0
    nyquist = sampling_rate / 2.0
    low_bound_freq = 5.5
    upper_bound_freq = 54.0
    num_harmonics = 5  # number of sub-bands used by FBCCA

    # Per-subject template dict: {stimulus frequency -> averaged signal}.
    template = np.load(f'{subject_id}_template.npy', allow_pickle=True).item()

    # Mapping of stimulus frequency (as string key) -> keyboard letter.
    with open("freq_letter_map.json") as fp:
        freq_letter_dict = json.load(fp)

    # Preprocess ONCE, before the candidate-frequency loop. The previous
    # version re-centred and re-applied the band-pass filter on every
    # iteration, so later candidates were scored against a progressively
    # re-filtered signal; it also mutated the caller's array in place.
    bci_data = np.asarray(bci_data, dtype=float)
    bci_data = bci_data - np.nanmean(bci_data, axis=0)
    beta, alpha = cheby1(N=2, rp=0.3,
                         Wn=[low_bound_freq / nyquist, upper_bound_freq / nyquist],
                         btype='band', output='ba')
    bci_data = filtfilt(beta, alpha, bci_data.T).T

    signal_len = np.shape(bci_data)[0]
    frequencies = list(freq_letter_dict.keys())

    # Score every candidate frequency with filter-bank CCA; the template is
    # truncated to the epoch length so both operands align sample-for-sample.
    corr = []
    for frequency in frequencies:
        reference = template.get(float(frequency)).astype(float)[:signal_len, :]
        rho = filter_bank_cca_it(bci_data, float(frequency), low_bound_freq,
                                 upper_bound_freq, num_harmonics, reference,
                                 sampling_rate)
        corr.append(rho)

    # The best-correlating stimulus frequency identifies the letter.
    return freq_letter_dict.get(frequencies[np.argmax(corr)])
if __name__ == '__main__':
    # Smoke-run on 4 s of random "EEG" (1000 samples x 8 channels at 250 Hz).
    predict_letter(np.random.rand(1000, 8))
| StarcoderdataPython |
6629988 | <filename>picmodels/migrations/0051_healthcareserviceexpertise.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: introduces the
    # HealthcareServiceExpertise lookup table (auto id + unique name).

    dependencies = [
        ('picmodels', '0050_auto_20180306_1502'),
    ]

    operations = [
        migrations.CreateModel(
            name='HealthcareServiceExpertise',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, primary_key=True, auto_created=True)),
                ('name', models.CharField(unique=True, max_length=1000)),
            ],
        ),
    ]
| StarcoderdataPython |
3210887 | <reponame>gokul-h/rentacar<gh_stars>0
# Generated by Django 3.2.9 on 2021-11-11 08:40
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: makes CarOrder.weeks a non-null
    # integer defaulting to 0.

    dependencies = [
        ('payment', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='carorder',
            name='weeks',
            field=models.IntegerField(default=0),
        ),
    ]
| StarcoderdataPython |
5189078 | <filename>simple_elasticsearch/tests.py<gh_stars>10-100
import copy
from datadiff import tools as ddtools
from django.core.paginator import Page
from django.test import TestCase
from elasticsearch import Elasticsearch
import mock
try:
# `reload` is not a python3 builtin like python2
reload
except NameError:
from imp import reload
from . import settings as es_settings
from .search import SimpleSearch
from .mixins import ElasticsearchTypeMixin
from .models import Blog, BlogPost
class ElasticsearchTypeMixinClass(ElasticsearchTypeMixin):
    # Bare subclass used by the tests below to exercise the mixin's
    # NotImplementedError / default behaviours.
    pass
class ElasticsearchTypeMixinTestCase(TestCase):
    """Exercises ElasticsearchTypeMixin through the BlogPost model.

    Every Elasticsearch network call is mocked; no live cluster is needed.
    """

    @property
    def latest_post(self):
        # Convenience accessor: the most recently created BlogPost.
        return BlogPost.objects.select_related('blog').latest('id')

    @mock.patch('simple_elasticsearch.mixins.Elasticsearch.delete')
    @mock.patch('simple_elasticsearch.mixins.Elasticsearch.index')
    def setUp(self, mock_index, mock_delete):
        # Fixture data: one blog, one post that should never be indexed
        # (slug 'DO-NOT-INDEX'), and nine ordinary posts.
        self.blog = Blog.objects.create(
            name='test blog name',
            description='test blog description'
        )

        # hack the return value to ensure we save some BlogPosts here;
        # without this mock, the post_save handler indexing blows up
        # as there is no real ES instance running
        mock_index.return_value = mock_delete.return_value = {}

        post = BlogPost.objects.create(
            blog=self.blog,
            title="DO-NOT-INDEX title",
            slug="DO-NOT-INDEX",
            body="DO-NOT-INDEX body"
        )

        for x in range(1, 10):
            BlogPost.objects.create(
                blog=self.blog,
                title="blog post title {0}".format(x),
                slug="blog-post-title-{0}".format(x),
                body="blog post body {0}".format(x)
            )

    def test__get_es__with_default_settings(self):
        # Default connection: localhost:9200.
        result = BlogPost.get_es()
        self.assertIsInstance(result, Elasticsearch)
        self.assertEqual(result.transport.hosts[0]['host'], '127.0.0.1')
        self.assertEqual(result.transport.hosts[0]['port'], 9200)

    def test__get_es__with_custom_server(self):
        # include a custom class here as the internal `_es` is cached, so can't reuse the
        # `ElasticsearchIndexClassDefaults` global class (see above).
        class ElasticsearchIndexClassCustomSettings(ElasticsearchTypeMixin):
            pass

        # ELASTICSEARCH_SERVER overrides the host; settings module must be
        # reloaded for the override to take effect (and again to restore it).
        with self.settings(ELASTICSEARCH_SERVER=['search.example.com:9201']):
            reload(es_settings)
            result = ElasticsearchIndexClassCustomSettings.get_es()
            self.assertIsInstance(result, Elasticsearch)
            self.assertEqual(result.transport.hosts[0]['host'], 'search.example.com')
            self.assertEqual(result.transport.hosts[0]['port'], 9201)
        reload(es_settings)

    def test__get_es__with_custom_connection_settings(self):
        # include a custom class here as the internal `_es` is cached, so can't reuse the
        # `ElasticsearchIndexClassDefaults` global class (see above).
        class ElasticsearchIndexClassCustomSettings(ElasticsearchTypeMixin):
            pass

        # ELASTICSEARCH_CONNECTION_PARAMS is passed straight through to the
        # Elasticsearch client constructor.
        with self.settings(ELASTICSEARCH_CONNECTION_PARAMS={'hosts': ['search2.example.com:9202'], 'sniffer_timeout': 15}):
            reload(es_settings)
            result = ElasticsearchIndexClassCustomSettings.get_es()
            self.assertIsInstance(result, Elasticsearch)
            self.assertEqual(result.transport.hosts[0]['host'], 'search2.example.com')
            self.assertEqual(result.transport.hosts[0]['port'], 9202)
            self.assertEqual(result.transport.sniffer_timeout, 15)
        reload(es_settings)

    @mock.patch('simple_elasticsearch.mixins.ElasticsearchTypeMixin.index_add_or_delete')
    def test__save_handler(self, mock_index_add_or_delete):
        # Both create() and save() must trigger index_add_or_delete via the
        # post_save signal handler.
        # with a create call
        post = BlogPost.objects.create(
            blog=self.blog,
            title="blog post title foo",
            slug="blog-post-title-foo",
            body="blog post body foo"
        )
        mock_index_add_or_delete.assert_called_with(post)
        mock_index_add_or_delete.reset_mock()

        # with a plain save call
        post.save()
        mock_index_add_or_delete.assert_called_with(post)

    @mock.patch('simple_elasticsearch.mixins.ElasticsearchTypeMixin.index_delete')
    def test__delete_handler(self, mock_index_delete):
        # Deleting a model instance must remove it from the index.
        post = self.latest_post
        post.delete()
        mock_index_delete.assert_called_with(post)

    @mock.patch('simple_elasticsearch.mixins.Elasticsearch.index')
    def test__index_add(self, mock_index):
        post = self.latest_post

        mock_index.return_value = {}

        # make sure an invalid object passed in returns False
        result = BlogPost.index_add(None)
        self.assertFalse(result)

        # make sure indexing an item calls Elasticsearch.index() with
        # the correct variables, with normal index name
        result = BlogPost.index_add(post)
        self.assertTrue(result)
        mock_index.assert_called_with('blog', 'posts', BlogPost.get_document(post), post.pk, routing=1)

        # make sure indexing an item calls Elasticsearch.index() with
        # the correct variables, with non-standard index name
        result = BlogPost.index_add(post, 'foo')
        self.assertTrue(result)
        mock_index.assert_called_with('foo', 'posts', BlogPost.get_document(post), post.pk, routing=1)

        # this one should not index (return false) because the
        # 'should_index' for this post should make it skip it
        post = BlogPost.objects.get(slug="DO-NOT-INDEX")
        result = BlogPost.index_add(post)
        self.assertFalse(result)

    @mock.patch('simple_elasticsearch.mixins.Elasticsearch.delete')
    def test__index_delete(self, mock_delete):
        post = self.latest_post

        mock_delete.return_value = {
            "acknowledged": True
        }

        # make sure an invalid object passed in returns False
        result = BlogPost.index_delete(None)
        self.assertFalse(result)

        # make sure deleting an item calls Elasticsearch.delete() with
        # the correct variables, with normal index name
        result = BlogPost.index_delete(post)
        self.assertTrue(result)
        mock_delete.assert_called_with('blog', 'posts', post.pk, routing=1)

        # make sure deleting an item calls Elasticsearch.delete() with
        # the correct variables, with non-standard index name
        result = BlogPost.index_delete(post, 'foo')
        self.assertTrue(result)
        mock_delete.assert_called_with('foo', 'posts', post.pk, routing=1)

    @mock.patch('simple_elasticsearch.mixins.ElasticsearchTypeMixin.index_add')
    @mock.patch('simple_elasticsearch.mixins.ElasticsearchTypeMixin.index_delete')
    def test__index_add_or_delete(self, mock_index_delete, mock_index_add):
        # invalid object passed in, should return False
        result = BlogPost.index_add_or_delete(None)
        self.assertFalse(result)

        # this one should not index (return false) because the
        # `should_index` for this post should make it skip it;
        # `index_delete` should get called
        mock_index_delete.return_value = True
        post = BlogPost.objects.get(slug="DO-NOT-INDEX")

        result = BlogPost.index_add_or_delete(post)
        self.assertTrue(result)
        mock_index_delete.assert_called_with(post, '')

        result = BlogPost.index_add_or_delete(post, 'foo')
        self.assertTrue(result)
        mock_index_delete.assert_called_with(post, 'foo')

        # `index_add` call results below
        mock_index_add.return_value = True
        post = self.latest_post

        result = BlogPost.index_add_or_delete(post)
        self.assertTrue(result)
        mock_index_add.assert_called_with(post, '')

        result = BlogPost.index_add_or_delete(post, 'foo')
        self.assertTrue(result)
        mock_index_add.assert_called_with(post, 'foo')

    def test__get_index_name(self):
        self.assertEqual(BlogPost.get_index_name(), 'blog')

    def test__get_type_name(self):
        self.assertEqual(BlogPost.get_type_name(), 'posts')

    def test__get_queryset(self):
        queryset = BlogPost.objects.all().select_related('blog').order_by('pk')
        self.assertEqual(list(BlogPost.get_queryset().order_by('pk')), list(queryset))

    def test__get_index_name_notimplemented(self):
        # Subclasses must override get_index_name().
        with self.assertRaises(NotImplementedError):
            ElasticsearchTypeMixinClass.get_index_name()

    def test__get_type_name_notimplemented(self):
        # Subclasses must override get_type_name().
        with self.assertRaises(NotImplementedError):
            ElasticsearchTypeMixinClass.get_type_name()

    def test__get_queryset_notimplemented(self):
        # Subclasses must override get_queryset().
        with self.assertRaises(NotImplementedError):
            ElasticsearchTypeMixinClass.get_queryset()

    def test__get_type_mapping(self):
        # Expected ES type mapping for the BlogPost document.
        mapping = {
            "properties": {
                "created_at": {
                    "type": "date",
                    "format": "dateOptionalTime"
                },
                "title": {
                    "type": "string"
                },
                "body": {
                    "type": "string"
                },
                "slug": {
                    "type": "string"
                },
                "blog": {
                    "properties": {
                        "id": {
                            "type": "long"
                        },
                        "name": {
                            "type": "string"
                        },
                        "description": {
                            "type": "string"
                        }
                    }
                }
            }
        }
        self.assertEqual(BlogPost.get_type_mapping(), mapping)

    def test__get_type_mapping_notimplemented(self):
        # Default mapping is an empty dict (ES will infer the mapping).
        self.assertEqual(ElasticsearchTypeMixinClass.get_type_mapping(), {})

    def test__get_request_params(self):
        post = self.latest_post
        # TODO: implement the method to test it works properly
        self.assertEqual(BlogPost.get_request_params(post), {'routing':1})

    def test__get_request_params_notimplemented(self):
        # Default request params are empty.
        self.assertEqual(ElasticsearchTypeMixinClass.get_request_params(1), {})

    def test__get_bulk_index_limit(self):
        self.assertTrue(str(BlogPost.get_bulk_index_limit()).isdigit())

    def test__get_query_limit(self):
        self.assertTrue(str(BlogPost.get_query_limit()).isdigit())

    def test__get_document_id(self):
        # Default document id is the model's primary key.
        post = self.latest_post
        result = BlogPost.get_document_id(post)
        self.assertEqual(result, post.pk)

    def test__get_document(self):
        post = self.latest_post
        result = BlogPost.get_document(post)
        self.assertEqual(result, {
            'title': post.title,
            'slug': post.slug,
            'blog': {
                'id': post.blog.pk,
                'description': post.blog.description,
                'name': post.blog.name
            },
            'created_at': post.created_at,
            'body': post.body
        })

    def test__get_document_notimplemented(self):
        # Subclasses must override get_document().
        with self.assertRaises(NotImplementedError):
            ElasticsearchTypeMixinClass.get_document(1)

    @mock.patch('simple_elasticsearch.mixins.Elasticsearch.index')
    def test__should_index(self, mock_index):
        post = self.latest_post
        self.assertTrue(BlogPost.should_index(post))

        mock_index.return_value = {}

        # The 'DO-NOT-INDEX' fixture post is excluded by should_index().
        post = BlogPost.objects.get(slug="DO-NOT-INDEX")
        self.assertFalse(BlogPost.should_index(post))

    def test__should_index_notimplemented(self):
        # Default behaviour: everything is indexable.
        self.assertTrue(ElasticsearchTypeMixinClass.should_index(1))

    @mock.patch('simple_elasticsearch.mixins.queryset_iterator')
    def test__bulk_index_queryset(self, mock_queryset_iterator):
        # An explicitly passed queryset is forwarded to queryset_iterator.
        queryset = BlogPost.get_queryset().exclude(slug='DO-NOT-INDEX')
        BlogPost.bulk_index(queryset=queryset)
        mock_queryset_iterator.assert_called_with(queryset, BlogPost.get_query_limit(), 'pk')
        mock_queryset_iterator.reset_mock()

        # With no queryset argument, get_queryset() is used.
        queryset = BlogPost.get_queryset()
        BlogPost.bulk_index()
        # to compare QuerySets, they must first be converted to lists.
        self.assertEqual(list(mock_queryset_iterator.call_args[0][0]), list(queryset))
        mock_queryset_iterator.reset_mock()

        # hack in a test for ensuring the proper bulk ordering is used
        BlogPost.bulk_ordering = 'created_at'
        BlogPost.bulk_index(queryset=queryset)
        mock_queryset_iterator.assert_called_with(queryset, BlogPost.get_query_limit(), 'created_at')
        BlogPost.bulk_ordering = 'pk'

    @mock.patch('simple_elasticsearch.models.BlogPost.get_document')
    @mock.patch('simple_elasticsearch.models.BlogPost.should_index')
    @mock.patch('simple_elasticsearch.mixins.Elasticsearch.bulk')
    def test__bulk_index_should_index(self, mock_bulk, mock_should_index, mock_get_document):
        # hack the return value to ensure we save some BlogPosts here;
        # without this mock, the post_save handler indexing blows up
        # as there is no real ES instance running
        mock_bulk.return_value = {}

        queryset_count = BlogPost.get_queryset().count()

        # should_index() is consulted once per item in the queryset.
        BlogPost.bulk_index()
        self.assertTrue(mock_should_index.call_count == queryset_count)

    @mock.patch('simple_elasticsearch.models.BlogPost.get_document')
    @mock.patch('simple_elasticsearch.mixins.Elasticsearch.bulk')
    def test__bulk_index_get_document(self, mock_bulk, mock_get_document):
        mock_bulk.return_value = mock_get_document.return_value = {}

        queryset_count = BlogPost.get_queryset().count()

        BlogPost.bulk_index()

        # One of the items is not meant to be indexed (slug='DO-NOT-INDEX'), so the
        # get_document function will get called one less time due to this.
        self.assertTrue(mock_get_document.call_count == (queryset_count - 1))

    @mock.patch('simple_elasticsearch.mixins.Elasticsearch.bulk')
    def test__bulk_index_bulk(self, mock_bulk):
        mock_bulk.return_value = {}

        queryset_count = BlogPost.get_queryset().count()

        BlogPost.bulk_index()

        # figure out how many times es.bulk() should get called in the
        # .bulk_index() method and verify it's the same
        bulk_times = int(queryset_count / BlogPost.get_bulk_index_limit()) + 1
        self.assertTrue(mock_bulk.call_count == bulk_times)
class SimpleSearchTestCase(TestCase):
    """Tests the SimpleSearch bulk-msearch wrapper with a mocked ES client."""

    def setUp(self):
        # Minimal example query payload; not used directly by every test.
        self.query = {'q': 'python'}

    def test__esp_reset(self):
        # reset() must clear both the accumulated bulk bodies and the
        # per-query pagination bookkeeping.
        esp = SimpleSearch()

        self.assertTrue(len(esp.bulk_search_data) == 0)
        self.assertTrue(len(esp.page_ranges) == 0)

        esp.add_search({
            "query": {
                "match": {
                    "_all": "foobar"
                }
            }
        })

        self.assertFalse(len(esp.bulk_search_data) == 0)
        self.assertFalse(len(esp.page_ranges) == 0)

        esp.reset()

        self.assertTrue(len(esp.bulk_search_data) == 0)
        self.assertTrue(len(esp.page_ranges) == 0)

    def test__esp_add_query_dict(self):
        esp = SimpleSearch()

        page = 1
        page_size = 20
        query = {
            "query": {
                "match": {
                    "_all": "foobar"
                }
            }
        }

        # SimpleSearch internally sets the from/size parameters
        # on the query; we need to compare with those values included
        query_with_size = query.copy()
        query_with_size.update({
            'from': (page - 1) * page_size,
            'size': page_size
        })

        # No index/type: the msearch header line is an empty dict.
        esp.add_search(query.copy())
        ddtools.assert_equal(esp.bulk_search_data[0], {})
        ddtools.assert_equal(esp.bulk_search_data[1], query_with_size)

        esp.reset()

        # Index only.
        esp.add_search(query.copy(), index='blog')
        ddtools.assert_equal(esp.bulk_search_data[0], {'index': 'blog'})
        ddtools.assert_equal(esp.bulk_search_data[1], query_with_size)

        esp.reset()

        # Index and doc type.
        esp.add_search(query.copy(), index='blog', doc_type='posts')
        ddtools.assert_equal(esp.bulk_search_data[0], {'index': 'blog', 'type': 'posts'})
        ddtools.assert_equal(esp.bulk_search_data[1], query_with_size)

    @mock.patch('simple_elasticsearch.search.Elasticsearch.msearch')
    def test__esp_search(self, mock_msearch):
        # Canned msearch response: 20 total hits, 2 returned.
        mock_msearch.return_value = {
            "responses": [
                {
                    "hits": {
                        "total": 20,
                        "hits": [
                            {
                                "_index": "blog",
                                "_type": "posts",
                                "_id": "1",
                                "_score": 1.0,
                                "_source": {"account_number": 1,}
                            }, {
                                "_index": "blog",
                                "_type": "posts",
                                "_id": "6",
                                "_score": 1.0,
                                "_source": {"account_number": 6,}
                            }
                        ]
                    }
                }
            ]
        }

        esp = SimpleSearch()

        # Page 3 at 2 items/page => from=4, size=2 in the bulk body.
        esp.add_search({}, 3, 2, index='blog', doc_type='posts')

        bulk_data = copy.deepcopy(esp.bulk_search_data)
        ddtools.assert_equal(bulk_data, [{'index': 'blog', 'type': 'posts'}, {'from': 4, 'size': 2}])

        responses = esp.search()
        mock_msearch.assert_called_with(bulk_data)

        # ensure that our hack to get size and from into the hit
        # data works
        self.assertEqual(responses[0]._page_num, 3)
        self.assertEqual(responses[0]._page_size, 2)

        # ensure that the bulk data gets reset
        self.assertEqual(len(esp.bulk_search_data), 0)

        # A middle page exposes both neighbours via the Django Page API.
        page = responses[0].page
        self.assertIsInstance(page, Page)
        self.assertEqual(page.number, 3)
        self.assertTrue(page.has_next())
        self.assertTrue(page.has_previous())
        self.assertEqual(len(list(page)), 2)  # 2 items on the page

    @mock.patch('simple_elasticsearch.search.Elasticsearch.msearch')
    def test__esp_search2(self, mock_msearch):
        # Same canned response, but requesting the FIRST page: has_next is
        # True and has_previous is False.
        mock_msearch.return_value = {
            "responses": [
                {
                    "hits": {
                        "total": 20,
                        "hits": [
                            {
                                "_index": "blog",
                                "_type": "posts",
                                "_id": "1",
                                "_score": 1.0,
                                "_source": {"account_number": 1,}
                            }, {
                                "_index": "blog",
                                "_type": "posts",
                                "_id": "6",
                                "_score": 1.0,
                                "_source": {"account_number": 6,}
                            }
                        ]
                    }
                }
            ]
        }

        esp = SimpleSearch()
        esp.add_search({}, 1, 2, index='blog', doc_type='posts')

        responses = esp.search()

        page = responses[0].page
        self.assertTrue(page.has_next())
        self.assertFalse(page.has_previous())
| StarcoderdataPython |
3292820 | from .bootstrap import Bootstrap
from .linear import LinearGaussianObservations
from .base import Proposal
from .linearized import Linearized
from .local_linearization import LocalLinearization
| StarcoderdataPython |
3351975 | <reponame>uri-yanover/sharik
from setuptools import setup, find_packages
# Package metadata for the `sharik` distribution.
setup(
    name="sharik",
    version="0.4.1",
    packages=find_packages(),
    description='A shar(1)-like utility with a programmatic fine-tuned API',
    author='<NAME>',
    author_email='<EMAIL>',
    url='https://github.com/uri-yanover/sharik',
    # Console entry point: `sharik` -> sharik.cli:cli_main
    entry_points={
        'console_scripts': [
            'sharik = sharik.cli:cli_main'
        ]
    },
    # `dataclasses` backport is only needed on Python <= 3.6.
    install_requires=['click', "dataclasses ; python_version<='3.6'", 'pydantic']
) | StarcoderdataPython |
319682 | <reponame>shaddyx/scopeton
import logging
from threading import RLock
import typing
from scopeton import compat, glob
from scopeton.objects import Bean
from scopeton.qualifier_tree import QualifierTree
from scopeton.scopeTools import getBean_qualifier, callMethodByName, getClassTree, flatten, ScopetonException
T = typing.TypeVar("T")
class Scope(object):
    '''Service-locator / dependency-injection container.

    Beans are registered under qualifiers (see scopeTools.getBean_qualifier);
    singleton instances are cached per scope. Scopes form a tree: child
    scopes are created with `parent=` and are started/stopped recursively.
    '''

    def __init__(self, lock=False, initMethod="postConstruct", destroyMethod="preDestroy", parent=None):
        self._singletons = QualifierTree()  # qualifier -> cached instance
        self._beans = QualifierTree()       # qualifier -> Bean definition
        # A falsy `lock` argument means "create our own re-entrant lock".
        self.lock = lock or RLock()  # type: RLock
        self.initMethod = initMethod        # lifecycle hook called on service start
        self.destroyMethod = destroyMethod  # lifecycle hook called on service stop
        self.parent = parent  #type: Scope
        self.servicesStarted = False
        self.children = []  # type: typing.List[Scope]
        # The scope itself is injectable as a singleton.
        self.registerInstance(self.__class__, self)
        if parent:
            parent.children.append(self)

    def getInstance(self, name: typing.Type[T]) -> T:
        """Resolve (and for singletons, cache) the instance for `name`."""
        with self.lock:
            return self._getInstance(getBean_qualifier(name))

    def getInstances(self, qualifier: typing.Type[T]) -> typing.List[T]:
        """Resolve every registered bean matching `qualifier`."""
        with self.lock:
            beans = self._beans.find_by_qualifier_name(getBean_qualifier(qualifier))
            beans = map(lambda x: self.getInstance(x), beans)
            return list(beans)

    def _getInstance(self, qualifier):
        # Cached singleton wins; otherwise instantiate from the bean definition.
        suitableQualifier = self._beans.find_suitable_qualifier(qualifier)
        if self._singletons.contains(suitableQualifier):
            return self._singletons.find_one_by_qualifier_name(suitableQualifier)
        bean = self._beans.find_one_by_qualifier_name(suitableQualifier)
        # Record which scope is instantiating, so @Inject can find it.
        glob.lastScope = self
        if compat.hasInject(bean.cls.__init__):
            # Constructor uses @Inject() — call with no arguments.
            instance = bean.cls()
        elif len(compat.getMethodSignature(bean.cls.__init__).args) == 2:
            # Constructor is (self, scope) — pass the scope explicitly.
            instance = bean.cls(self)
        elif len(compat.getMethodSignature(bean.cls.__init__).args) > 2:
            raise ScopetonException("Invalid number of parameters for bean constructor, maybe @Inject() decorator forgotten: {}".format(compat.getMethodSignature(bean.cls.__init__).args))
        else:
            instance = bean.cls()
        if bean.singleton:
            self.registerInstance(suitableQualifier, instance)
        return instance

    def registerInstance(self, name, instance):
        """Cache `instance` as the singleton for `name`'s qualifier."""
        qualifier = getBean_qualifier(name)
        suitableQualifier = self._beans.find_suitable_qualifier(qualifier)
        logging.debug("Suitable qualifier for {} is: {}".format(qualifier, suitableQualifier))
        self._singletons.register(suitableQualifier, instance)

    def registerBean(self, *args):
        """Register one or more bean definitions (classes are wrapped in Bean)."""
        with self.lock:
            for bean in args:
                if not isinstance(bean, Bean):
                    bean = Bean(bean)
                self._registerBean(bean)

    def remove(self):
        """Stop this scope's services and detach it (and its subtree)."""
        logging.debug("Removing scope: {}".format(self))
        self.stopServices()
        # Iterate over a copy: child.remove() mutates self.children.
        for k in self.children[:]:
            k.remove()
        if self.parent:
            self.parent.children.remove(self)

    def _registerBean(self, bean):
        """
        Register `bean` under every qualifier it exposes.

        :type bean: Bean
        """
        for name in bean.qualifier_tree:
            logging.debug("Registering: {} as {}".format(name, bean))
            self._beans.register(name, bean)

    def runServices(self):
        """Call the init lifecycle method on every service bean, depth-first."""
        if not self.servicesStarted:
            self.servicesStarted = True
            for bean in self._beans.get_all_objects():
                if bean.service:
                    callMethodByName(self.getInstance(bean), self.initMethod)
            for childScope in self.children:
                childScope.runServices()

    def stopServices(self):
        """Call the destroy lifecycle method on every service bean, depth-first."""
        if self.servicesStarted:
            self.servicesStarted = False
            for bean in self._beans.get_all_objects():
                if bean.service:
                    callMethodByName(self.getInstance(bean), self.destroyMethod)
            for childScope in self.children:
                childScope.stopServices()
| StarcoderdataPython |
4801394 | __author__ = 'anderson'
import sys

# Version shim: very old interpreters (< 3.2) get the legacy star-import;
# newer ones import the public names explicitly from santos.santos.
if sys.version_info < (3, 2):
    from santos import *
else:
    from santos.santos import TaskScheduling, stopjobs
| StarcoderdataPython |
139069 | <filename>scripts/contech_wrapper++.py
#!/usr/bin/env python
# C++ Wrapper compiler for contech front end
import contech_wrapper

# Delegate to the shared wrapper implementation with the C++ flag enabled.
contech_wrapper.main(isCpp = True)
| StarcoderdataPython |
8069259 | <gh_stars>0
# -*- coding: utf-8 -*-
from flask_restful import Resource, reqparse, abort
from flask import request
from lista.models.usuario_model import UsuarioModel
#from lista.schemas.schemas import UsuarioSchema
from lista.schemas.usuario_schema import UsuarioSchema
class UsuarioResource(Resource):
    """REST resource for a single user: lookup by name (GET) and creation (POST)."""

    # Shared request parser: both fields are mandatory on POST.
    parser = reqparse.RequestParser()
    parser.add_argument("nome",
                        type=str,
                        required=True,
                        help="O nome do Usuario não pode estar em branco."
                        )
    parser.add_argument('email',
                        type=str,
                        required=True,
                        help="O email do Usuario não pode estar em branco."
                        )

    def get(self, nome):
        """Return the user named ``nome`` (200), 404 if unknown, 500 on error."""
        try:
            usuario = UsuarioModel.encontrar_pelo_nome(nome)
            if usuario:
                schema = UsuarioSchema(exclude=['listas'])
                return schema.dump(usuario), 200
            return {"message": "Usuario {} não existe".format(nome)}, 404
        except Exception as e:
            print(e)
            # BUG FIX: previously returned a *set* literal
            # ({"message", "Erro na requisição".format(nome)}), which is not
            # JSON-serializable; return a proper dict instead.
            return {"message": "Erro na requisição"}, 500

    def post(self):
        """Create a user from the JSON body; 400 on duplicate, 500 on error."""
        try:
            data = UsuarioResource.parser.parse_args()
            if not data:
                return {"message": "Requisição sem JSON"}, 400
            if UsuarioModel.encontrar_pelo_nome(data['nome']):
                return {"message": "Usuário ja existe"}, 400
            usuario = UsuarioModel(data['nome'], data['email'])
            usuario.adicionar()
            # Re-fetch so the serialized payload includes DB-assigned fields.
            usuario = UsuarioModel.encontrar_pelo_nome(data['nome'])
            user_schema = UsuarioSchema(exclude=['listas'])
            return user_schema.dump(usuario), 201
        except Exception as ex:
            print(ex)
            return {"message": "erro"}, 500

    def put(self):
        """Update is not implemented yet."""
        # TODO: implement user update; the 201/empty-body response mirrors
        # the original stub so existing clients keep working.
        return '', 201
class UsuariosResource(Resource):
    """REST resource listing every registered user."""

    def get(self):
        """Return all users (without their lists) as JSON, or 500 on failure."""
        try:
            todos = UsuarioModel.listar()
            schema = UsuarioSchema(many=True, exclude=['listas'])
            payload = schema.dump(todos)
        except Exception as e:
            print(e)
            return {"message": "Aconteceu um erro tentando retornar a lista de usuarios."}, 500
        return payload, 200
| StarcoderdataPython |
291756 | <reponame>csmatar/saleor<gh_stars>1-10
from dataclasses import asdict
from smtplib import SMTPNotSupportedError
from unittest.mock import MagicMock, Mock, patch
import pytest
from django.core.exceptions import ValidationError
from django.core.mail.backends.smtp import EmailBackend
from ....core.notify_events import NotifyEventType
from ....graphql.tests.utils import get_graphql_content
from ...email_common import DEFAULT_EMAIL_VALUE, get_email_template
from ...manager import get_plugins_manager
from ...models import PluginConfiguration
from ..constants import (
CSV_EXPORT_FAILED_TEMPLATE_FIELD,
CSV_PRODUCT_EXPORT_SUCCESS_TEMPLATE_FIELD,
SET_STAFF_PASSWORD_TEMPLATE_FIELD,
STAFF_ORDER_CONFIRMATION_TEMPLATE_FIELD,
)
from ..notify_events import (
send_csv_export_failed,
send_csv_product_export_success,
send_set_staff_password_email,
send_staff_order_confirmation,
send_staff_reset_password,
)
from ..plugin import get_admin_event_map
def test_event_map():
    """The admin email plugin maps each supported notify event to its handler."""
    expected = {
        NotifyEventType.STAFF_ORDER_CONFIRMATION: send_staff_order_confirmation,
        NotifyEventType.ACCOUNT_SET_STAFF_PASSWORD: send_set_staff_password_email,
        NotifyEventType.CSV_PRODUCT_EXPORT_SUCCESS: send_csv_product_export_success,
        NotifyEventType.CSV_EXPORT_FAILED: send_csv_export_failed,
        NotifyEventType.ACCOUNT_STAFF_RESET_PASSWORD: send_staff_reset_password,
    }
    assert get_admin_event_map() == expected
@pytest.mark.parametrize(
    "event_type",
    [
        NotifyEventType.STAFF_ORDER_CONFIRMATION,
        NotifyEventType.ACCOUNT_SET_STAFF_PASSWORD,
        NotifyEventType.CSV_PRODUCT_EXPORT_SUCCESS,
        NotifyEventType.CSV_EXPORT_FAILED,
        NotifyEventType.ACCOUNT_STAFF_RESET_PASSWORD,
    ],
)
@patch("saleor.plugins.admin_email.plugin.get_admin_event_map")
def test_notify(mocked_get_event_map, event_type, admin_email_plugin):
    # Every supported admin event must dispatch to its mapped handler with
    # the payload, the plugin configuration (as a dict) and the plugin itself.
    payload = {
        "field1": 1,
        "field2": 2,
    }
    mocked_event = Mock()
    mocked_get_event_map.return_value = {event_type: mocked_event}

    plugin = admin_email_plugin()
    plugin.notify(event_type, payload, previous_value=None)
    mocked_event.assert_called_with(payload, asdict(plugin.config), plugin)
@patch("saleor.plugins.admin_email.plugin.get_admin_event_map")
def test_notify_event_not_related(mocked_get_event_map, admin_email_plugin):
    # A customer-facing event is not handled by the *admin* plugin, even if
    # a handler for it exists in the (mocked) event map.
    event_type = NotifyEventType.ACCOUNT_SET_CUSTOMER_PASSWORD
    payload = {
        "field1": 1,
        "field2": 2,
    }
    mocked_event = Mock()
    mocked_get_event_map.return_value = {event_type: mocked_event}

    plugin = admin_email_plugin()
    plugin.notify(event_type, payload, previous_value=None)
    assert not mocked_event.called
@patch("saleor.plugins.admin_email.plugin.get_admin_event_map")
def test_notify_event_missing_handler(mocked_get_event_map, admin_email_plugin):
    # If the event map has no entry for the event, notify() must not even
    # attempt a lookup/dispatch (no __getitem__ on the map).
    event_type = NotifyEventType.CSV_EXPORT_FAILED
    payload = {
        "field1": 1,
        "field2": 2,
    }

    mocked_event_map = MagicMock()
    mocked_get_event_map.return_value = mocked_event_map

    plugin = admin_email_plugin()
    plugin.notify(event_type, payload, previous_value=None)

    assert not mocked_event_map.__getitem__.called
@patch("saleor.plugins.admin_email.plugin.get_admin_event_map")
def test_notify_event_plugin_is_not_active(mocked_get_event_map, admin_email_plugin):
    # An inactive plugin short-circuits before the event map is consulted.
    event_type = NotifyEventType.CSV_EXPORT_FAILED
    payload = {
        "field1": 1,
        "field2": 2,
    }

    plugin = admin_email_plugin(active=False)
    plugin.notify(event_type, payload, previous_value=None)

    assert not mocked_get_event_map.called
def test_save_plugin_configuration_tls_and_ssl_are_mutually_exclusive(
    admin_email_plugin,
):
    """Enabling both TLS and SSL at the same time must be rejected."""
    email_plugin = admin_email_plugin()
    config_record = PluginConfiguration.objects.get()
    both_enabled = {
        "configuration": [
            {"name": "use_tls", "value": True},
            {"name": "use_ssl", "value": True},
        ]
    }
    with pytest.raises(ValidationError):
        email_plugin.save_plugin_configuration(config_record, both_enabled)
@patch.object(EmailBackend, "open")
def test_save_plugin_configuration(mocked_open, admin_email_plugin):
    """A valid SSL-only configuration saves and verifies the SMTP connection."""
    email_plugin = admin_email_plugin()
    config_record = PluginConfiguration.objects.get()
    ssl_only = {
        "configuration": [
            {"name": "use_tls", "value": False},
            {"name": "use_ssl", "value": True},
        ]
    }
    email_plugin.save_plugin_configuration(config_record, ssl_only)
    # Saving validates the backend by opening (and implicitly closing) it.
    mocked_open.assert_called_with()
@patch.object(EmailBackend, "open")
def test_save_plugin_configuration_incorrect_email_backend_configuration(
    mocked_open, admin_email_plugin
):
    """An SMTP backend that refuses the connection makes the save fail."""
    email_plugin = admin_email_plugin()
    mocked_open.side_effect = SMTPNotSupportedError()
    config_record = PluginConfiguration.objects.get()
    ssl_only = {
        "configuration": [
            {"name": "use_tls", "value": False},
            {"name": "use_ssl", "value": True},
        ]
    }
    with pytest.raises(ValidationError):
        email_plugin.save_plugin_configuration(config_record, ssl_only)
@patch.object(EmailBackend, "open")
def test_save_plugin_configuration_incorrect_template(mocked_open, admin_email_plugin):
    # Handlebars-style template with a deliberate syntax error: the closing
    # tag of the {{#if}} block is "{{/if}" (one brace short). Saving any
    # template field with it must raise ValidationError.
    incorrect_template_str = """
    {{#if order.order_details_url}}
    Thank you for your order. Below is the list of fulfilled products. To see your
    order details please visit:
    <a href="{{ order.order_details_url }}">{{ order.order_details_url }}</a>
    {{else}}
    Thank you for your order. Below is the list of fulfilled products.
    {{/if}
    """  # missing } at the end of the if condition

    plugin = admin_email_plugin()
    configuration = PluginConfiguration.objects.get()
    data_to_save = {
        "configuration": [
            {
                "name": STAFF_ORDER_CONFIRMATION_TEMPLATE_FIELD,
                "value": incorrect_template_str,
            },
            {
                "name": SET_STAFF_PASSWORD_TEMPLATE_FIELD,
                "value": incorrect_template_str,
            },
            {
                "name": CSV_PRODUCT_EXPORT_SUCCESS_TEMPLATE_FIELD,
                "value": incorrect_template_str,
            },
            {"name": CSV_EXPORT_FAILED_TEMPLATE_FIELD, "value": incorrect_template_str},
        ]
    }
    with pytest.raises(ValidationError):
        plugin.save_plugin_configuration(configuration, data_to_save)

    mocked_open.assert_called_with()
def test_get_email_template(admin_email_plugin, admin_email_template):
    """get_email_template returns the DB value when present, else the default."""
    plugin = admin_email_plugin()
    fallback = "Default template"
    # A DB row exists for this template name -> its value wins.
    fetched = get_email_template(plugin, admin_email_template.name, fallback)
    assert fetched == admin_email_template.value
    # Row removed -> the provided default is returned instead.
    admin_email_template.delete()
    fetched = get_email_template(plugin, admin_email_template.name, fallback)
    assert fetched == fallback
@patch.object(EmailBackend, "open")
def test_save_plugin_configuration_creates_email_template_instance(
    mocked_open, admin_email_plugin
):
    """Saving a template field persists an EmailTemplate row on the config."""
    template_str = """Thank you for your order."""
    plugin = admin_email_plugin()
    configuration = PluginConfiguration.objects.get()
    plugin.save_plugin_configuration(
        configuration,
        {
            "configuration": [
                {
                    "name": STAFF_ORDER_CONFIRMATION_TEMPLATE_FIELD,
                    "value": template_str,
                }
            ]
        },
    )
    configuration.refresh_from_db()
    saved_template = configuration.email_templates.get()
    assert saved_template
    assert saved_template.name == STAFF_ORDER_CONFIRMATION_TEMPLATE_FIELD
    assert saved_template.value == template_str
QUERY_GET_PLUGIN = """
query Plugin($id: ID!) {
plugin(id: $id) {
id
name
globalConfiguration {
configuration {
name
value
}
}
}
}
"""
def test_configuration_resolver_returns_email_template_value(
    staff_api_client,
    admin_email_plugin,
    admin_email_template,
    permission_manage_plugins,
):
    """The plugin query exposes the DB-stored email template as a config value."""
    plugin = admin_email_plugin()
    response = staff_api_client.post_graphql(
        QUERY_GET_PLUGIN,
        {"id": plugin.PLUGIN_ID},
        permissions=(permission_manage_plugins,),
    )
    content = get_graphql_content(response)
    config_items = content["data"]["plugin"]["globalConfiguration"]["configuration"]
    # Keep the last matching item, mirroring the original scan order.
    matches = [
        item for item in config_items if item["name"] == admin_email_template.name
    ]
    email_config_item = matches[-1] if matches else None
    assert email_config_item
    assert email_config_item["value"] == admin_email_template.value
def test_plugin_manager_doesnt_load_email_templates_from_db(
    admin_email_plugin, admin_email_template, settings
):
    """Configuration read via the manager keeps the default template value."""
    settings.PLUGINS = ["saleor.plugins.admin_email.plugin.AdminEmailPlugin"]
    manager = get_plugins_manager()
    plugin = manager.all_plugins[0]
    matches = [
        item
        for item in plugin.configuration
        if item["name"] == admin_email_template.name
    ]
    email_config_item = matches[-1] if matches else None
    # Accessing plugin configuration directly from the manager must not load
    # the email template from the DB; the default email value is returned.
    assert email_config_item
    assert email_config_item["value"] == DEFAULT_EMAIL_VALUE
| StarcoderdataPython |
3451439 | import sys
import numpy as np
from Workspace.Interface.MotionCommandInterface import MotionCommandInterface
class MotionState:
    """String constants naming the discrete states of the motion model."""
    STATE_MOVE = 'state_move'    # actively moving
    STATE_SHOOT = 'state_shoot'  # executing a shot along the shoot vector
    STATE_STOP = 'state_stop'    # halted (the MotionModel default)
class MotionKey:
    """Dict keys used by MotionModel.to_dict / from_dict serialization."""
    KEY_TRANSLATION_VECTOR = 'k_trans'  # motion_xy as a 2-element list
    KEY_STATE = 'k_state'               # one of the MotionState constants
    KEY_SHOOT_VECTOR = 'k_shoot_v'      # shoot_xy as a 2-element list
class MotionModel(MotionCommandInterface):
    """Mutable model of a robot's motion commands.

    Holds an (acceleration, rotation) pair in ``motion_xy`` — both expressed
    as percentages in [-1, 1] — a shoot direction vector ``shoot_xy``
    (normalized when possible) and the current :class:`MotionState`.  State
    round-trips through plain dicts via :meth:`to_dict` / :meth:`from_dict`
    using the :class:`MotionKey` keys.
    """

    def __init__(self):
        self.motion_xy: np.ndarray = np.array([0.0, 0.0])  # [acceleration, rotation]
        self.shoot_xy: np.ndarray = np.array([0.0, 1.0])   # default shoot direction
        self.state: str = MotionState.STATE_STOP

    @staticmethod
    def _clamped_percent(percent: float, method_name: str) -> float:
        """Return *percent* clamped to [-1, 1], warning on stderr if out of range."""
        if not -1. <= percent <= 1.:
            # BUGFIX: the warning previously referenced a nonexistent
            # "set_speed" method and read "de percentage".
            print('> [WARN] MotionModel.{} should be a percentage (-1. to 1.)'.format(method_name),
                  file=sys.stderr)
            return -1. if percent < 0 else 1.
        return percent

    def set_acceleration(self, percent: float) -> None:
        """Set the acceleration component; out-of-range values are clamped."""
        self.motion_xy[0] = self._clamped_percent(percent, 'set_acceleration')

    @property
    def acceleration(self):
        """Current acceleration percentage (motion_xy[0])."""
        return self.motion_xy[0]

    @property
    def rotation(self):
        """Current rotation percentage (motion_xy[1])."""
        return self.motion_xy[1]

    def set_rotation(self, percent: float) -> None:
        """Set the rotation component; out-of-range values are clamped."""
        self.motion_xy[1] = self._clamped_percent(percent, 'set_rotation')

    def shoot(self) -> None:
        """Enter the shooting state."""
        self.state = MotionState.STATE_SHOOT

    def move(self) -> None:
        """Enter the moving state."""
        self.state = MotionState.STATE_MOVE

    def stop(self) -> None:
        """Enter the stopped state."""
        self.state = MotionState.STATE_STOP

    def to_dict(self) -> dict:
        """Serialize the full motion state to a plain dict (JSON-friendly)."""
        return {MotionKey.KEY_TRANSLATION_VECTOR: self.motion_xy.tolist(),
                MotionKey.KEY_SHOOT_VECTOR: self.shoot_xy.tolist(),
                MotionKey.KEY_STATE: self.state,
                }

    def from_dict(self, data):
        """Restore state from a dict produced by :meth:`to_dict`; returns self."""
        self.motion_xy = np.array(data[MotionKey.KEY_TRANSLATION_VECTOR])
        self.shoot_xy = np.array(data[MotionKey.KEY_SHOOT_VECTOR])
        self.state = data[MotionKey.KEY_STATE]
        return self

    def set_shoot_dir(self, shoot_dir: np.ndarray):
        """Switch to the shoot state, aiming along *shoot_dir* (normalized)."""
        self.shoot()
        magnitude = np.linalg.norm(shoot_dir)
        # A zero vector cannot be normalized; store it unchanged.
        self.shoot_xy = shoot_dir if not magnitude else shoot_dir / magnitude
| StarcoderdataPython |
11354769 | <filename>steadyStateProcessing.py
# USAGE INSTRUCTIONS
# Search document for 'Rr ='
# Set Rr and data source appropriately.
import uncertainties
from uncertainties.umath import *
from math import pi
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(style="darkgrid")
# Uncertainty of R is 1%. I've calculated the exact value because it was simple.
# Converted scientific notation in my head.
# Circuit constants with measurement uncertainties; L and C feed the series
# impedance wL - 1/(wC), Vs is the source voltage used in I = Vs / |Z|.
L = uncertainties.ufloat(0.01525, 0.00002)
C = uncertainties.ufloat_fromstr('9.91(0.02)e-9')
Vs = uncertainties.ufloat(12, 0.1)
# Format is frequency, voltage, voltage_error
# Readings taken with the 100-ohm resistor (Rr == 100); frequencies are in kHz
# (SSDataPoint multiplies by 1000). Entries are in acquisition order.
raw_data_100 = [(0.1, 0.0175, 0.0025),
                (1, 0.081, 0.002),
                (5, 0.46, 0.004),
                (8, 0.98, 0.004),
                (10, 1.9, 0.02),
                (12.96, 6, 0.08),
                (17, 1.52, 0.02),
                (20, 0.98, 0.02),
                (12, 5.28, 0.08),
                (14, 3.84, 0.02),
                (30, 0.416, 0.008),
                (40, 0.248, 0.004),
                (60, 0.068, 0.002),
                (80, 0.064, 0.002),
                (100, 0.158, 0.002),
                (13.54, 4.76, 0.04),
                (11, 3.08, 0.04),
                (11.52, 4.96, 0.04),
                (11.26, 3.48, 0.04),
                (11.435, 3.8, 0.04),
                (11.6, 4.2, 0.04)]
# Same (frequency kHz, voltage, voltage_error) format, measured with the
# 1000-ohm resistor (Rr == 1000).
raw_data_1000 = [(0.1, 0.092, 0.002),
                 (1, 0.728, 0.002),
                 (5, 4.2, 0.04),
                 (10, 9.64, 0.04),
                 (11, 10.5, 0.1),
                 (12, 10.95, 0.05),
                 (30, 3.82, 0.02),
                 (13, 11, 0.1),
                 (8, 7.3, 0.02),
                 (14, 10.75, 0.05),
                 (15, 10.25, 0.05),
                 (20, 7.16, 0.04),
                 (40, 2.44, 0.04),
                 (60, 0.588, 0.004),
                 (80, 0.556, 0.004),
                 (100, 1.41, 0.005),
                 (90, 1.05, 0.01)]
class SSDataPoint:
    """One steady-state measurement of the series RLC circuit.

    Converts a (frequency in kHz, voltage, voltage error) reading into the
    angular frequency, the current derived from the measured voltage, and the
    theoretical series-RLC current — all with propagated uncertainties.
    """

    def __init__(self, frequency, voltage, voltage_error, resistance):
        # 1% tolerance on the resistor and on the generator frequency.
        self.R = uncertainties.ufloat(resistance, 0.01 * resistance)
        self.f = uncertainties.ufloat(frequency * 1000, 0.01 * 1000 * frequency)
        self.v = uncertainties.ufloat(voltage, voltage_error)
        self.w = 2 * pi * self.f
        # Current calculated from the measured voltage across R (Ohm's law).
        self.I_m = self.v / self.R
        # Theoretical current: I = Vs / |Z| with Z = R + j(wL - 1/(wC)).
        # (Removed a commented-out duplicate of this computation.)
        reactance = self.w * L - 1 / (self.w * C)
        impedance = sqrt(pow(self.R, 2) + pow(reactance, 2))
        self.I_t = Vs / impedance

    def __str__(self):
        return 'Frequency: {} | Voltage: {} | AngFreq: {} | I Meas {} | I Theor {}'.format(
            self.f, self.v, self.w, self.I_m, self.I_t)
def process_raw(raw, resistance):
    """Convert raw (frequency, voltage, voltage_error) tuples to SSDataPoints.

    Guards against pairing a known dataset with the wrong resistor value and
    returns the converted measurements as a set.
    """
    # BUGFIX: validate against the `resistance` argument rather than the
    # module-level Rr, and compare dataset identity instead of length —
    # two datasets of equal length would have defeated the old check.
    if raw is raw_data_1000 and resistance != 1000:
        raise Exception('Wrong dataset')
    elif raw is raw_data_100 and resistance != 100:
        raise Exception('Wrong dataset')
    return {
        SSDataPoint(frequency=f, voltage=v, voltage_error=err, resistance=resistance)
        for f, v, err in raw
    }
def tabularise(data):
    """Build, print and return a DataFrame of angular frequency vs current.

    Columns hold the nominal values and standard deviations of w, the
    measured current Im and the theoretical current It, sorted by w.
    """
    columns = {'w (Hz)': [], 'Im (A)': [], 'It (A)': [],
               'w error': [], 'Im error': [], 'It error': []}
    for point in data:
        columns['w (Hz)'].append(point.w.nominal_value)
        columns['Im (A)'].append(point.I_m.nominal_value)
        columns['It (A)'].append(point.I_t.nominal_value)
        columns['w error'].append(point.w.std_dev)
        columns['Im error'].append(point.I_m.std_dev)
        columns['It error'].append(point.I_t.std_dev)
    table = pd.DataFrame(data=columns).sort_values(by=['w (Hz)'])
    print(table)
    return table
# Look over data
# Calling it Rr so it won't cause bugs if it still floating around somewhere
# as R. Messy but effective.
# Select the dataset to analyse by setting Rr to 100 or 1000 (see usage
# instructions at the top of the file).
Rr = 100
if Rr == 100:
    data = process_raw(raw_data_100, Rr)
elif Rr == 1000:
    data = process_raw(raw_data_1000, Rr)
else:
    raise Exception('Invalid resistance')
# for point in data:
#     print(point)
table = tabularise(data)
# Pull the sorted columns back out of the table for plotting.
w = table.get('w (Hz)')
Im = table.get('Im (A)')
It = table.get('It (A)')
w_er = table.get('w error')
Im_er = table.get('Im error')
It_er = table.get('It error')
# Plot measured vs theoretical current with error bars on both axes.
plt.errorbar(x=w, y=Im, xerr=w_er, yerr=Im_er, ecolor='black', fmt='.-b', capsize=2, label='Measured Current')
plt.errorbar(x=w, y=It, xerr=w_er, yerr=It_er, ecolor='black', fmt='.-g', capsize=2, label='Theoretical Current')
plt.title(label='I vs w for R={}'.format(Rr))
plt.legend(loc='upper right')
plt.xlabel('w (Hz)')
plt.ylabel('I (A)')
plt.show()
| StarcoderdataPython |
4868940 | from model import Model, gen_batches_by_keys, gen_batches_by_size
from train import train_model
| StarcoderdataPython |
156711 |
import os, sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
class TestClass:
def test_import(self):
import superpoint
def test_DeepFNet(self):
from deepFEPE.models.DeepFNet import main
main()
def test_ErrorEstimators(self):
from deepFEPE.models.ErrorEstimators import main
main()
def test_dataloader(self):
import torch
import logging
import yaml
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
logging.info('train on device: %s', device)
config_file = 'deepFEPE/configs/kitti_corr_baseline.yaml'
with open(config_file, 'r') as f:
config = yaml.load(f)
val = 'val'
from deepFEPE.utils.loader import dataLoader
data = dataLoader(config, dataset='kitti_odo_corr', val=val, warp_input=True, val_shuffle=False)
train_loader, val_loader = data['train_loader'], data['val_loader']
logging.info('+++[Dataset]+++ train split size %d in %d batches, val split size %d in %d batches'%\
(len(train_loader)*config['data']['batch_size'], len(train_loader), len(val_loader)*config['data']['batch_size'], len(val_loader)))
def test_one(self):
x = "this"
assert "h" in x
def test_two(self):
x = "hello"
# assert hasattr(x, "check")
| StarcoderdataPython |
6693692 | <reponame>vincenttran-msft/azure-sdk-for-python
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from copy import deepcopy
from typing import Any, Awaitable, Optional, TYPE_CHECKING
from azure.core.rest import AsyncHttpResponse, HttpRequest
from azure.mgmt.core import AsyncARMPipelineClient
from msrest import Deserializer, Serializer
from .. import models
from ._configuration import CdnManagementClientConfiguration
from .operations import AFDCustomDomainsOperations, AFDEndpointsOperations, AFDOriginGroupsOperations, AFDOriginsOperations, AFDProfilesOperations, CdnManagementClientOperationsMixin, CustomDomainsOperations, EdgeNodesOperations, EndpointsOperations, LogAnalyticsOperations, ManagedRuleSetsOperations, Operations, OriginGroupsOperations, OriginsOperations, PoliciesOperations, ProfilesOperations, ResourceUsageOperations, RoutesOperations, RuleSetsOperations, RulesOperations, SecretsOperations, SecurityPoliciesOperations, ValidateOperations
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class CdnManagementClient(CdnManagementClientOperationsMixin):
    """Cdn Management Client.
    :ivar afd_profiles: AFDProfilesOperations operations
    :vartype afd_profiles: azure.mgmt.cdn.aio.operations.AFDProfilesOperations
    :ivar afd_custom_domains: AFDCustomDomainsOperations operations
    :vartype afd_custom_domains: azure.mgmt.cdn.aio.operations.AFDCustomDomainsOperations
    :ivar afd_endpoints: AFDEndpointsOperations operations
    :vartype afd_endpoints: azure.mgmt.cdn.aio.operations.AFDEndpointsOperations
    :ivar afd_origin_groups: AFDOriginGroupsOperations operations
    :vartype afd_origin_groups: azure.mgmt.cdn.aio.operations.AFDOriginGroupsOperations
    :ivar afd_origins: AFDOriginsOperations operations
    :vartype afd_origins: azure.mgmt.cdn.aio.operations.AFDOriginsOperations
    :ivar routes: RoutesOperations operations
    :vartype routes: azure.mgmt.cdn.aio.operations.RoutesOperations
    :ivar rule_sets: RuleSetsOperations operations
    :vartype rule_sets: azure.mgmt.cdn.aio.operations.RuleSetsOperations
    :ivar rules: RulesOperations operations
    :vartype rules: azure.mgmt.cdn.aio.operations.RulesOperations
    :ivar security_policies: SecurityPoliciesOperations operations
    :vartype security_policies: azure.mgmt.cdn.aio.operations.SecurityPoliciesOperations
    :ivar secrets: SecretsOperations operations
    :vartype secrets: azure.mgmt.cdn.aio.operations.SecretsOperations
    :ivar validate: ValidateOperations operations
    :vartype validate: azure.mgmt.cdn.aio.operations.ValidateOperations
    :ivar log_analytics: LogAnalyticsOperations operations
    :vartype log_analytics: azure.mgmt.cdn.aio.operations.LogAnalyticsOperations
    :ivar profiles: ProfilesOperations operations
    :vartype profiles: azure.mgmt.cdn.aio.operations.ProfilesOperations
    :ivar endpoints: EndpointsOperations operations
    :vartype endpoints: azure.mgmt.cdn.aio.operations.EndpointsOperations
    :ivar origins: OriginsOperations operations
    :vartype origins: azure.mgmt.cdn.aio.operations.OriginsOperations
    :ivar origin_groups: OriginGroupsOperations operations
    :vartype origin_groups: azure.mgmt.cdn.aio.operations.OriginGroupsOperations
    :ivar custom_domains: CustomDomainsOperations operations
    :vartype custom_domains: azure.mgmt.cdn.aio.operations.CustomDomainsOperations
    :ivar resource_usage: ResourceUsageOperations operations
    :vartype resource_usage: azure.mgmt.cdn.aio.operations.ResourceUsageOperations
    :ivar operations: Operations operations
    :vartype operations: azure.mgmt.cdn.aio.operations.Operations
    :ivar edge_nodes: EdgeNodesOperations operations
    :vartype edge_nodes: azure.mgmt.cdn.aio.operations.EdgeNodesOperations
    :ivar policies: PoliciesOperations operations
    :vartype policies: azure.mgmt.cdn.aio.operations.PoliciesOperations
    :ivar managed_rule_sets: ManagedRuleSetsOperations operations
    :vartype managed_rule_sets: azure.mgmt.cdn.aio.operations.ManagedRuleSetsOperations
    :param credential: Credential needed for the client to connect to Azure.
    :type credential: ~azure.core.credentials_async.AsyncTokenCredential
    :param subscription_id: Azure Subscription ID.
    :type subscription_id: str
    :param base_url: Service URL. Default value is 'https://management.azure.com'.
    :type base_url: str
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
     Retry-After header is present.
    """
    # NOTE: this client is AutoRest-generated (see the file header); manual
    # edits are lost on regeneration, so only documentation is added here.
    def __init__(
        self,
        credential: "AsyncTokenCredential",
        subscription_id: str,
        base_url: str = "https://management.azure.com",
        **kwargs: Any
    ) -> None:
        # Per-client configuration and the async ARM pipeline through which
        # every operation group sends its requests.
        self._config = CdnManagementClientConfiguration(credential=credential, subscription_id=subscription_id, **kwargs)
        self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
        # Shared (de)serializers built over every generated model class;
        # client-side validation is disabled (the service validates instead).
        client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
        self._serialize = Serializer(client_models)
        self._deserialize = Deserializer(client_models)
        self._serialize.client_side_validation = False
        # Operation-group wiring: one attribute per service area, each sharing
        # the same pipeline, configuration and serializers.
        self.afd_profiles = AFDProfilesOperations(self._client, self._config, self._serialize, self._deserialize)
        self.afd_custom_domains = AFDCustomDomainsOperations(self._client, self._config, self._serialize, self._deserialize)
        self.afd_endpoints = AFDEndpointsOperations(self._client, self._config, self._serialize, self._deserialize)
        self.afd_origin_groups = AFDOriginGroupsOperations(self._client, self._config, self._serialize, self._deserialize)
        self.afd_origins = AFDOriginsOperations(self._client, self._config, self._serialize, self._deserialize)
        self.routes = RoutesOperations(self._client, self._config, self._serialize, self._deserialize)
        self.rule_sets = RuleSetsOperations(self._client, self._config, self._serialize, self._deserialize)
        self.rules = RulesOperations(self._client, self._config, self._serialize, self._deserialize)
        self.security_policies = SecurityPoliciesOperations(self._client, self._config, self._serialize, self._deserialize)
        self.secrets = SecretsOperations(self._client, self._config, self._serialize, self._deserialize)
        self.validate = ValidateOperations(self._client, self._config, self._serialize, self._deserialize)
        self.log_analytics = LogAnalyticsOperations(self._client, self._config, self._serialize, self._deserialize)
        self.profiles = ProfilesOperations(self._client, self._config, self._serialize, self._deserialize)
        self.endpoints = EndpointsOperations(self._client, self._config, self._serialize, self._deserialize)
        self.origins = OriginsOperations(self._client, self._config, self._serialize, self._deserialize)
        self.origin_groups = OriginGroupsOperations(self._client, self._config, self._serialize, self._deserialize)
        self.custom_domains = CustomDomainsOperations(self._client, self._config, self._serialize, self._deserialize)
        self.resource_usage = ResourceUsageOperations(self._client, self._config, self._serialize, self._deserialize)
        self.operations = Operations(self._client, self._config, self._serialize, self._deserialize)
        self.edge_nodes = EdgeNodesOperations(self._client, self._config, self._serialize, self._deserialize)
        self.policies = PoliciesOperations(self._client, self._config, self._serialize, self._deserialize)
        self.managed_rule_sets = ManagedRuleSetsOperations(self._client, self._config, self._serialize, self._deserialize)
    def _send_request(
        self,
        request: HttpRequest,
        **kwargs: Any
    ) -> Awaitable[AsyncHttpResponse]:
        """Runs the network request through the client's chained policies.
        >>> from azure.core.rest import HttpRequest
        >>> request = HttpRequest("GET", "https://www.example.org/")
        <HttpRequest [GET], url: 'https://www.example.org/'>
        >>> response = await client._send_request(request)
        <AsyncHttpResponse: 200 OK>
        For more information on this code flow, see https://aka.ms/azsdk/python/protocol/quickstart
        :param request: The network request you want to make. Required.
        :type request: ~azure.core.rest.HttpRequest
        :keyword bool stream: Whether the response payload will be streamed. Defaults to False.
        :return: The response of your network call. Does not do error handling on your response.
        :rtype: ~azure.core.rest.AsyncHttpResponse
        """
        # Copy so the caller's request object is not mutated by URL formatting.
        request_copy = deepcopy(request)
        request_copy.url = self._client.format_url(request_copy.url)
        return self._client.send_request(request_copy, **kwargs)
    async def close(self) -> None:
        # Close the underlying pipeline client (and its transport/session).
        await self._client.close()
    async def __aenter__(self) -> "CdnManagementClient":
        # Async context manager support: delegate to the pipeline client.
        await self._client.__aenter__()
        return self
    async def __aexit__(self, *exc_details) -> None:
        await self._client.__aexit__(*exc_details)
| StarcoderdataPython |
3330148 | <reponame>Syk326/PythonTest<filename>classes-working.py
#!/usr/bin/env python3
# classes.py by <NAME> [http://bw.org/]
# This is an exercise file from Python 3 Essential Training on lynda.com
# Copyright 2010 The BearHeart Group, LLC
# super/parent class
class Animal:
    """Base animal: default talking, walking and clothing behaviours."""

    def talk(self):
        print('I have something to say')

    def walk(self):
        print("I'm walkin' here!")

    def clothes(self):
        print('I have nice clothes')
# inherits Animal
class Duck(Animal):
    """A duck that stores arbitrary attributes in a key/value bag."""

    def __init__(self, **kwargs):
        # Arbitrary per-duck attributes (e.g. feet=2, color='blue').
        self.variables = kwargs

    def quack(self):
        print('Quaaack!')

    def walk(self):
        # Extend the base behaviour rather than replacing it.
        super().walk()
        print('Walks like a duck.')

    def fur(self):
        print('This duck be furless')

    def bark(self):
        print('The duck doesn\'t bark')

    def set_variable(self, k, v):
        """Store attribute *k* with value *v* in the bag."""
        self.variables[k] = v

    def get_variable(self, k):
        """Return attribute *k*, or None if it was never set."""
        return self.variables.get(k, None)
# inherits Animal
class Dog(Animal):
    """A dog: barks, walks like a dog, and wears a golden fur coat."""

    def quack(self):
        print('The dog doesn\'t quack')

    def walk(self):
        print('Walks like a dog')

    def fur(self):
        print('I have golden fur')

    def bark(self):
        print('Wooooof!')

    def clothes(self):
        # Override the generic Animal.clothes message.
        print('I wear a golden fur coat')
def main():
    """Demo: build a Duck and a Dog and exercise their shared interface."""
    donald = Duck(feet=2)
    donald.quack()
    donald.walk()
    # 'color' was never set, so the getter falls back to None; 'feet' was
    # supplied at construction time.
    print(donald.get_variable('color'))
    print(donald.get_variable('feet'))
    # Attributes live in a dict, so new ones can be added at any time —
    # handy for flags/attributes, persistence, and scaling up later.
    donald.set_variable('color', 'blue')
    print(donald.get_variable('color'))
    spike = Dog()
    spike.clothes()
    # Duck typing: any object exposing these four methods works here,
    # regardless of its concrete class.
    for critter in (donald, spike):
        critter.quack()
        critter.walk()
        critter.bark()
        critter.fur()
    in_the_forest(donald)
    in_the_pond(spike)
# takes any object not just dog, "duck-typing" (if walks & quacks like a duck)
def in_the_forest(dog):
    """Forest scene: the animal barks and shows its fur (duck typing —
    any object with bark() and fur() is accepted, not just a Dog)."""
    for action in (dog.bark, dog.fur):
        action()
# takes any object not just dog, "duck-typing" (if walks & quacks like a duck)
def in_the_pond(duck):
    """Pond scene: the animal quacks and walks (duck typing — any object
    with quack() and walk() is accepted, not just a Duck)."""
    for action in (duck.quack, duck.walk):
        action()
if __name__ == "__main__": main()
| StarcoderdataPython |
3215249 | #!/usr/bin/env python
""" This example shows hoe the GLSL program for a 2D texture
can be modified to achieve all kinds of graphic effects.
In this example we will demonstrate sharpening an image using
unsharp masking.
"""
import visvis as vv
import numpy as np
# First define our part of the shading code.
# The '>>' lines denote which piece of the stock shader we want to replace.
# There is a pre-loop section, that is executed before the anti-aliasing
# loop. We use this to modify the aliasing kernel so it always does a fixed
# amount of smoothing.
# In the post-loop section we combine the subtracted image with the normal
# sampled image. Unsharp masking consists of subtracting the smoothed image
# from the original (thus removing low frequency components), and then adding
# the result with a certain factor to the original.
#
# Note that the aa kernel is symmetric; kernel[0] is the center pixel, and
# kernel[1] through kernel[3] is the tail on all ends.
SH_2F_SHARPEN = vv.shaders.ShaderCodePart('sharpen','unsharp masking',
"""
>>--uniforms--
uniform float amount;
// --uniforms--
>>--pre-loop--
sze = 3; // Use full kernel (otherwise it wont work if t.aa == 0)
kernel = vec4(1.0, 0.9, 0.6, 0.3); // approximate Gauss of sigma 2
float kernel_norm = kernel[0] + (kernel[1] + kernel[2] + kernel[3])*2.0;
kernel /= kernel_norm;
// --pre-loop--
>>--post-loop--
float th = 0.05;
vec4 normalColor = texture2D(texture, pos);
// Element-wise mask on blurred image (color1), using a threshold
float mask = float(length(color1.rgb)-length(color2.rgb)>th);
normalColor.rgb += mask * amount * (normalColor.rgb -color1.rgb);
color1 = normalColor;
// --post-loop--
""")
# Read image
im = vv.imread('lena.png')
# Show two times; the second will be sharpened
vv.subplot(121); t1 = vv.imshow(im)
vv.subplot(122); t2 = vv.imshow(im)
# Share cameras and turn off anti-aliasing for a proper comparison
t1.parent.camera = t2.parent.camera
t1.aa = 0
# Insert our part into the fragment shader program
t2.shader.fragment.AddOrReplace(SH_2F_SHARPEN, after='base')
if False: # Execute this line to turn it off:
    t2.shader.fragment.RemovePart('sharpen')
# Make a slider to set the amount
def sliderCallback(event):
    # Push the slider's value into the 'amount' shader uniform and repaint.
    t2.shader.SetStaticUniform('amount', slider.value)
    t2.Draw()
# Slider over the sharpening amount range [0, 1.5].
slider = vv.Slider(t2.parent, (0.0, 1.5))
slider.position = 0.05, 10, 0.9, 40
slider.eventSliding.Bind(sliderCallback)
sliderCallback(None) # init uniform
# In case there are bugs in the code, it might be helpful to see the code
# t2.fragmentShader.ShowCode() # Shows the whole code
t2.shader.fragment.ShowCode('sharpen') # Shows only our bit, with line numbers
# Run app
app = vv.use()
app.Run()
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.