seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
73435227303 | """setup.py file."""
import uuid
from setuptools import setup, find_packages
# parse_requirements/PipSession moved between pip releases; probe the known
# import locations from newest to oldest.
try:
    # pip >= 20
    from pip._internal.network.session import PipSession
    from pip._internal.req import parse_requirements
except ImportError:
    try:
        # 10.0.0 <= pip <= 19.3.1
        from pip._internal.download import PipSession
        from pip._internal.req import parse_requirements
    except ImportError:
        # pip <= 9.0.3
        from pip.download import PipSession
        from pip.req import parse_requirements
__author__ = 'Andreas Thienemann <andreas@bawue.net>'
# Read install dependencies from requirements.txt.  parse_requirements expects
# a real PipSession; the previous code passed a uuid, which only worked by
# accident on very old pip versions.
install_reqs = parse_requirements('requirements.txt', session=PipSession())
# pip < 20 yields InstallRequirement objects with `.req`; pip >= 20 yields
# ParsedRequirement objects exposing the string as `.requirement`.
reqs = [str(ir.req) if hasattr(ir, 'req') else str(ir.requirement)
        for ir in install_reqs]
setup(
    name="napalm-procurve",
    version="0.7.0",
    packages=find_packages(),
    author="Andreas Thienemann",
    author_email="andreas@bawue.net",
    description="Network Automation and Programmability Abstraction Layer (NAPALM) ProCurve driver",
    long_description="ProCurve driver support for Napalm network automation.",
    classifiers=[
        'Topic :: Utilities',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Operating System :: POSIX :: Linux',
        'Operating System :: MacOS',
    ],
    url="https://github.com/ixs/napalm-procurve",
    include_package_data=True,
    zip_safe=False,
    install_requires=reqs,
)
| ixs/napalm-procurve | setup.py | setup.py | py | 1,391 | python | en | code | 21 | github-code | 36 | [
{
"api_name": "pip.req.parse_requirements",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "uuid.uuid1",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "setuptools.setup",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "setuptools.fin... |
15369619842 | from contextlib import contextmanager
from typing import Union
from apiens.error import exc
from .base import ConvertsToBaseApiExceptionInterface
@contextmanager
def converting_unexpected_errors(*, exc=exc):
    """ Translate unexpected Python exceptions into a human-friendly F_UNEXPECTED_ERROR Application Error

    Acts as a catch-all guard: every expected failure should already be an instance of
    `exc.BaseApplicationError`; any other Python exception is treated as unexpected and
    wrapped into an `exc.F_UNEXPECTED_ERROR`.
    When the exception implements `default_api_error()`, that hook decides the replacement error instead (!)

    Raises:
        exc.F_UNEXPECTED_ERROR: for unexpected Python errors
    """
    try:
        yield
    except Exception as unexpected:
        raise convert_unexpected_error(unexpected, exc=exc)
def convert_unexpected_error(error: Union[Exception, exc.BaseApplicationError], *, exc=exc) -> exc.BaseApplicationError:
""" Given an exception, convert it into a `F_UNEXPECTED_ERROR` if it's not a BaseApplicationError already """
# `exc.BaseApplicationError` remain as they are
if isinstance(error, exc.BaseApplicationError):
return error
# Exception defines a way to convert into API error
if isinstance(error, ConvertsToBaseApiExceptionInterface):
new_error = error.default_api_error()
if new_error is not None:
return new_error
# All other errors are unexpected
return exc.F_UNEXPECTED_ERROR.from_exception(error) | kolypto/py-apiens | apiens/error/converting/exception.py | exception.py | py | 1,528 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "apiens.error.exc",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "apiens.error.exc",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "contextlib.contextmanager",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "typing.Uni... |
3102977676 | import json
import os
from flask import Flask, render_template, request
import tensorflow as tf
from tensorflow.contrib import predictor
# Pin TensorFlow to the first GPU only.
os.environ['CUDA_VISIBLE_DEVICES']='0'
app = Flask(__name__)
print("# Load lm model...")
# Allow op placement fallback and cap per-process GPU memory at 90%.
config = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)
config.gpu_options.per_process_gpu_memory_fraction = 0.9
sess = tf.Session(config=config)
# Load the exported TF1 SavedModel predictor once at startup.
predict_fn = predictor.from_saved_model("/data/xueyou/car/comment/lm/score/0/")
@app.route('/predict', methods=['POST'])
def predict():
    """Score the POSTed JSON payload with the language model and return the
    'ppl' values as a JSON list of floats."""
    payload = request.get_json(silent=True)
    perplexities = predict_fn(payload)['ppl']
    return json.dumps([float(score) for score in perplexities])
if __name__ == '__main__':
app.run(host='0.0.0.0', port=9193, debug=False,threaded=True) | flyliu2017/mask_comments | data_process/lm.py | lm.py | py | 766 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.environ",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "flask.Flask",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "tensorflow.ConfigProto",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "tensorflow.Session",... |
8017126240 | import datetime as dt
import logging
from sqlalchemy import (
Column,
ForeignKey,
MetaData,
Table,
UniqueConstraint,
create_engine,
)
from sqlalchemy.dialects import postgresql as pg
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import backref, relationship, sessionmaker
from sqlalchemy.sql.sqltypes import Boolean, Date, DateTime, Integer, String, Text, Time
from app.core.config import settings
logger = logging.getLogger(__name__)
db = create_engine(settings.SQLALCHEMY_DATABASE_URI, pool_size=40, max_overflow=10)
Base = declarative_base()
meta = MetaData(db)
Session = sessionmaker(bind=db, autoflush=False)
class User(Base):
    """A Telegram user; the Telegram user id doubles as the primary key."""
    __tablename__ = "users"
    # Telegram user id (assigned by Telegram, hence no autoincrement).
    tg_id = Column(
        Integer,
        autoincrement=False,
        primary_key=True,
    )
    tg_username = Column(
        String,
        nullable=False,
    )
    # Group the student belongs to; NULL until the user picks one.
    students_group_id = Column(
        Integer,
        ForeignKey("students_groups.id"),
        nullable=True,
    )
    is_admin = Column(
        Boolean,
        nullable=False,
        default=False,
    )
    is_group_moderator = Column(
        Boolean,
        nullable=False,
        default=False,
    )
    # Timestamp of the user's last interaction; set to "now" on insert.
    last_active = Column(
        DateTime,
        nullable=False,
        default=dt.datetime.now,
    )
    students_group = relationship("StudentsGroup", back_populates="students")
    # Lesson subgroups this user is a member of (via lessons_subgroups_members).
    subgroups = relationship("Lesson",
                             secondary="lessons_subgroups_members",
                             backref=backref("students", lazy="dynamic"),
                             )
    def __repr__(self):
        # NOTE(review): the repr string is missing its closing ">".
        return "<User(tg_id={}, tg_username={})".format(self.tg_id, self.tg_username)
class StudentsGroup(Base):
    """An academic group of students belonging to one faculty."""
    __tablename__ = "students_groups"
    id = Column(
        Integer,
        primary_key=True,
    )
    name = Column(
        String,
        nullable=False,
    )
    # Course (study-year) number.
    course = Column(
        Integer,
        nullable=False,
    )
    faculty_id = Column(
        Integer,
        ForeignKey("faculties.id"),
        nullable=False,
    )
    students = relationship("User", order_by=User.tg_id, back_populates="students_group")
    lessons = relationship("Lesson", back_populates="students_group")
    faculty = relationship("Faculty", back_populates="groups")
    requests = relationship("Request", back_populates="students_group")
    def __repr__(self):
        return "<StudentsGroup(id={}, name={})>".format(self.id, self.name)
class Faculty(Base):
    """A faculty; each students group references exactly one faculty."""
    __tablename__ = "faculties"
    id = Column(
        Integer,
        primary_key=True,
    )
    name = Column(
        String,
        nullable=False,
        unique=True,
    )
    # Short display form (abbreviation) of the faculty name.
    shortcut = Column(
        String,
        nullable=False,
    )
    groups = relationship("StudentsGroup", back_populates="faculty")
    def __repr__(self):
        return "<Faculty(id={}, name={})>".format(self.id, self.name)
class SingleLesson(Base):
    """One concrete occurrence of a Lesson on a specific date and time slot."""
    __tablename__ = "single_lessons"
    id = Column(
        Integer,
        primary_key=True,
    )
    date = Column(
        Date,
        nullable=False,
    )
    starts_at = Column(
        Time,
        nullable=False,
    )
    ends_at = Column(
        Time,
        nullable=False,
    )
    lesson_id = Column(
        Integer,
        ForeignKey("lessons.id"),
        nullable=False,
    )
    # Optional free-form note attached to this occurrence.
    comment = Column(
        String,
        nullable=True,
    )
    lesson = relationship("Lesson")
    # The same lesson cannot occupy the same date/time slot twice.
    __table_args__ = (
        UniqueConstraint("lesson_id", "date", "starts_at", "ends_at",
                         name="timetable_lesson_complex_key"),
    )
    def __repr__(self):
        return "<SingleLesson(id={}, lesson_id={}, date={}, starts_at={})>" \
            .format(self.id, self.lesson_id, self.date, self.starts_at)
# Association table: which teachers teach which lessons (many-to-many).
LessonTeacher = Table(
    "lessons_teachers", Base.metadata,
    Column("lesson_id", Integer, ForeignKey("lessons.id")),
    Column("teacher_id", Integer, ForeignKey("teachers.id")),
)
# If lesson is divided into subgroups, match each one with its members (users)
LessonSubgroupMember = Table(
    "lessons_subgroups_members", Base.metadata,
    Column("lesson_id", Integer, ForeignKey("lessons.id")),
    Column("user_id", Integer, ForeignKey("users.tg_id")),
)
class Lesson(Base):
    """A recurring class (subject) taught to a students group, optionally
    split into subgroups."""
    __tablename__ = "lessons"
    id = Column(
        Integer,
        primary_key=True,
    )
    name = Column(
        String,
        nullable=False,
    )
    students_group_id = Column(
        Integer,
        ForeignKey("students_groups.id"),
        nullable=False,
    )
    # Subgroup label; NULL when the whole group attends together.
    subgroup = Column(
        String,
        nullable=True,
    )
    # 0 - lecture, 1 - seminar, 2 - practical, 3 - lab, 4 - other
    lesson_format = Column(
        Integer,
        nullable=False,
    )
    # Online meeting link, if any.
    link = Column(
        String,
        nullable=True,
    )
    teachers = relationship(
        "Teacher",
        secondary=LessonTeacher,
        backref="lessons",
    )
    students_group = relationship("StudentsGroup", back_populates="lessons")
    __table_args__ = (
        UniqueConstraint("name", "subgroup", "students_group_id", "lesson_format",
                         name="lesson_complex_key"),
    )
    def represent_lesson_format(self):
        """Human-readable (Ukrainian) name of the lesson_format code."""
        # TODO: move to enum with representation ability
        names = {
            0: "лекція",
            1: "семінар",
            2: "практика",
            3: "лабораторна",
            4: "інш.",
        }
        return names[self.lesson_format]
    def __repr__(self):
        return "<Lesson(id={}, name={})>".format(self.id, self.name)
    def __str__(self):
        # Append format and teacher short names only for subgroup lessons,
        # where the extra context is needed to tell subgroups apart.
        name = "{}".format(self.name)
        if self.subgroup is not None:
            teachers = ", ".join([t.short_name for t in self.teachers])
            name += " ({}, {})".format(self.represent_lesson_format(), teachers)
        return name
class Teacher(Base):
    """A teacher; linked to lessons through the LessonTeacher table."""
    __tablename__ = "teachers"
    id = Column(
        Integer,
        primary_key=True,
    )
    first_name = Column(
        String,
        nullable=False,
    )
    last_name = Column(
        String,
        nullable=False,
    )
    middle_name = Column(
        String,
        nullable=False,
    )
    def __repr__(self):
        return "<Teacher(id={}, first_name={}, last_name={}, middle_name={})>" \
            .format(self.id, self.first_name, self.last_name, self.middle_name)
    def __str__(self):
        return self.full_name
    @property
    def full_name(self):
        """Full "Last First Middle" name with surrounding whitespace stripped."""
        return " ".join((self.last_name, self.first_name, self.middle_name)).strip()
    @property
    def short_name(self):
        """Abbreviated "Last F. M." form; falls back to the last name alone
        when the first or middle name is empty."""
        if self.first_name and self.middle_name:
            return "{} {}. {}.".format(self.last_name, self.first_name[0], self.middle_name[0])
        return self.last_name
class Request(Base):
    """
    Requests from common users to students group moderator
    to change something in a timetable, post some messages to the channel, etc
    """
    __tablename__ = "requests"
    id = Column(
        Integer,
        primary_key=True,
    )
    # student group this request relates to
    students_group_id = Column(
        Integer,
        ForeignKey("students_groups.id"),
        nullable=False,
    )
    # user who proposed this request
    initiator_id = Column(
        Integer,
        ForeignKey("users.tg_id"),
        nullable=False,
    )
    # moderator who received this request
    moderator_id = Column(
        Integer,
        ForeignKey("users.tg_id"),
        nullable=False,
    )
    # text of the moderator message
    message = Column(
        Text,
        nullable=False,
    )
    # callback data for the 'Accept' button
    accept_callback = Column(
        Text,
        nullable=False,
    )
    # callback data for the 'Reject' button
    reject_callback = Column(
        Text,
        nullable=False,
    )
    # request meta (e.g. {"lesson_id": 1, "link": "https://dc.zoom.us/xxx"})
    meta = Column(
        pg.JSONB,
        nullable=False,
    )
    # set once the moderator accepts or rejects the request
    is_resolved = Column(
        Boolean,
        nullable=False,
        default=False,
    )
    students_group = relationship("StudentsGroup", back_populates="requests")
    # Both initiator and moderator reference users.tg_id, so the FK for each
    # relationship must be named explicitly.
    initiator = relationship("User", foreign_keys=[initiator_id])
    moderator = relationship("User", foreign_keys=[moderator_id])
    def __repr__(self):
        return "<Request(id={}, students_group_id={})>" \
            .format(self.id, self.students_group_id)
| iterlace/knu_assistant | assistant/src/app/database.py | database.py | py | 8,494 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.create_engine",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "app.core.config.settings.SQLALCHEMY_DATABASE_URI",
"line_number": 21,
"usage_type": "attribute... |
73915028905 | import sqlite3
import utils
import csv
con = sqlite3.connect('bookswort.sqlite')
cur = con.cursor()
def positionStdDev(Books_id,Words_id):
    """Sample standard deviation of a word's positions within a book.

    Returns 0 when the word occurs fewer than twice, where stdev is undefined.
    """
    import statistics
    cur.execute('''SELECT position FROM Textorder WHERE Books_id = ? AND
    Words_id = ?''', (Books_id,Words_id))
    positions = [row[0] for row in cur.fetchall()]
    if len(positions) < 2:
        return 0
    return statistics.stdev(positions)
def buildSummary(Books_id,limit):
    """Recompute the Summary rows for one book: per-million frequency, a
    rarity-weighted "usage" score and a positional "spread" score for the
    `limit` most frequent words with status 1 or 2."""
    import math
    # Start from a clean slate for this book.
    cur.execute('''DELETE FROM Summary WHERE Books_id = ?''',(Books_id,))
    cur.execute('''SELECT Words.word,Words.id,Counts.count FROM Words JOIN Counts
        ON Counts.Words_id = Words.id AND Counts.Books_id = ?
        AND (Words.status = 1 OR Words.status = 2)
        ORDER BY Counts.count DESC LIMIT ? ''',(Books_id,limit))
    wordsforanalysis = cur.fetchall()
    #How many unique words in book
    # NOTE(review): uniquewords is computed but never used below.
    cur.execute('''SELECT COUNT(Words.id) FROM Words JOIN Counts
        ON Counts.Words_id = Words.id AND Counts.Books_id = ?''',(Books_id,))
    uniquewords = cur.fetchone()[0]
    #Calculate book word Count
    cur.execute(''' SELECT SUM(Counts.count) FROM Counts
        WHERE Counts.books_id = ? ''', (Books_id,))
    totalwords = cur.fetchone()[0]
    for record in wordsforanalysis:
        Words_id = record[1]
        wordcount = record[2]
        cur.execute('SELECT permillion FROM Words WHERE id = ?', (Words_id,))
        permilliondb = cur.fetchone()[0]
        cur.execute('SELECT status FROM Words WHERE id = ?', (Words_id,))
        status = cur.fetchone()[0]
        # Calculate usage
        calcpermillion = wordcount / totalwords * 1000000
        if status == 1:
            # Known word: weight in-book frequency by global rarity (idf-style).
            usage = calcpermillion * math.log10(1/permilliondb)
        elif status == 2:
            # Unknown word: contributes no usage score.
            usage = 0
        spread = positionStdDev(Books_id,Words_id) * usage
        # Insert or update the Summary record for (book, word).
        cur.execute('SELECT * FROM Summary WHERE Books_id = ? AND Words_id=?',(Books_id,Words_id))
        row = cur.fetchone()
        if row is None: #Record does not exist in db
            cur.execute('''INSERT INTO Summary (Books_id, Words_id, permillion, usage, spread, statusref)
                VALUES (?,?,?,?,?,?)''',(Books_id, Words_id, calcpermillion, usage, spread, status))
        else:
            cur.execute('''UPDATE Summary SET permillion=?,usage=?,spread=?,statusref=? WHERE Books_id = ?
                AND Words_id=?''', (calcpermillion,usage,spread,status,Books_id,Words_id))
    con.commit()
    return
def normalizeCriteria(criteria):
    """Min-max normalize a {key: score} dict to the [0, 1] range.

    Args:
        criteria: non-empty mapping of key -> numeric score.

    Returns:
        dict with the same keys, values linearly rescaled so the smallest
        becomes 0.0 and the largest 1.0.  When all scores are equal
        (including a single-entry dict) every value maps to 0.0 instead of
        raising ZeroDivisionError as the previous implementation did.
    """
    maximum = max(criteria.values())
    minimum = min(criteria.values())
    span = maximum - minimum
    if span == 0:
        # Degenerate case: no spread to normalize over.
        return {key: 0.0 for key in criteria}
    return {key: (value - minimum) / span for key, value in criteria.items()}
def getKeywords(Books_id, howmany):
    """Rank the book's words by a weighted combination of normalized usage,
    spread and unknown-word frequency; return the top `howmany` words as a
    list of (word, score) tuples sorted by descending score."""
    usages = {}
    cur.execute('''SELECT Words_id, usage FROM Summary WHERE Books_id = ? AND usage IS NOT NULL
        ORDER BY usage DESC''',(Books_id,))
    for row in cur.fetchall():
        usages[row[0]] = row[1]
    normUsages = normalizeCriteria(usages)
    spreads = {}
    cur.execute('''SELECT Words_id, spread FROM Summary WHERE Books_id = ? AND spread IS NOT NULL
        ORDER BY spread DESC''',(Books_id,))
    for row in cur.fetchall():
        spreads[row[0]] = row[1]
    normSpreads = normalizeCriteria(spreads)
    unknowns = {}
    cur.execute('''SELECT Words_id, permillion FROM Summary WHERE Books_id = ? AND statusref = 2
        ORDER BY permillion DESC''',(Books_id,))
    for row in cur.fetchall():
        unknowns[row[0]] = row[1]
    normUnknowns = normalizeCriteria(unknowns)
    #Using weights from Rank exponent weight method, p=7, 3 criterion ranked as:
    # 1.usage, 2.spread & 3.unknown, Sum of weights = 1
    usageWeight = 0.9443#0.6111
    spreadWeight = 0.0553#0.2778
    unknownWeight = 0.0004#0.1111
    # Weighted sum per word; a word missing from a criterion contributes 0 to it.
    scores = {}
    for word_id in normUsages:
        scores[word_id] = normUsages[word_id] * usageWeight
    for word_id in normSpreads:
        scores[word_id] = scores.get(word_id,0) + normSpreads[word_id] * spreadWeight
    for word_id in normUnknowns:
        scores[word_id] = scores.get(word_id,0) + normUnknowns[word_id] * unknownWeight
    # Resolve word ids back to their spelling.
    keywords = {}
    for word_id in scores:
        cur.execute('SELECT word FROM Words WHERE id = ?',(word_id,))
        keywords[cur.fetchone()[0]] = round(scores[word_id],4)
    # Sort by score descending, then trim to the requested count.
    sortedkeywords = sorted([(value, key) for (key,value) in keywords.items()],reverse=True)
    return [ (key,value) for (value,key) in sortedkeywords ][:int(howmany)]
# This produces a CSV file with all the analysis from Summary to use for improving algorithm parameters
def rawOutputSummary(Books_id):
    """Dump the full Summary/Counts join for a book to rawoutput.csv,
    echoing every row to stdout on the way."""
    cur.execute('''SELECT Words.word,Counts.count, Summary.permillion, Summary.usage,
        Summary.spread, Summary.statusref
        FROM Words JOIN Summary JOIN Counts
        ON Words.id = Summary.Words_id AND Summary.Books_id = ?
        AND Counts.Books_id=Summary.Books_id AND Counts.Words_id = Summary.Words_id
        ORDER BY Counts.count DESC''',(Books_id,))
    with open('rawoutput.csv','w',newline='') as outfile:
        csv_out = csv.writer(outfile)
        csv_out.writerow(('Word','Count','perMillion','usage','spread','StatusRef'))
        for record in cur.fetchall():
            csv_out.writerow(record)
            print(record)
    print('--- File rawoutput.csv was generated with results ---')
    return
# Interactive entry point: show stats, pick a book, then loop over commands.
utils.printWordsStats()
utils.printBooks()
Books_id = utils.inputBookid()
#TODO Merge Summary table with Counts table
cur.executescript('''
CREATE TABLE IF NOT EXISTS Summary (
    Books_id INTEGER,
    Words_id INTEGER,
    permillion REAL,
    usage REAL,
    spread REAL,
    statusref INTEGER,
    PRIMARY KEY (Books_id, Words_id)
)
''')
while True:
    inputtext = input('(1)Build Summary, (2)Generate Keywords, (3)Raw Output, (q) Quit :')
    if inputtext == '1':
        limit = input('limit summary to how many of the top words, type <<all>> to use all words): ')
        if limit == 'all':
            limit = 10000000 #TODO Put actual limit from SQL Query
        buildSummary(Books_id,limit)
    elif inputtext == '2':
        n = input('How many keywords?')
        keywords = getKeywords(Books_id,n)
        # Spread the font sizes across 20-100 based on the count
        bigsize = 75
        smallsize = 15
        highest = max([ word[1] for word in keywords ])
        lowest = min([ word[1] for word in keywords ])
        # Emit a d3-style word list for the word-cloud visualization page.
        fhand = open('visualization/gword.js','w')
        fhand.write("gword = [")
        first = True
        for word, score in keywords:
            if not first : fhand.write( ",\n")
            first = False
            size = score
            # Rescale score into the [smallsize, smallsize + bigsize] font range.
            size = (size - lowest) / float(highest - lowest)
            size = int((size * bigsize) + smallsize)
            fhand.write("{text: '"+word+"', size: "+str(size)+"}")
        fhand.write( "\n];\n")
        fhand.close()
        print("Output written to gword.js")
        print("Open gword.htm in a browser to see the vizualization")
        # Also save the keyword list as CSV.
        fwriter = open('keywords.csv','w',newline='')
        with fwriter:
            writer = csv.writer(fwriter)
            for row in keywords:
                writer.writerow(row)
        print('--- File keywords.csv was generated with results ---')
    elif inputtext == '3':
        rawOutputSummary(Books_id)
    elif inputtext == 'q':
        break
| jevargas-m/bookdistillery | analyzebook.py | analyzebook.py | py | 7,639 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sqlite3.connect",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "statistics.stdev",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "math.log10",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "csv.writer",
"line_num... |
1202982324 | from enum import Enum
import re
import datetime as dt
import pytz
from os.path import basename
from pandas import read_csv, to_datetime
class StrEnum(str, Enum):
    """Enum whose members are strings; str() yields the raw value."""
    def __str__(self):
        return self._value_

    @classmethod
    def values(cls):
        """All member values, in definition order."""
        return [member.value for member in cls.__members__.values()]
class EnrichedOHLCVN(object):
    """Schema of the EnrichedOHLCVN bar table: its field and tag names."""
    @classmethod
    def name(cls):
        # Measurement/table name as stored in the datastore.
        return 'EnrichedOHLCVN'
    class Fields(StrEnum):
        """Column (field) names of a bar record."""
        OPEN = 'open'
        CLOSE = 'close'
        HIGH = 'high'
        HASK = 'hask'
        HBID = 'hbid'
        LOW = 'low'
        LASK = 'lask'
        LBID = 'lbid'
        CASK1 = 'cask1'
        CASK2 = 'cask2'
        CASK3 = 'cask3'
        CASKV1 = 'caskv1'
        CASKV2 = 'caskv2'
        CASKV3 = 'caskv3'
        CBID1 = 'cbid1'
        CBID2 = 'cbid2'
        CBID3 = 'cbid3'
        CBIDV1 = 'cbidv1'
        CBIDV2 = 'cbidv2'
        CBIDV3 = 'cbidv3'
        TBUYC = 'tbuyc'
        TSELLC = 'tsellc'
        TBUYV = 'tbuyv'
        TSELLV = 'tsellv'
        TBUYVWAP = 'tbuyvwap'
        TSELLVWAP = 'tsellvwap'
        TWABMP = 'twabmp'
        NET_VOLUME = 'net_volume'
        VOLUME = 'volume'
        SOFTWARE_TIME = 'software_time'
        TRIGGER_TIME = 'trigger_time'
        EXCH_TIME = 'exch_time'
    class Tags(StrEnum):
        """Index (tag) names a bar series is keyed by."""
        PRODUCT = 'product'
        TYPE = 'type'
        EXPIRY = 'expiry'
        CLOCK_TYPE = 'clock_type'
        WIDTH = 'width'
        OFFSET = 'offset'
class ContinuousContract(object):
    """Schema of the continuous_contract table: its field and tag names."""
    @classmethod
    def name(cls):
        # Measurement/table name as stored in the datastore.
        return 'continuous_contract'
    class Fields(StrEnum):
        """Column (field) names."""
        SHORT_CODE = 'short_code'
        TIME_ZONE = 'time_zone'
    class Tags(StrEnum):
        """Index (tag) names."""
        EXPIRY = 'expiry'
        PRODUCT = 'product'
        ROLL_STRATEGY = 'roll_strategy'
        TYPE = 'type'
class Basedb(object):
    """Base datastore configuration: shared constants and the table registry."""
    # Sentinel used for missing/undefined numeric values.
    UNDEFINED = 999999999998
    ENRICHEDOHLCVN = EnrichedOHLCVN.name()
    TABLE = 'table'
    # Maps table name -> schema class.
    TABLES = {ENRICHEDOHLCVN: EnrichedOHLCVN}
class Quantdb1(Basedb):
    """Connection settings for the quantdb1 host."""
    DBNAME = 'bar'
    USERNAME = 'root'
    PASSWORD = 'root123'
    HOSTNAME = 'lcldn-quantdb1'
    PORT = 8086
class Quantsim1(Basedb):
    """Connection settings and table registry for the quantsim1 host."""
    DBNAME = 'bar_data'
    USERNAME = 'root'
    PASSWORD = 'root123'
    HOSTNAME = 'lcmint-quantsim1'
    PORT = 8086
    ENRICHEDOHLCVN = EnrichedOHLCVN.name()
    CONTINUOUS_CONTRACT = ContinuousContract.name()
    # Maps table name -> schema class.
    TABLES = {ENRICHEDOHLCVN: EnrichedOHLCVN,
              CONTINUOUS_CONTRACT: ContinuousContract}
class Lcmquantldn1(Basedb):
    """File-based datastore on the lcmquantldn1 host: CSV bar files laid out
    in a directory hierarchy under BASEDIR."""
    BASEDIR = '/opt/data'
    HOSTNAME = 'lcmquantldn1'
    class EnrichedOHLCVN(EnrichedOHLCVN):
        """Extends the base schema with the on-disk file layout."""
        YEAR = 'year'
        # Directory nesting order of the data files.
        FILE_STRUCTURE = [
            EnrichedOHLCVN.Tags.TYPE,
            EnrichedOHLCVN.Tags.PRODUCT,
            EnrichedOHLCVN.Tags.EXPIRY,
            Basedb.TABLE,
            EnrichedOHLCVN.Tags.CLOCK_TYPE,
            EnrichedOHLCVN.Tags.WIDTH,
            YEAR]
        DATE_FMT = '%Y%m%d'
        TIMEZONE = pytz.UTC
        @classmethod
        def date_from_filename(cls, fn):
            # The first 8-digit run in the basename is taken as the file's
            # date and localized to UTC.
            fn = basename(fn)
            return cls.TIMEZONE.localize(dt.datetime.strptime(re.search('[0-9]{8}', fn).group(), cls.DATE_FMT))
        @classmethod
        def read_func(cls):
            # CSV reader: the first column holds integer timestamps parsed via
            # to_datetime, localized to UTC and used as the index.
            return lambda x: read_csv(x,
                                      parse_dates=[0],
                                      date_parser=lambda y: cls.TIMEZONE.localize(to_datetime(int(y))),
                                      index_col=0)
    ENRICHEDOHLCVN = EnrichedOHLCVN.name()
    CONTINUOUS_CONTRACT = ContinuousContract.name()
    # Maps table name -> schema class.
    TABLES = {ENRICHEDOHLCVN: EnrichedOHLCVN,
              CONTINUOUS_CONTRACT: ContinuousContract}
# TABLE = 'table'
# PRODUCT = 'product'
# PTYPE = 'type'
# EXPIRY = 'expiry'
# CLOCK = 'clock'
# WIDTH = 'width'
# YEAR = 'year'
#
# FILE_STRUCTURE = [
# PTYPE,
# PRODUCT,
# EXPIRY,
# TABLE,
# CLOCK,
# WIDTH,
# YEAR]
#
# DATE_FMT = '%Y%m%d'
#
# @classmethod
# def date_from_filename(cls, fn):
# fn = basename(fn)
# return dt.datetime.strptime(re.search('[0-9]{8}', fn).group(), cls.DATE_FMT)
#
#
# @classmethod
# def read_func(cls):
# return lambda x: read_csv(x, parse_dates=[0], date_parser=lambda x: to_datetime(int(x)), index_col=0)
dbbox_configs = {'quantdb1': Quantdb1,
'quantsim1': Quantsim1,
'lcmquantldn1': Lcmquantldn1} | sheryllan/tickdb | bar_checks/bar/datastore_config.py | datastore_config.py | py | 4,478 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "enum.Enum",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "pytz.UTC",
"line_number": 145,
"usage_type": "attribute"
},
{
"api_name": "os.path.basename",
"line_number": 149,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.strptime... |
12131483929 | from youtube_transcript_api import YouTubeTranscriptApi
import re
import string
import copy
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from PIL import Image
from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
from sklearn.feature_extraction.text import CountVectorizer
import spacy
from spacy import displacy
import nltk
from nltk.stem import WordNetLemmatizer
nltk.download('wordnet')
import streamlit as st
import spacy_streamlit
spacy_model = "en_core_web_sm"
import spacy
nlp = spacy.load(spacy_model)
from PIL import Image
###### UTILS ######
# Treat the empty string as a stopword (an artifact of splitting on spaces).
STOPWORDS.add('')
wordnet_lemmatizer = WordNetLemmatizer()
def find_words_time_range(start, end, df):
    """Collect the words spoken within a time range of the video.

    Args:
        start: range start in seconds; frames with 'start' >= this are kept.
        end: range end in seconds; frames with 'end' <= this are kept.
        df: DataFrame with numeric 'start'/'end' columns and a 'text' column
            holding the list of words of each caption frame.

    Returns:
        A single list concatenating the 'text' lists of every frame fully
        inside [start, end] (via Series.sum); 0 when no frame matches
        (pandas' empty-sum default).
    """
    # The original `start = start` / `end = end` self-assignments were no-ops
    # and have been removed.
    in_range = (df['start'] >= start) & (df['end'] <= end)
    return df.loc[in_range, 'text'].sum()
@st.cache
def load_image(img):
    """Open an image file/path with PIL (memoized via st.cache)."""
    return Image.open(img)
def main():
    """Streamlit app entry point: renders the sidebar menu and one of four
    pages (home, two exploratory-analysis pages, NER)."""
    st.title('Desafio 1STi')
    menu = ['Home','Análises Exploratórias 1','Análises Exploratórias 2','NER']
    choice = st.sidebar.selectbox('Menu',menu,)
    if choice == 'Home':
        # Static description page (Portuguese copy kept verbatim).
        st.markdown('# O desafio')
        st.markdown('O desafio de Data Science da 1STI teve os seguintes requisitos:')
        st.markdown('1. Obter a transcrição de um vídeo do youtube')
        st.markdown('2. Obter insights a partir de análise exploratória')
        st.markdown('3. Aplicar um modelo de reconhecimento de entidades pré-treinado utilizando a transcrição como input')
        st.markdown('4. [EXTRA] Prototipar a solução utilizando a biblioteca streamlit')
        st.markdown('# Objetivo')
        st.markdown('Meu objetivo durante o desafio foi entender e estudar as ferramentas e bibliotecas utilizadas em NLP,\
        criar visualizações e análises que fizessem sentido para a geração de insights e expor via interface gráfica de maneira a criar uma ferramenta \
        facil de utilizar')
        st.markdown('# Principais dificuldades')
        st.markdown('Os principais desafios foram:')
        st.markdown('1. Encontrar vídeos que fizessem sentido aplicar um modelo NER, pois percebi que pelo objetivo do modelo alguns vídeos não gerariam insights relevantes')
        st.markdown('2. Aprender detalhes de funcionamento da biblioteca Streamlit e montar uma interface que fizesse sentido com o meu objetivo')
    elif choice == 'Análises Exploratórias 1':
        st.subheader('Análises de palavras em todo o vídeo')
        raw_docx = st.text_area('Insira a parte final do URL do vídeo a ser analisado','fC9da6eqaqg') if False else st.text_area('Insira a parte final da URL do vídeo a ser analisado','fC9da6eqaqg')
        if st.button("Enter"):
            srt = YouTubeTranscriptApi.get_transcript(raw_docx)
            ### Pre-processing: whole transcript ###
            # Join the caption strings into one text.
            txt_raw = " ".join([item['text'] for item in srt])
            st.markdown('# Texto transcrito')
            st.write(txt_raw)
            # Normalize: strip whitespace chars, lower-case, drop punctuation
            # (keeping apostrophes) and standalone numbers.
            txt_raw = re.sub('\t|\n',' ',txt_raw)
            # NOTE(review): the next assignment starts again from txt_raw, so
            # this quote removal is discarded — confirm intent.
            txt = re.sub('“|”',' ',txt_raw)
            txt = txt_raw.lower()
            txt = txt.translate(str.maketrans('', '', string.punctuation.replace("'",'')))
            txt = re.sub(r'(^|\s)\d+($|\s)',' ',txt)
            txt = txt.replace("’","'")
            # Word list without stopwords, verb-lemmatized.
            list_words = [wordnet_lemmatizer.lemmatize(word,pos= 'v') for word in txt.split(' ') if not word in STOPWORDS]
            ### Pre-processing: text over time ###
            # Load the caption frames into a DataFrame.
            srt_time_processing = pd.DataFrame(srt)
            # Add an "end" column per frame.
            srt_time_processing['end'] = srt_time_processing['start'] + srt_time_processing['duration']
            # Same string normalization, applied per frame.
            srt_time_processing['text'] = srt_time_processing['text'].str.lower()
            srt_time_processing['text'] = srt_time_processing['text'].apply(lambda t: t.translate(str.maketrans('', '', string.punctuation.replace("'",''))))
            srt_time_processing['text'] = srt_time_processing['text'].str.replace('\t|\n|“|”',' ',regex=True)
            srt_time_processing['text'] = srt_time_processing['text'].str.replace('’',"'")
            srt_time_processing['text'] = srt_time_processing['text'].str.replace(r'(^|\s)\d+($|\s)',' ',regex = True)
            # Drop stopwords and build the per-frame word list.
            srt_time_processing['text'] = srt_time_processing['text'].apply(lambda t: [wordnet_lemmatizer.lemmatize(word,pos= 'v') for word in t.split(' ') if not word in STOPWORDS])
            ####### Word counts ##########
            st.markdown('# Contagem de palavras')
            vectorizer = CountVectorizer()
            X = vectorizer.fit_transform([' '.join(list_words)])
            # Display counts, most frequent first.
            st.write(pd.Series(index = vectorizer.get_feature_names(),
                               data = X.toarray()[0]).sort_values(ascending = False).to_frame(name='Contagem'))
            # Generate the word-cloud figure.
            wordcloud = WordCloud(stopwords=STOPWORDS,
                                  background_color="black",
                                  width=1600, height=800).generate(' '.join(list_words))
            # Plot
            fig, ax = plt.subplots(figsize=(10,6))
            ax.imshow(wordcloud, interpolation='bilinear')
            ax.set_axis_off()
            st.pyplot(fig)
            ###### Bigram counts ######
            st.markdown('# Contagem de bigramas')
            vectorizer = CountVectorizer(ngram_range = (2,2))
            X = vectorizer.fit_transform([' '.join(list_words)])
            st.write(pd.Series(index = vectorizer.get_feature_names(),
                               data = X.toarray()[0]).sort_values(ascending = False).to_frame(name='Contagem'))
            # Word cloud with collocations enabled (bigrams).
            wordcloud = WordCloud(stopwords=STOPWORDS,
                                  background_color="black",
                                  collocation_threshold = 3,
                                  width=1600, height=800).generate(' '.join(list_words))
            # Plot
            fig, ax = plt.subplots(figsize=(10,6))
            ax.imshow(wordcloud, interpolation='bilinear')
            ax.set_axis_off()
            st.pyplot(fig)
            ##### Words per second ######
            st.markdown('# Palavras por segundo no tempo de vídeo')
            # Speaking "speed" per caption frame.
            srt_time_processing['quantity'] = srt_time_processing['text'].apply(lambda x: len(x))
            srt_time_processing['quantity per sec'] = (srt_time_processing['quantity'])/(srt_time_processing['duration'])
            # Plot
            fig, ax = plt.subplots()
            ax.plot(srt_time_processing['start'],srt_time_processing['quantity per sec'])
            ax.set_xlabel("Tempo do vídeo (em segundos)")
            ax.set_ylabel("Quantidade de palavras por segundo")
            st.pyplot(fig)
    elif choice == 'Análises Exploratórias 2':
        st.subheader('Análise de palavras em um range de tempo')
        # Fetch the video transcript from YouTube.
        raw_docx = st.text_area('Insira a parte final da URL do vídeo a ser analisado','fC9da6eqaqg')
        srt = YouTubeTranscriptApi.get_transcript(raw_docx)
        # Time-range slider spanning the whole video (seconds).
        values = st.slider('Select a range of values',0.0, srt[-1]['start']+srt[-1]['duration'], (0.0, srt[-1]['start']+srt[-1]['duration']))
        if st.button("Enter"):
            ## Preprocessing ##
            # Load the caption frames into a DataFrame.
            srt_time_processing = pd.DataFrame(srt)
            # Add an "end" column per frame.
            srt_time_processing['end'] = srt_time_processing['start'] + srt_time_processing['duration']
            # String normalization, applied per frame.
            srt_time_processing['text'] = srt_time_processing['text'].str.lower()
            srt_time_processing['text'] = srt_time_processing['text'].apply(lambda t: t.translate(str.maketrans('', '', string.punctuation.replace("'",''))))
            srt_time_processing['text'] = srt_time_processing['text'].str.replace('\t|\n|“|”',' ',regex=True)
            srt_time_processing['text'] = srt_time_processing['text'].str.replace('’',"'")
            srt_time_processing['text'] = srt_time_processing['text'].str.replace(r'(^|\s)\d+($|\s)',' ',regex = True)
            # Drop stopwords and build the per-frame word list.
            srt_time_processing['text'] = srt_time_processing['text'].apply(lambda t: [wordnet_lemmatizer.lemmatize(word,pos= 'v') for word in t.split(' ') if not word in STOPWORDS])
            # Words falling inside the selected range.
            list_words_range = find_words_time_range(values[0],values[1],srt_time_processing)
            ####### Word counts ##########
            st.markdown('# Contagem de palavras')
            vectorizer = CountVectorizer()
            X = vectorizer.fit_transform([' '.join(list_words_range)])
            # Display counts, most frequent first.
            st.write(pd.Series(index = vectorizer.get_feature_names(),
                               data = X.toarray()[0]).sort_values(ascending = False).to_frame(name = 'Contagem'))
            # Generate the word-cloud figure.
            wordcloud = WordCloud(stopwords=STOPWORDS,
                                  background_color="black",
                                  width=1600, height=800).generate(' '.join(list_words_range))
            # Plot
            fig, ax = plt.subplots(figsize=(10,6))
            ax.imshow(wordcloud, interpolation='bilinear')
            ax.set_axis_off()
            st.pyplot(fig)
            ###### Bigram counts ######
            st.markdown('# Contagem de bigramas')
            vectorizer = CountVectorizer(ngram_range = (2,2))
            X = vectorizer.fit_transform([' '.join(list_words_range)])
            st.write(pd.Series(index = vectorizer.get_feature_names(),
                               data = X.toarray()[0]).sort_values(ascending = False).to_frame(name='Contagem'))
            # Word cloud with collocations enabled (bigrams).
            wordcloud = WordCloud(stopwords=STOPWORDS,
                                  background_color="black",
                                  collocation_threshold = 3,
                                  width=1600, height=800).generate(' '.join(list_words_range))
            # Plot
            fig, ax = plt.subplots(figsize=(10,6))
            ax.imshow(wordcloud, interpolation='bilinear')
            ax.set_axis_off()
            st.pyplot(fig)
    elif choice == 'NER':
        st.subheader('Named Entity Recognizer')
        raw_docx = st.text_area('Insira a parte final da URL do vídeo a ser analisado','fC9da6eqaqg')
        if st.button("Enter"):
            srt = YouTubeTranscriptApi.get_transcript(raw_docx)
            # Join the caption strings into one text.
            txt_raw = " ".join([item['text'] for item in srt])
            docx = nlp(txt_raw)
            # Render spaCy's named-entity view for the whole transcript.
            spacy_streamlit.visualize_ner(docx, labels=nlp.get_pipe("ner").labels)
if __name__ == '__main__':
main() | vitmesquita/desafio_1sti | script/main.py | main.py | py | 9,747 | python | pt | code | 0 | github-code | 36 | [
{
"api_name": "nltk.download",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "spacy.load",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "wordcloud.STOPWORDS.add",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "wordcloud.STOPWORDS"... |
41824110115 | from pandas.io.parsers import read_csv
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import numpy as np
def carga_csv(file_name):
"""carga"""
valores = read_csv(file_name, header=None).values
return valores.astype(float)
def gradiente(X,Y,Theta, alpha):
NuevaTheta = Theta
m = np.shape(X)[0]
n = np.shape(X)[1]
H = np.dot(X,Theta)
Aux = (H - Y)
for i in range(n):
Aux_i = Aux * X[:, i]
NuevaTheta[i] -= (alpha / m) * Aux_i.sum()
return NuevaTheta
def coste(X, Y, Theta):
H = np.dot(X, Theta)
Aux = (H - Y) ** 2
return Aux.sum() / (2 * len(X))
def descenso_gradiente(X,Y, alpha):
var = [0,0,0]
lista = [ ]
mini = np.inf
iteracion = [i for i in range(1500)]
for i in range(1500):
var = gradiente(X,Y,var.copy(), alpha) #Conseguimos recta para saber los puntos que están mas cercana
mini = min(mini, coste(X,Y,var))
lista.append(mini)
plt.plot(iteracion, lista)
return var, lista
def normalizar(X):
media = np.mean(X)
desviacion = np.std(X)
X = (X - media) / desviacion
mu = np.dot(X[:,1:], media)
sigma = np.dot(X[:,1:], desviacion)
return mu, sigma, X
def main():
datos = carga_csv('ex1data2.csv')
X = datos[:, :-1]
Y = datos[:, -1]
m = np.shape(X)[0]
n = np.shape(X)[1]
mu, sigma, nueva_X = normalizar(X)
nueva_X = np.hstack([np.ones([m, 1]), nueva_X]) #añadimos una columna de 1's a la X
alpha = 0.3
Thetas, costes = descenso_gradiente(nueva_X, Y, alpha)
alpha_list = [0.001,0.003,0.01,0.03,0.1,0.3]
for alpha in alpha_list:
thetas,costes = descenso_gradiente(nueva_X,Y,alpha)
plt.legend(alpha_list)
plt.show()
main()
| nesi73/AA | Practica 1/Practica1.2.py | Practica1.2.py | py | 1,873 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.io.parsers.read_csv",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "numpy.shape",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy.shape",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "numpy.dot",
"l... |
42090081109 | from pathlib import Path
import logging
import shlex
import subprocess
import sys
# Returns the URIs of all the files inside 'path'.
def get_uri_list(path: Path) -> list:
uri_list = []
for file in path.iterdir():
if file.is_file():
uri_list.append(file.as_uri())
if uri_list == []:
logging.error(f'Please insert a wallpaper inside "{path}".')
sys.exit(1)
return uri_list
def run_command_with_arg(command: str, wallpaper: str) -> None:
# Split the command into tokens and append the properly formatted wallpaper URI.
subprocess.run(shlex.split(command) + [wallpaper.replace(' ', '%20')])
# Checks if folders inside directory tree exist, and if not, creates them.
def check_dir_tree_exists(dir_list: list) -> None:
for dir in dir_list:
if type(dir) is list:
check_dir_tree_exists(dir)
elif not dir.exists():
logging.warning(f'Folder {dir} does not exist! Creating it now...')
dir.mkdir(parents=True, exist_ok=True)
# Checks if the log file exists inside the specified folder, and if not, creates it.
def check_log_file_exists(log_file_dir: Path):
if not log_file_dir.exists():
logging.warning(f'No log file found in {log_file_dir}! Creating it now...')
log_file_dir.parent.mkdir(parents=True, exist_ok=True)
log_file_dir.touch(exist_ok=True)
# Appends the "root" wallpaper path to each mode's path.
def append_wallpaper_path(root: Path, mode_list: list) -> None:
for mode in mode_list:
for n, path in enumerate(mode.path_list):
mode.path_list[n] = root.joinpath(path) | Dante-010/change_wallpaper.py | helper_functions.py | helper_functions.py | py | 1,642 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pathlib.Path",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "logging.error",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "subprocess.run",
"line_number"... |
37746699511 | import requests
from bs4 import BeautifulSoup
from lxml.html import fromstring
from itertools import cycle
import traceback
class Scrape:
proxies = ""
proxy_pool = ""
def __init__(self):
self.proxies = self.get_proxies()
self.proxy_pool = cycle(self.proxies)
def get_proxies(self):
url = 'https://free-proxy-list.net/'
response = requests.get(url)
parser = fromstring(response.text)
proxies = set()
for i in parser.xpath('//tbody/tr')[:10]:
if i.xpath('.//td[7][contains(text(),"yes")]'):
#Grabbing IP and corresponding PORT
proxy = ":".join([i.xpath('.//td[1]/text()')[0], i.xpath('.//td[2]/text()')[0]])
proxies.add(proxy)
return proxies
def getAlbumCoverURL(self, artist_name, song_name):
artist_name = artist_name.replace('.', '').replace(' ','').replace("'", "").lower()
song_name = song_name.replace(' ','-').replace("'", "").lower()
track_slug = '-'.join([artist_name, song_name])
client_access_token = '3u-fiiKZSn0n4jUO-cuxNamghC92pdYZQrnbzzb7AxuEMjnPcbJU71i5rytUhipZ'
base_url = 'https://api.genius.com'
path = 'search/'
request_uri = '/'.join([base_url, path])
# query = 'jcole-change'
params = {'q': track_slug}
token = 'Bearer {}'.format(client_access_token)
headers = {'Authorization' : token}
r = requests.get(request_uri, params=params, headers=headers)
json = r.json()
album_img = json['response']['hits'][0]['result']['header_image_url']
return album_img
def getSongLyrics(self, artist_name, song_name):
artist_name = artist_name.replace('.', '').replace(' ','').replace("'", "").lower()
song_name, sep, tail = song_name.partition('(')
song_name = song_name.strip().replace(' ','-').replace("'", "").replace(".", "").lower()
lyric_url = 'https://www.azlyrics.com/lyrics/{}/{}.html'.format(artist_name, song_name)
proxy = next(self.proxy_pool)
while(proxy):
try:
r = requests.get(lyric_url, proxies={'http':proxy, 'https':proxy})
start_idx = r.text.find('that. -->')
lyrics = r.text[start_idx::]
last_idx = lyrics.find('</div>')
lyrics = lyrics[10:last_idx]
lyrics = lyrics.replace('<br>', '')
if lyrics == "":
proxy = next(self.proxy_pool)
else:
return lyrics
except:
print('Request denied, going next')
proxy = next(self.proxy_pool)
return 'Failed'
# soup = BeautifulSoup(r.text, 'html.parser')
# print(soup.prettify())
def getTrackList(self, artist_name, album_name):
artist_name = artist_name.replace('.', '').replace(' ','-').lower()
album_name = album_name.replace('.',' ').strip().replace(' ', '-').lower()
album_url = 'https://genius.com/albums/{}/{}'.format(artist_name, album_name)
r = requests.get(album_url)
soup = BeautifulSoup(r.text, 'html.parser')
muddy_list = soup.select('h3[class="chart_row-content-title"]')
tracks = []
for ele in muddy_list:
ele = str(ele)
start_idx = ele.find('e">')
last_idx = ele.find('<s')
track = ele[start_idx+3:last_idx].strip()
tracks.append(track)
return tracks
def createSlug(self, artist_name, song_name):
artist_name = artist_name.replace('.', '').replace(' ','').replace("'", "").lower()
song_name = song_name.replace(' ','-').replace("'", "").lower()
track_slug = '-'.join([artist_name, song_name])
return track_slug.strip('-')
| msheroubi/album-central | main/scripts/scrape - Proxy Loop.py | scrape - Proxy Loop.py | py | 3,457 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "itertools.cycle",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "lxml.html.fromstring",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "requests.get",
... |
32143174021 | import os
import SimpleITK as sitk
import slicer
from math import pi
import numpy as np
# read input volumes
fixedImageFilename = '/Users/peterbehringer/MyImageData/ProstateRegistrationValidation/Images/Case1-t2ax-intraop.nrrd'
movingImageFilename= '/Users/peterbehringer/MyImageData/ProstateRegistrationValidation/Images/Case1-t2ax-N4.nrrd'
fixedVolume=sitk.ReadImage(fixedImageFilename, sitk.sitkFloat32)
movingVolume=sitk.ReadImage(movingImageFilename, sitk.sitkFloat32)
# read input masks
fixedMaskFilename = '/Users/peterbehringer/MyImageData/ProstateRegistrationValidation/Segmentations/Rater1/Case1-t2ax-intraop-TG-rater1.nrrd'
movingMaskFilename= '/Users/peterbehringer/MyImageData/ProstateRegistrationValidation/Segmentations/Rater1/Case1-t2ax-TG-rater1.nrrd'
fixedMask=sitk.ReadImage(fixedMaskFilename, sitk.sitkFloat32)
movingMask=sitk.ReadImage(movingMaskFilename, sitk.sitkFloat32)
# set output file paths
outputTransform = '/Users/peterbehringer/MyTesting/SimpleITK_Tests/OutTrans_Rigid_1.h5'
outputVolume = '/Users/peterbehringer/MyTesting/SimpleITK_Tests/OutVol_Rigid_1.nrrd'
outputTransform_Initializer = '/Users/peterbehringer/MyTesting/SimpleITK_Tests/OutTrans_Initializer_Rigid__1.h5'
ctx1Data = '/Users/peterbehringer/MyTesting/SimpleITK_Tests/ctx1.h5'
ctx2Data = '/Users/peterbehringer/MyTesting/SimpleITK_Tests/ctx2.h5'
eulerTransPath = '/Users/peterbehringer/MyTesting/SimpleITK_Tests/PetersTry_InitialTrans.h5'
eulerTransPathAfterRotation = '/Users/peterbehringer/MyTesting/SimpleITK_Tests/eulerTransAfterRotation.h5'
rotatedImage='/Users/peterbehringer/MyTesting/SimpleITK_Tests/Rotated_image_1.nrrd'
bestEulerTransPath='/Users/peterbehringer/MyTesting/SimpleITK_Tests/PetersTry_AfterEulerRotation.h5'
outTxPath='/Users/peterbehringer/MyTesting/SimpleITK_Tests/PetersTry_AfterRigid.h5'
quickSetVersorInitial='/Users/peterbehringer/MyTesting/SimpleITK_Tests/quickVersorInitial.h5'
# INITIALIZATION
# _______________________________
# Initialize ImageRegistrationMethod()
Reg=sitk.ImageRegistrationMethod()
Reg.SetMetricFixedMask(fixedMask)
Reg.SetMetricMovingMask(movingMask)
Reg.SetMetricAsCorrelation()
Reg.SetInterpolator(sitk.sitkLinear)
Reg.SetOptimizerAsRegularStepGradientDescent(learningRate=2.0,
minStep=1e-4,
numberOfIterations=1,
gradientMagnitudeTolerance=1e-8 )
# Set the Euler3DTransform
eulerTrans=sitk.Euler3DTransform(sitk.CenteredTransformInitializer(fixedMask,movingMask,sitk.Euler3DTransform()))
Reg.SetInitialTransform(eulerTrans)
# write the transform
sitk.WriteTransform(eulerTrans,eulerTransPath)
# ROTATE & MEASURE METRIC
# like here https://github.com/BRAINSia/BRAINSTools/blob/19fa37dfbdee37deff4ccee412bb601f7a787bda/BRAINSCommonLib/BRAINSFitHelperTemplate.hxx#L325-L339
# _______________________________
one_degree=1.0*pi/180.0
axAngleRange = 1.0
# sagAngleRange = 12.0
axStepSize = 3.0 * one_degree
# sagStepSize = 3.0 * one_degree
# set current Metric value
initialMetricValue= Reg.MetricEvaluate(fixedVolume,movingVolume)
# initialize output Transform with Translation from CenteredTransformInitializer
bestEulerTrans=sitk.Euler3DTransform(sitk.CenteredTransformInitializer(fixedMask,movingMask,sitk.Euler3DTransform()))
for axAngle in np.arange(-axAngleRange,axAngleRange,axStepSize):
#for sagAngle in np.arange(-sagAngleRange,sagAngleRange,sagStepSize):
eulerTrans.SetRotation(0,0,axAngle)
print ('current axAngle : '+str(axAngle))
Reg.SetInitialTransform(eulerTrans)
currentMetricValue = Reg.MetricEvaluate(fixedVolume,movingVolume)
if currentMetricValue < initialMetricValue:
print ('new best axAngle : '+str(axAngle))
bestEulerTrans.SetRotation(0,0,axAngle)
# print ('new best Euler Trans found at sagAngle = '+str(sagAngle)+' and axAngle = '+str(axAngle))
sitk.WriteTransform(bestEulerTrans,bestEulerTransPath)
# RIGID REGISTRATION PHASE
# _______________________________
quickSetVersor=sitk.VersorRigid3DTransform()
# quickSetVersor.SetCenter(bestEulerTrans.GetCenter())
# quickSetVersor.SetTranslation(bestEulerTrans.GetTranslation())
# quickSetVersor.SetMatrix(bestEulerTrans.GetMatrix())
sitk.WriteTransform(quickSetVersor,quickSetVersorInitial)
Reg2=sitk.ImageRegistrationMethod()
Reg2.SetInitialTransform(quickSetVersor)
Reg2.SetMetricAsCorrelation()
Reg2.SetMetricFixedMask(fixedMask)
Reg2.SetMetricMovingMask(movingMask)
Reg2.SetInterpolator(sitk.sitkLinear)
# BRAINSFIT IGT SLICER 3.6 PARAMS
# --minimumStepLength 0.005
# --numberOfIterations 1500
# --translationScale 1000
# BRAINSFitHelperTemplate.hxx PARAMS
# m_MaximumStepLength(0.2)
# m_MinimumStepLength(1, 0.005)
# m_RelaxationFactor(0.5)
# m_ProjectedGradientTolerance(1e-5)
# PARAMS SetOptimizerAsRegularStepGradientDescent
# double learningRate, ?
# double minStep, 0.005
# unsigned int numberOfIterations, 100 (1500 actually)
# double relaxationFactor 0.5
# double gradientMagnitudeTolerance 1e-5
# EstimateLearningRateType estimateLearningRate=Never ?
# double maximumStepSizeInPhysicalUnits 0.2
Reg2.SetOptimizerAsRegularStepGradientDescent(learningRate=2.0,
minStep=0.005,
numberOfIterations=100,
relaxationFactor = 0.5,
gradientMagnitudeTolerance=1e-5,
maximumStepSizeInPhysicalUnits=0.2)
outTx = Reg2.Execute(fixedVolume, movingVolume)
sitk.WriteTransform(outTx,outTxPath)
| PeterBehringer/BRAINSFit_to_SimpleITK | _old/simpleITK_EulerRotation.py | simpleITK_EulerRotation.py | py | 5,841 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "SimpleITK.ReadImage",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "SimpleITK.sitkFloat32",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "SimpleITK.ReadImage",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "Si... |
18645146697 | from pyttsx3 import init
from speech_recognition import Recognizer,Microphone
from webbrowser import open
import wikipedia
from datetime import date,datetime
# pip install pyttsx3 speech_recognition,wikipedia
''' speaking methord'''
def speak(output):
engine =init()
engine.setProperty('rate',120)
engine.say(output)
engine.runAndWait()
''' listing methord'''
def listen():
while True:
r = Recognizer()
r.energy_threshold =300
with Microphone() as source:
r.adjust_for_ambient_noise(source, duration = 1)
audio =r.listen(source)
speak("sir plaese say ")
try:
input =r.recognize_google(audio)
check(input)
except:
speak("sir plaese repeat")
''' Check methord'''
def check(input):
input =input.lower()
if 'date' in input:
today = date.today()
speak("Today's date:", today)
return
if 'time' in input:
now = datetime.now()
current_time = now.strftime("%H:%M:%S")
speak("Current Time =", current_time)
return
if 'open google' in input:
speak("open google")
open("www.google.com")
return
if 'wikipedia' in input:
speak("Serching sir...")
input =input.replace("wikipedia","")
result =wikipedia.summary(input,sentences =2)
speak(result)
return
if 'open facebook' in input:
speak("open facebook")
open("www.facebook.com")
return
if 'open youtube' in input:
speak("What you want on youtube")
open(f"www.youtube.com")
return
if 'open stackoverflow' in input:
speak("open stackoverflow")
open("www.stackoverflow.com")
return
if 'open instagram' in input:
speak("open instagram")
open("www.instagram.com")
return
if 'open gmail' in input:
speak("open gmail")
open("www.gmail.com")
return
else:
speak("Sorry sir i don't do this")
if __name__ == "__main__":
listen()
| MohitKumar-stack/personal-computer-assistance | jarvis.py | jarvis.py | py | 2,308 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "pyttsx3.init",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "speech_recognition.Recognizer",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "speech_recognition.Microphone",
"line_number": 22,
"usage_type": "call"
},
{
"api_name... |
74612450025 | #有序字典
from collections import OrderedDict
from pyexcel_xls import get_data
def readXlsAndXlsxFile(path):
#创建一个有序的字典,excel中的数据都是有顺序的,所以用有序字典
dic = OrderedDict()
#抓取数据
xdata = get_data(path) #OrderedDict([('Sheet1', [['年度', '总人口(万人)', '出生人口(万人)', '死亡人口(万人)'.....
for sheet in xdata:
dic[sheet] = xdata[sheet]
return dic
path1 =r"C:\Users\Lenovo\Desktop\课程设计\aleardy deal data.xls"
path2 =r"C:\Users\Lenovo\Desktop\课程设计\2015data.xlsx"
readXlsAndXlsxFile(path2)
dic = readXlsAndXlsxFile(path2)
print(dic) | hanyb-sudo/hanyb | 自动化办公/4、excel自动化办公/3、返回xls和xlsx文件内容.py | 3、返回xls和xlsx文件内容.py | py | 666 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "collections.OrderedDict",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pyexcel_xls.get_data",
"line_number": 11,
"usage_type": "call"
}
] |
18924228815 | from selenium import webdriver
from selenium.webdriver.common.by import By
import time
class SwitchToFrame():
def test1(self):
baseUrl = "https://letskodeit.teachable.com/pages/practice"
driver = webdriver.Firefox()
driver.maximize_window()
driver.get(baseUrl)
driver.find_element(By.ID, "name").send_keys("Anil")
driver.find_element(By.ID, "alertbtn").click()
time.sleep(2)
alert1 = driver.switch_to.alert
alert1.accept()
time.sleep(2)
driver.find_element(By.ID, "name").send_keys("Anil")
driver.find_element(By.ID, "confirmbtn").click()
time.sleep(2)
alert2 = driver.switch_to.alert
alert2.dismiss()
ff = SwitchToFrame()
ff.test1() | PacktPublishing/-Selenium-WebDriver-With-Python-3.x---Novice-To-Ninja-v- | CODES/S23 - Selenium WebDriver -_ Switch Window And IFrames/5_switch-to-alert.py | 5_switch-to-alert.py | py | 764 | python | en | code | 11 | github-code | 36 | [
{
"api_name": "selenium.webdriver.Firefox",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.ID",
"line_number": 14,
"usage_type": "attribute"
},
{
... |
3585776354 | # Local Imports
from utils import BertBatchInput, BertSingleDatasetOutput, BertBatchOutput
# Standard Imports
# Third Party Imports
import torch
from torch import nn
from torch.nn import MSELoss
from transformers import BertPreTrainedModel, BertModel
class ArgStrModel(BertPreTrainedModel):
"""Handles weights and regressor initialization. Adjusted forward pass to allow for multi task learning."""
def __init__(self, config=None, dropout_prob=0.2, bert_hidden_layers=None,
mlp_config=None, task_dict=None, device=None):
super(ArgStrModel, self).__init__(config)
self.bert = BertModel(config)
self.hidden_size = config.hidden_size
self.dropout_prob = dropout_prob
self.sigmoid = nn.Sigmoid()
self.relu = nn.ReLU()
self.bert_hidden_layers = bert_hidden_layers
self.mlp_config = mlp_config
self.task_name = config.finetuning_task
if "MTLAS" in config.finetuning_task:
self.regressors = nn.ModuleDict()
for dataset in task_dict:
self.regressors[dataset] = self.generate_regressors(device)
else:
self.regressor = self.generate_regressors(device)
def generate_regressors(self, device):
"""
:param device:
:return:
"""
last_dim = self.hidden_size * self.bert_hidden_layers
layers = []
if self.mlp_config == 1:
layers.append(nn.Linear(in_features=last_dim, out_features=512, bias=False))
# layers.append(nn.BatchNorm1d(num_features=512))
layers.append(nn.ReLU())
if self.dropout_prob is not None:
layers.append(nn.Dropout(p=self.dropout_prob))
last_dim = 512
elif self.mlp_config == 2:
layers.append(nn.Linear(in_features=last_dim, out_features=100, bias=False))
# layers.append(nn.BatchNorm1d(num_features=100))
layers.append(nn.ReLU())
if self.dropout_prob is not None:
layers.append(nn.Dropout(p=self.dropout_prob))
last_dim = 100
elif self.mlp_config == 3:
layers.append(nn.Linear(in_features=last_dim, out_features=512, bias=False))
# layers.append(nn.BatchNorm1d(num_features=512))
layers.append(nn.ReLU())
if self.dropout_prob is not None:
layers.append(nn.Dropout(p=self.dropout_prob))
layers.append(nn.Linear(in_features=512, out_features=100, bias=False))
# layers.append(nn.BatchNorm1d(num_features=100))
layers.append(nn.ReLU())
if self.dropout_prob is not None:
layers.append(nn.Dropout(p=self.dropout_prob))
last_dim = 100
else:
layers.append(nn.Linear(in_features=last_dim, out_features=512, bias=False))
# layers.append(nn.BatchNorm1d(num_features=512))
layers.append(nn.ReLU())
if self.dropout_prob is not None:
layers.append(nn.Dropout(p=self.dropout_prob))
layers.append(nn.Linear(in_features=512, out_features=256, bias=False))
# layers.append(nn.BatchNorm1d(num_features=256))
layers.append(nn.ReLU())
if self.dropout_prob is not None:
layers.append(nn.Dropout(p=self.dropout_prob))
last_dim = 256
layers.append(nn.Linear(in_features=last_dim, out_features=1, bias=True))
layers.append(nn.Sigmoid())
return nn.Sequential(*layers).to(device)
def get_or_create_regressor(self, dataset, device):
"""Creates or returns already created regressor for a task.
:param dataset:
Dataset being processed: gretz, toledo, swanson, UKPRank.
:param device:
Device (CPU or GPU) that will be used for training
:return: Regressor that will be used for the forward pass.
"""
if self.regressors[dataset] is not None:
return self.regressors[dataset]
else:
self.regressors[dataset] = self.generate_regressors(device)
return self.regressors[dataset]
def forward(
self,
bert_batch_input: BertBatchInput,
calculate_loss: bool = True
):
"""
Performs a forward pass. In particular, separates output logits for each dataset and uses a dataset-specific
regression head (multi-task learning). Outputs are separated and returned for each dataset.
:param bert_batch_input:
The input infos needed to perform a forward pass (dataset, features, ...)
:param calculate_loss:
To calculate the MSELoss between the logits and losses.
:returns:
BertBatchOutput
"""
single_dataset_outputs = []
for bert_batch_input in bert_batch_input.bert_single_dataset_inputs:
labels = bert_batch_input.labels
data_set = bert_batch_input.data_set
outputs = self.bert(
bert_batch_input.input_ids,
attention_mask=bert_batch_input.attention_mask,
)
hidden_states = outputs[-1]
out = torch.cat(tuple([hidden_states[-i] for i in range(1, self.bert_hidden_layers + 1)]), dim=-1)
# Pooling by also setting masked items to zero
bert_mask = bert_batch_input.attention_mask.unsqueeze(2)
# Multiply output with mask to only retain non-padding tokens
out = torch.mul(out, bert_mask)
# First item ['CLS'] is sentence representation
out = out[:, 0, :]
# Get the task-specific classifier
if "MTLAS" in self.task_name:
regressor = self.get_or_create_regressor(data_set, out.device)
else:
regressor = self.regressor
# Get the logits
logits = regressor(out)
if calculate_loss:
# We are doing regression
loss_fct = MSELoss()
loss = loss_fct(logits.view(-1), labels.view(-1))
else:
loss = None
single_dataset_outputs.append(BertSingleDatasetOutput(labels=labels, logits=logits,
loss=loss, data_set=data_set))
return BertBatchOutput(single_dataset_outputs)
def forward_for_inference(
self,
bert_batch_input: BertBatchInput,
):
"""
:param bert_batch_input:
BertBatchInput containing input_ids, attention_mask and labels info.
:return:
modified BertBatchOutput
"""
per_batch_output = []
for bert_batch_input in bert_batch_input.bert_single_dataset_inputs:
labels = bert_batch_input.labels
dataset = bert_batch_input.data_set
outputs = self.bert(
bert_batch_input.input_ids,
attention_mask=bert_batch_input.attention_mask
)
hidden_states = outputs[-1]
out = torch.cat(tuple([hidden_states[-i] for i in range(1, self.bert_hidden_layers + 1)]), dim=-1)
# Pooling by also setting masked items to zero
bert_mask = bert_batch_input.attention_mask.unsqueeze(2)
# Multiply output with mask to only retain non-padding tokens
out = torch.mul(out, bert_mask)
# Extract First item ['CLS'] i.e. the sentence representation
out = out[:, 0, :]
logits = []
# run the out through each of the multi-regressor heads
for regressor in self.regressors:
regressor_unit = self.get_or_create_regressor(regressor, out.device)
# gets the logit value for this regressor and store in the list
logits.append(regressor_unit(out).to('cpu').numpy())
single_output = [labels, logits, dataset]
per_batch_output.append(single_output)
return per_batch_output
| The-obsrvr/ArgStrength | Hyper-parameter-optimization/src/modeling.py | modeling.py | py | 8,100 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "transformers.BertPreTrainedModel",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "transformers.BertModel",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "torch.nn.Sigmoid",
"line_number": 22,
"usage_type": "call"
},
{
"api_name... |
42768410408 | from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.converter import TextConverter, HTMLConverter
from pdfminer.layout import LAParams
from pdfminer.pdfpage import PDFPage
from collections import defaultdict
from io import BytesIO
import PyPDF2
import re
import json
from PDF_Keywords_Extractor import PdfOcr
from bs4 import BeautifulSoup
#-description
#-a quick hack/script to extract keywords from pdf documents
#-intended for single use
class KeywordExtract():
def __init__(self):
#- ToDo: will place all objects used here instead of recreating new ones every extraction
print(None)
def extract(self,path):
# - ToDo: spliting out both extract functions to only return pdf text, and create a specialise function to harvest the keywords
# -this function uses pypdf2/pdfminer(html)/pdfminer(text) to extract keywords
# Inputs:
# - [path : string] the string of the file
# Output:
# - [keywords : string] the longest list of keywords out of the 3 tools.
#-A list of keywords collected via different methods
listofkeywords = []
#-open file
file = open(path, 'rb')
#-pdfminer text
listofkeywords.append(self.findkeywords(self.pdfminer(file,"text").replace(' ',' ')))
#-pypdf2
listofkeywords.append(self.findkeywords(self.pypdf2(file).replace(' ',' ')))
#-pdfminer (HTML)
html = self.pdfminer(file,"html")
#parse using bs4
soup = BeautifulSoup(html, 'html5lib')
temp = 0
keyword = ''
#-loops through all spans
for span in soup.find_all('span', style=lambda x: x and ('Italic' or 'BoldItalic') in x):
if(temp == 1 and len(span.text) > 3):
keyword = span.text
break
if("keyword" in span.text.lower()):
temp = 1
file.close()
listofkeywords.append(keyword)
#-trims string
for i in range(0, len(listofkeywords)):
listofkeywords[i] = self.trimoutjunk(listofkeywords[i])
#- picks the largest string [greedy]
biggest = 0
for i in range(1,len(listofkeywords)):
if len(listofkeywords[biggest]) < len(listofkeywords[i]):
biggest = i
print(path + listofkeywords[biggest])
return listofkeywords[biggest]
#def writeKeywordsToFile(self):
def extract2(self,path):
# -Uses wand and tesseract to extract text
# Inputs:
# - [path : string] where the file is located
# Output:
# - [data : string] the extract text
pdf2text = PdfOcr.keywordpdftotext()
text = pdf2text.gettext(path)
text = self.findkeywords(text)
self.trimoutjunk(text)
return text
def pdfminer(self,file,type):
# -this function uses pdf miner to extract data in pdf documents
# Inputs:
# - [file : string] where the file is located
# - [type : string] what type of extraction to be used (text or html)
# Output:
# - [data : string] the extracted data in html or text format
resourceManager = PDFResourceManager()
codec = 'utf-8'
retStr = BytesIO()
laParams = LAParams()
if type == 'html':
device = HTMLConverter(resourceManager,retStr,codec=codec,laparams=laParams)
else:
device = TextConverter(resourceManager,retStr,codec=codec,laparams=laParams)
interpreter = PDFPageInterpreter(resourceManager,device)
password = ""
maxPages = 1
caching = True
pageNos= set()
for page in PDFPage.get_pages(file,pageNos,maxpages=maxPages, password = password, caching=caching,check_extractable= True):
interpreter.process_page(page)
device.close()
data = retStr.getvalue().decode()
retStr.close()
return data
def pypdf2(self,file):
# -this function uses pypdf2 to extract data from pdf file
# Inputs:
# - [file : string] where the file is located
# Output:
# - [data : string] the extracted data in text format
pyPdf = PyPDF2.PdfFileReader(file)
noPages = pyPdf.getNumPages()
page =pyPdf.getPage(0)
pageContent = page.extractText()
return pageContent
def findkeywords(self,text):
# -this function uses finds and seperates keywords from text
# Inputs:
# - [text : string] the extracted pdf text document
# Output:
# - [data : string] parsed text containing keywords (will require cleaning)
#indicators to continue building keywords
endchars = ['-', ',', ', ', ', ', ', ', '\t']
#-index of the start of keywords
startindex = text.lower().find("keywords:")
#--string contains no keywords (detects if recursive call)
if startindex == -1:
startindex = 0
else:
startindex = startindex + 9
#-index of end of keywords (when there is no more end chars
keywords = text.lower()[startindex:].split("\n")[0]
#--if the last char is one of the end chars continue building keywords, and doesnt contain a dot
if len(keywords) is not 0 and keywords[len(keywords)-1:] in endchars and keywords[len(keywords)-3:].count('.') is not 0:
keywords = keywords[:len(keywords)] + self.findkeywords(text[startindex + len(keywords) + 2:])
return keywords
def trimoutjunk(self,text):
# -this function cleans out junk from keywords (special cases, whitespace)
# Inputs:
# - [text : string] keywords to be cleaned
# Output:
# - [data : string] clearned keywords
temptext = text.lower()
end = []
#special cases
caseend = [' 22nd international congress', "\s*[0-9]*\s*.[0-9]*\s*introduction"]
#casestart = ["keywords:\s*(\w*\s*[,;]*)*."]
for case in caseend:
match = re.search(case, text)
if match:
end.append(match.start())
'''
for case in casesend:
match = re.search(case, text)
if match:
print(text[match.start():match.end() - 1])
'''
if len(end) is not 0:
text = text[0:min(end)]
#trim white space
cases = [' ', ' ', '\t', '\n']
for case in cases:
text.replace(case, '')
return text
def fileToDictionary(self, putHere, path):
#-read the download txt, and put into dictionary
#- where the key is the file name, and the value is the the actualy name
#linkFile = open("")
#-read in the keyword dump, and store keywords in a dictionary
#- where the key is the filename
putHere = defaultdict(list)
#file = None
try:
file = open(path,"r", encoding="utf-8")
except:
print("failed")
pass
for line in file:
# - split up the link and the key
if line != '\n':
print("1",line)
#split line by tab
tempLine = line.split("\t")
print("2",tempLine)
#split line by commas
if len(tempLine) > 1:
putHere[tempLine[0]] = re.split("; |,",tempLine[1])
for i in range(len(putHere[tempLine[0]])):
putHere[tempLine[0]][i] = putHere[tempLine[0]][i].strip()
putHere[tempLine[0]][i] = putHere[tempLine[0]][i].replace("\n","")
print("3",putHere[tempLine[0]])
return putHere
| CSIRO-enviro-informatics/modsim-keywords | PDF_Keywords_Extractor/PdfExtractor.py | PdfExtractor.py | py | 7,785 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "bs4.BeautifulSoup",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "PDF_Keywords_Extractor.PdfOcr.keywordpdftotext",
"line_number": 84,
"usage_type": "call"
},
{
"api_name": "PDF_Keywords_Extractor.PdfOcr",
"line_number": 84,
"usage_type": "name"
... |
29550562784 | from pymongo import MongoClient
import json
#import numpy as np
puerto = 27017
#puerto = 50375
hostname = 'localhost'
def ConectToMongoDB(puerto, Hostname):
# Conexión a la base de datos
mongoClient = MongoClient(Hostname, puerto)
# Creamos la base de datos Pulseras
db = mongoClient.Pulseras
# Obtenemos una coleccion para trabajar con ella que la llamaremos archivos
collection = db.archivos
return collection
collection = ConectToMongoDB(puerto, hostname)
def InsertarRutaFichero(ruta_fichero):
insert = {"fichero": ruta_fichero}
# Insertamos la ruta del fichero en la colección
collection.insert_one(insert)
def InsertClassify(classification, timestamp,ruta_fichero):
query = {'fichero': ruta_fichero}
np_array_to_list = classification.tolist()
json_str = json.dumps(np_array_to_list)
json_timestamp = json.dumps(timestamp.tolist())
# Insertamos el resultado de la clasificación donde tenemos insertada la ruta de ese fichero
collection.update(query, {'$set': {'Classify': json_str,'timeStamp':json_timestamp}}, upsert=True) | palomadominguez/TFG-pulseras | src/MongoConection.py | MongoConection.py | py | 1,109 | python | es | code | 0 | github-code | 36 | [
{
"api_name": "pymongo.MongoClient",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 37,
"usage_type": "call"
}
] |
8445202168 | """Module for RBF interpolation."""
import math
import warnings
from itertools import combinations_with_replacement
import cupy as cp
# Define the kernel functions.
kernel_definitions = """
static __device__ double linear(double r)
{
return -r;
}
static __device__ float linear_f(float r)
{
return -r;
}
static __device__ double cubic(double r)
{
return r*r*r;
}
static __device__ float cubic_f(float r)
{
return r*r*r;
}
static __device__ double thin_plate_spline(double r)
{
if (r == 0.0) {
return 0.0;
}
else {
return r*r*log(r);
}
}
static __device__ float thin_plate_spline_f(float r)
{
if (r == 0.0) {
return 0.0;
}
else {
return r*r*log(r);
}
}
static __device__ double multiquadric(double r)
{
return -sqrt(r*r + 1);
}
static __device__ float multiquadric_f(float r)
{
return -sqrt(r*r + 1);
}
static __device__ double inverse_multiquadric(double r)
{
return 1.0 / sqrt(r*r + 1);
}
static __device__ float inverse_multiquadric_f(float r)
{
return 1.0 / sqrt(r*r + 1);
}
static __device__ double inverse_quadratic(double r)
{
return 1.0 / (r*r + 1);
}
static __device__ float inverse_quadrtic_f(float r)
{
return 1.0 / (r*r + 1);
}
static __device__ double gaussian(double r)
{
return exp(-r*r);
}
static __device__ float gaussian_f(float r)
{
return exp(-r*r);
}
static __device__ double quintic(double r)
{
double r2 = r*r;
return -r2*r2*r;
}
static __device__ float qunitic_f(float r)
{
float r2 = r*r;
return -r2*r2*r;
}
"""
linear = cp._core.create_ufunc(
'cupyx_scipy_interpolate_linear',
(('f->f', 'out0 = linear_f(in0)'),
'd->d'),
'out0 = linear(in0)',
preamble=kernel_definitions,
doc="""Linear kernel function.
``-r``
""",
)
cubic = cp._core.create_ufunc(
'cupyx_scipy_interpolate_cubic',
(('f->f', 'out0 = cubic_f(in0)'),
'd->d'),
'out0 = cubic(in0)',
preamble=kernel_definitions,
doc="""Cubic kernel function.
``r**3``
""",
)
thin_plate_spline = cp._core.create_ufunc(
'cupyx_scipy_interpolate_thin_plate_spline',
(('f->f', 'out0 = thin_plate_spline_f(in0)'),
'd->d'),
'out0 = thin_plate_spline(in0)',
preamble=kernel_definitions,
doc="""Thin-plate spline kernel function.
``r**2 * log(r) if r != 0 else 0``
""",
)
multiquadric = cp._core.create_ufunc(
'cupyx_scipy_interpolate_multiquadric',
(('f->f', 'out0 = multiquadric_f(in0)'),
'd->d'),
'out0 = multiquadric(in0)',
preamble=kernel_definitions,
doc="""Multiquadric kernel function.
``-sqrt(r**2 + 1)``
""",
)
inverse_multiquadric = cp._core.create_ufunc(
'cupyx_scipy_interpolate_inverse_multiquadric',
(('f->f', 'out0 = inverse_multiquadric_f(in0)'),
'd->d'),
'out0 = inverse_multiquadric(in0)',
preamble=kernel_definitions,
doc="""Inverse multiquadric kernel function.
``1 / sqrt(r**2 + 1)``
""",
)
inverse_quadratic = cp._core.create_ufunc(
'cupyx_scipy_interpolate_inverse_quadratic',
(('f->f', 'out0 = inverse_quadratic_f(in0)'),
'd->d'),
'out0 = inverse_quadratic(in0)',
preamble=kernel_definitions,
doc="""Inverse quadratic kernel function.
``1 / (r**2 + 1)``
""",
)
gaussian = cp._core.create_ufunc(
'cupyx_scipy_interpolate_gaussian',
(('f->f', 'out0 = gaussian_f(in0)'),
'd->d'),
'out0 = gaussian(in0)',
preamble=kernel_definitions,
doc="""Gaussian kernel function.
``exp(-r**2)``
""",
)
quintic = cp._core.create_ufunc(
'cupyx_scipy_interpolate_quintic',
(('f->f', 'out0 = quintic_f(in0)'),
'd->d'),
'out0 = quintic(in0)',
preamble=kernel_definitions,
doc="""Quintic kernel function.
``-r**5``
""",
)
NAME_TO_FUNC = {
"linear": linear,
"thin_plate_spline": thin_plate_spline,
"cubic": cubic,
"quintic": quintic,
"multiquadric": multiquadric,
"inverse_multiquadric": inverse_multiquadric,
"inverse_quadratic": inverse_quadratic,
"gaussian": gaussian
}
def kernel_matrix(x, kernel_func, out):
"""Evaluate RBFs, with centers at `x`, at `x`."""
delta = x[None, :, :] - x[:, None, :]
out[...] = kernel_func(cp.linalg.norm(delta, axis=-1))
# The above is equivalent to the original semi-scalar version:
# for j in range(i+1):
# out[i, j] = kernel_func(cp.linalg.norm(x[i] - x[j]))
# out[j, i] = out[i, j]
def polynomial_matrix(x, powers, out):
"""Evaluate monomials, with exponents from `powers`, at `x`."""
pwr = x[:, None, :] ** powers[None, :, :]
cp.prod(pwr, axis=-1, out=out)
# The above is equivalent to the following loop
# for i in range(x.shape[0]):
# for j in range(powers.shape[0]):
# out[i, j] = cp.prod(x[i]**powers[j])
def _build_system(y, d, smoothing, kernel, epsilon, powers):
"""Build the system used to solve for the RBF interpolant coefficients.
Parameters
----------
y : (P, N) float ndarray
Data point coordinates.
d : (P, S) float ndarray
Data values at `y`.
smoothing : (P,) float ndarray
Smoothing parameter for each data point.
kernel : str
Name of the RBF.
epsilon : float
Shape parameter.
powers : (R, N) int ndarray
The exponents for each monomial in the polynomial.
Returns
-------
lhs : (P + R, P + R) float ndarray
Left-hand side matrix.
rhs : (P + R, S) float ndarray
Right-hand side matrix.
shift : (N,) float ndarray
Domain shift used to create the polynomial matrix.
scale : (N,) float ndarray
Domain scaling used to create the polynomial matrix.
"""
p = d.shape[0]
s = d.shape[1]
r = powers.shape[0]
kernel_func = NAME_TO_FUNC[kernel]
# Shift and scale the polynomial domain to be between -1 and 1
mins = cp.min(y, axis=0)
maxs = cp.max(y, axis=0)
shift = (maxs + mins)/2
scale = (maxs - mins)/2
# The scale may be zero if there is a single point or all the points have
# the same value for some dimension. Avoid division by zero by replacing
# zeros with ones.
scale[scale == 0.0] = 1.0
yeps = y * epsilon
yhat = (y - shift)/scale
# Transpose to make the array fortran contiguous. This is required for
# dgesv to not make a copy of lhs.
lhs = cp.empty((p + r, p + r), dtype=float).T
kernel_matrix(yeps, kernel_func, lhs[:p, :p])
polynomial_matrix(yhat, powers, lhs[:p, p:])
lhs[p:, :p] = lhs[:p, p:].T
lhs[p:, p:] = 0.0
for i in range(p):
lhs[i, i] += smoothing[i]
# Transpose to make the array fortran contiguous.
rhs = cp.empty((s, p + r), dtype=float).T
rhs[:p] = d
rhs[p:] = 0.0
return lhs, rhs, shift, scale
def _build_evaluation_coefficients(x, y, kernel, epsilon, powers,
shift, scale):
"""Construct the coefficients needed to evaluate
the RBF.
Parameters
----------
x : (Q, N) float ndarray
Evaluation point coordinates.
y : (P, N) float ndarray
Data point coordinates.
kernel : str
Name of the RBF.
epsilon : float
Shape parameter.
powers : (R, N) int ndarray
The exponents for each monomial in the polynomial.
shift : (N,) float ndarray
Shifts the polynomial domain for numerical stability.
scale : (N,) float ndarray
Scales the polynomial domain for numerical stability.
Returns
-------
(Q, P + R) float ndarray
"""
q = x.shape[0]
p = y.shape[0]
r = powers.shape[0]
kernel_func = NAME_TO_FUNC[kernel]
yeps = y*epsilon
xeps = x*epsilon
xhat = (x - shift)/scale
vec = cp.empty((q, p + r), dtype=float)
# Evaluate RBFs, with centers at `y`, at the point `x`.
delta = xeps[:, None, :] - yeps[None, :, :]
vec[:, :p] = kernel_func(cp.linalg.norm(delta, axis=-1))
# Evaluate monomials, with exponents from `powers`, at the point `x`.
pwr = xhat[:, None, :]**powers[None, :, :]
vec[:, p:] = cp.prod(pwr, axis=-1)
# for i in range(q):
# polynomial_vector(xhat[i], powers, vec[i, p:])
return vec
###############################################################################
# These RBFs are implemented.
_AVAILABLE = {
"linear",
"thin_plate_spline",
"cubic",
"quintic",
"multiquadric",
"inverse_multiquadric",
"inverse_quadratic",
"gaussian"
}
# The shape parameter does not need to be specified when using these RBFs.
_SCALE_INVARIANT = {"linear", "thin_plate_spline", "cubic", "quintic"}
# For RBFs that are conditionally positive definite of order m, the interpolant
# should include polynomial terms with degree >= m - 1. Define the minimum
# degrees here. These values are from Chapter 8 of Fasshauer's "Meshfree
# Approximation Methods with MATLAB". The RBFs that are not in this dictionary
# are positive definite and do not need polynomial terms.
_NAME_TO_MIN_DEGREE = {
"multiquadric": 0,
"linear": 0,
"thin_plate_spline": 1,
"cubic": 1,
"quintic": 2
}
try:
_comb = math.comb
except AttributeError:
# Naive combination for Python 3.7
def _comb(n, k):
return math.factorial(n) // (math.factorial(n - k) * math.factorial(k))
def _monomial_powers(ndim, degree):
"""Return the powers for each monomial in a polynomial.
Parameters
----------
ndim : int
Number of variables in the polynomial.
degree : int
Degree of the polynomial.
Returns
-------
(nmonos, ndim) int ndarray
Array where each row contains the powers for each variable in a
monomial.
"""
nmonos = _comb(degree + ndim, ndim)
out = cp.zeros((nmonos, ndim), dtype=int)
count = 0
for deg in range(degree + 1):
for mono in combinations_with_replacement(range(ndim), deg):
# `mono` is a tuple of variables in the current monomial with
# multiplicity indicating power (e.g., (0, 1, 1) represents x*y**2)
for var in mono:
out[count, var] += 1
count += 1
return out
def _build_and_solve_system(y, d, smoothing, kernel, epsilon, powers):
"""Build and solve the RBF interpolation system of equations.
Parameters
----------
y : (P, N) float ndarray
Data point coordinates.
d : (P, S) float ndarray
Data values at `y`.
smoothing : (P,) float ndarray
Smoothing parameter for each data point.
kernel : str
Name of the RBF.
epsilon : float
Shape parameter.
powers : (R, N) int ndarray
The exponents for each monomial in the polynomial.
Returns
-------
coeffs : (P + R, S) float ndarray
Coefficients for each RBF and monomial.
shift : (N,) float ndarray
Domain shift used to create the polynomial matrix.
scale : (N,) float ndarray
Domain scaling used to create the polynomial matrix.
"""
lhs, rhs, shift, scale = _build_system(
y, d, smoothing, kernel, epsilon, powers
)
coeffs = cp.linalg.solve(lhs, rhs)
return shift, scale, coeffs
class RBFInterpolator:
"""Radial basis function (RBF) interpolation in N dimensions.
Parameters
----------
y : (P, N) array_like
Data point coordinates.
d : (P, ...) array_like
Data values at `y`.
neighbors : int, optional
If specified, the value of the interpolant at each evaluation point
will be computed using only this many nearest data points. All the data
points are used by default.
smoothing : float or (P,) array_like, optional
Smoothing parameter. The interpolant perfectly fits the data when this
is set to 0. For large values, the interpolant approaches a least
squares fit of a polynomial with the specified degree. Default is 0.
kernel : str, optional
Type of RBF. This should be one of
- 'linear' : ``-r``
- 'thin_plate_spline' : ``r**2 * log(r)``
- 'cubic' : ``r**3``
- 'quintic' : ``-r**5``
- 'multiquadric' : ``-sqrt(1 + r**2)``
- 'inverse_multiquadric' : ``1/sqrt(1 + r**2)``
- 'inverse_quadratic' : ``1/(1 + r**2)``
- 'gaussian' : ``exp(-r**2)``
Default is 'thin_plate_spline'.
epsilon : float, optional
Shape parameter that scales the input to the RBF. If `kernel` is
'linear', 'thin_plate_spline', 'cubic', or 'quintic', this defaults to
1 and can be ignored because it has the same effect as scaling the
smoothing parameter. Otherwise, this must be specified.
degree : int, optional
Degree of the added polynomial. For some RBFs the interpolant may not
be well-posed if the polynomial degree is too small. Those RBFs and
their corresponding minimum degrees are
- 'multiquadric' : 0
- 'linear' : 0
- 'thin_plate_spline' : 1
- 'cubic' : 1
- 'quintic' : 2
The default value is the minimum degree for `kernel` or 0 if there is
no minimum degree. Set this to -1 for no added polynomial.
Notes
-----
An RBF is a scalar valued function in N-dimensional space whose value at
:math:`x` can be expressed in terms of :math:`r=||x - c||`, where :math:`c`
is the center of the RBF.
An RBF interpolant for the vector of data values :math:`d`, which are from
locations :math:`y`, is a linear combination of RBFs centered at :math:`y`
plus a polynomial with a specified degree. The RBF interpolant is written
as
.. math::
f(x) = K(x, y) a + P(x) b,
where :math:`K(x, y)` is a matrix of RBFs with centers at :math:`y`
evaluated at the points :math:`x`, and :math:`P(x)` is a matrix of
monomials, which span polynomials with the specified degree, evaluated at
:math:`x`. The coefficients :math:`a` and :math:`b` are the solution to the
linear equations
.. math::
(K(y, y) + \\lambda I) a + P(y) b = d
and
.. math::
P(y)^T a = 0,
where :math:`\\lambda` is a non-negative smoothing parameter that controls
how well we want to fit the data. The data are fit exactly when the
smoothing parameter is 0.
The above system is uniquely solvable if the following requirements are
met:
- :math:`P(y)` must have full column rank. :math:`P(y)` always has full
column rank when `degree` is -1 or 0. When `degree` is 1,
:math:`P(y)` has full column rank if the data point locations are not
all collinear (N=2), coplanar (N=3), etc.
- If `kernel` is 'multiquadric', 'linear', 'thin_plate_spline',
'cubic', or 'quintic', then `degree` must not be lower than the
minimum value listed above.
- If `smoothing` is 0, then each data point location must be distinct.
When using an RBF that is not scale invariant ('multiquadric',
'inverse_multiquadric', 'inverse_quadratic', or 'gaussian'), an appropriate
shape parameter must be chosen (e.g., through cross validation). Smaller
values for the shape parameter correspond to wider RBFs. The problem can
become ill-conditioned or singular when the shape parameter is too small.
The memory required to solve for the RBF interpolation coefficients
increases quadratically with the number of data points, which can become
impractical when interpolating more than about a thousand data points.
To overcome memory limitations for large interpolation problems, the
`neighbors` argument can be specified to compute an RBF interpolant for
each evaluation point using only the nearest data points.
See Also
--------
scipy.interpolate.RBFInterpolator
"""
def __init__(self, y, d,
neighbors=None,
smoothing=0.0,
kernel="thin_plate_spline",
epsilon=None,
degree=None):
y = cp.asarray(y, dtype=float, order="C")
if y.ndim != 2:
raise ValueError("`y` must be a 2-dimensional array.")
ny, ndim = y.shape
d_dtype = complex if cp.iscomplexobj(d) else float
d = cp.asarray(d, dtype=d_dtype, order="C")
if d.shape[0] != ny:
raise ValueError(
f"Expected the first axis of `d` to have length {ny}."
)
d_shape = d.shape[1:]
d = d.reshape((ny, -1))
# If `d` is complex, convert it to a float array with twice as many
# columns. Otherwise, the LHS matrix would need to be converted to
# complex and take up 2x more memory than necessary.
d = d.view(float)
isscalar = cp.isscalar(smoothing) or smoothing.shape == ()
if isscalar:
smoothing = cp.full(ny, smoothing, dtype=float)
else:
smoothing = cp.asarray(smoothing, dtype=float, order="C")
if smoothing.shape != (ny,):
raise ValueError(
"Expected `smoothing` to be a scalar or have shape "
f"({ny},)."
)
kernel = kernel.lower()
if kernel not in _AVAILABLE:
raise ValueError(f"`kernel` must be one of {_AVAILABLE}.")
if epsilon is None:
if kernel in _SCALE_INVARIANT:
epsilon = 1.0
else:
raise ValueError(
"`epsilon` must be specified if `kernel` is not one of "
f"{_SCALE_INVARIANT}."
)
else:
epsilon = float(epsilon)
min_degree = _NAME_TO_MIN_DEGREE.get(kernel, -1)
if degree is None:
degree = max(min_degree, 0)
else:
degree = int(degree)
if degree < -1:
raise ValueError("`degree` must be at least -1.")
elif degree < min_degree:
warnings.warn(
f"`degree` should not be below {min_degree} when `kernel` "
f"is '{kernel}'. The interpolant may not be uniquely "
"solvable, and the smoothing parameter may have an "
"unintuitive effect.",
UserWarning
)
if neighbors is None:
nobs = ny
else:
raise NotImplementedError("neighbors is not implemented yet")
# Make sure the number of nearest neighbors used for interpolation
# does not exceed the number of observations.
neighbors = int(min(neighbors, ny))
nobs = neighbors
powers = _monomial_powers(ndim, degree)
# The polynomial matrix must have full column rank in order for the
# interpolant to be well-posed, which is not possible if there are
# fewer observations than monomials.
if powers.shape[0] > nobs:
raise ValueError(
f"At least {powers.shape[0]} data points are required when "
f"`degree` is {degree} and the number of dimensions is {ndim}."
)
if neighbors is None:
shift, scale, coeffs = _build_and_solve_system(
y, d, smoothing, kernel, epsilon, powers
)
# Make these attributes private since they do not always exist.
self._shift = shift
self._scale = scale
self._coeffs = coeffs
else:
raise NotImplementedError
# self._tree = KDTree(y)
self.y = y
self.d = d
self.d_shape = d_shape
self.d_dtype = d_dtype
self.neighbors = neighbors
self.smoothing = smoothing
self.kernel = kernel
self.epsilon = epsilon
self.powers = powers
def _chunk_evaluator(self, x, y, shift, scale, coeffs,
memory_budget=1000000):
"""
Evaluate the interpolation.
Parameters
----------
x : (Q, N) float ndarray
array of points on which to evaluate
y: (P, N) float ndarray
array of points on which we know function values
shift: (N, ) ndarray
Domain shift used to create the polynomial matrix.
scale : (N,) float ndarray
Domain scaling used to create the polynomial matrix.
coeffs: (P+R, S) float ndarray
Coefficients in front of basis functions
Returns
-------
(Q, S) float ndarray
Interpolated array
"""
nx, ndim = x.shape
nnei = len(y)
# in each chunk we consume the same space we already occupy
chunksize = memory_budget // ((self.powers.shape[0] + nnei)) + 1
if chunksize <= nx:
out = cp.empty((nx, self.d.shape[1]), dtype=float)
for i in range(0, nx, chunksize):
vec = _build_evaluation_coefficients(
x[i:i + chunksize, :],
y,
self.kernel,
self.epsilon,
self.powers,
shift,
scale)
out[i:i + chunksize, :] = cp.dot(vec, coeffs)
else:
vec = _build_evaluation_coefficients(
x,
y,
self.kernel,
self.epsilon,
self.powers,
shift,
scale)
out = cp.dot(vec, coeffs)
return out
def __call__(self, x):
"""Evaluate the interpolant at `x`.
Parameters
----------
x : (Q, N) array_like
Evaluation point coordinates.
Returns
-------
(Q, ...) ndarray
Values of the interpolant at `x`.
"""
x = cp.asarray(x, dtype=float, order="C")
if x.ndim != 2:
raise ValueError("`x` must be a 2-dimensional array.")
nx, ndim = x.shape
if ndim != self.y.shape[1]:
raise ValueError("Expected the second axis of `x` to have length "
f"{self.y.shape[1]}.")
# Our memory budget for storing RBF coefficients is
# based on how many floats in memory we already occupy
# If this number is below 1e6 we just use 1e6
# This memory budget is used to decide how we chunk
# the inputs
memory_budget = max(x.size + self.y.size + self.d.size, 1000000)
if self.neighbors is None:
out = self._chunk_evaluator(
x,
self.y,
self._shift,
self._scale,
self._coeffs, memory_budget=memory_budget)
else:
raise NotImplementedError # XXX: needs KDTree
out = out.view(self.d_dtype)
out = out.reshape((nx, ) + self.d_shape)
return out
| cupy/cupy | cupyx/scipy/interpolate/_rbfinterp.py | _rbfinterp.py | py | 23,197 | python | en | code | 7,341 | github-code | 36 | [
{
"api_name": "cupy._core.create_ufunc",
"line_number": 113,
"usage_type": "call"
},
{
"api_name": "cupy._core",
"line_number": 113,
"usage_type": "attribute"
},
{
"api_name": "cupy._core.create_ufunc",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "cu... |
70447210985 | import os
import typer
from src.core.config import settings
from src.collection.building import collect_building
from src.collection.poi import collect_poi
from src.collection.landuse import collect_landuse
from src.collection.network import collect_network
from src.collection.network_pt import collect_network_pt
from src.preparation.network import prepare_network
from src.preparation.poi import prepare_poi
from src.preparation.network import export_network
from src.preparation.network_pt import prepare_network_pt
from src.preparation.building import prepare_building
from src.preparation.poi import export_poi
from src.preparation.public_transport_stop import prepare_public_transport_stop
from src.preparation.population import prepare_population
from src.preparation.gtfs import prepare_gtfs
from src.preparation.gtfs import export_gtfs
from src.migration.gtfs import migrate_gtfs
from src.utils.utils import print_hashtags, print_info
from src.db.db import Database
app = typer.Typer()
db = Database(settings.LOCAL_DATABASE_URI)
db_rd = Database(settings.RAW_DATABASE_URI)
# TODO: Add prepare_landuse, export_building, export_landuse, export_population
action_dict = {
"collection": {
"building": collect_building,
"poi": collect_poi,
"landuse": collect_landuse,
"network": collect_network,
"network_pt": collect_network_pt
},
"preparation": {
"poi": prepare_poi,
"network": prepare_network,
"network_pt": prepare_network_pt,
"building": prepare_building,
"public_transport_stop": prepare_public_transport_stop,
"population": prepare_population,
"gtfs": prepare_gtfs,
},
"export": {"poi": export_poi, "network": export_network, "gtfs": export_gtfs},
"migration": {"gtfs": migrate_gtfs},
}
def check_input(actions: list[str], datasets: list[str]) -> bool:
"""Check if input is valid.
Args:
actions (list[str]): Actions to perform.
datasets (list[str]): Datasets to perform actions on.
Raises:
typer.Abort: If action is not supported.
Returns:
bool: True if input is valid.
"""
# Check if action in action_dict keys
for action in actions:
if action not in action_dict.keys():
typer.echo(f"Action {action} is not supported.")
raise typer.Abort()
# Check if dataset supports action if not print that dataset does not support action but continue
for action in actions:
for dataset in datasets:
if dataset not in action_dict[action].keys():
typer.echo(f"Dataset {dataset} does not support action {action}.")
return True
def check_config_file_exists(data_set: str, region: str) -> bool:
"""Check if the configuration file exists."""
config_path = os.path.join(
settings.CONFIG_DIR,
"data_variables",
data_set,
data_set + "_" + region + ".yaml",
)
if not os.path.isfile(config_path):
typer.echo(f"Configuration file {config_path} does not exist.")
raise typer.Abort()
return True
@app.command()
def run(
actions: str = typer.Option(None, "--actions", "-a"),
region: str = typer.Option(None, "--region", "-r"),
data_sets: str = typer.Option(None, "--datasets", "-d"),
):
"""Orchestrate the data preparation process."""
all_actions = actions.split(",")
data_sets = data_sets.split(",")
# Check if all data sets are valid
check_input(actions=all_actions, datasets=data_sets)
# Loop through actions dicts and check if action and dataset are requested. If so, compute
for action in all_actions:
for dataset in data_sets:
if dataset in action_dict[action].keys() and action in action_dict.keys():
print_hashtags()
if region is not None:
print_info(f"Performing {action} on {dataset} for region <{region}>")
else:
print_info(f"Performing {action} on {dataset}")
print_hashtags()
if region is not None:
check_config_file_exists(data_set=dataset, region=region)
action_dict[action][dataset](region=region)
else:
action_dict[action][dataset]()
if __name__ == "__main__":
app()
| goat-community/data_preparation | manage.py | manage.py | py | 4,391 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "typer.Typer",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "src.db.db.Database",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "src.core.config.settings.LOCAL_DATABASE_URI",
"line_number": 25,
"usage_type": "attribute"
},
{
"a... |
30371792823 | import streamlit as st
from EmailSender import EmailSender
from models.client.client import CreateClient
from models.client.message import CreateMessage
from models.client.template import CreateTemplate
from pages.group_send import run_group_send
from pages.single_send import run_single_send
from pages.manage_client import run_manage_client
from pages.manage_template import run_manage_template
def run_email_app(app_type):
sender_email = "contact@kingvpn.fr"
sender_password = "Bizerte7000"
path = '/home/anisse9/vpn/'
email_sender = EmailSender(sender_email, sender_password)
#bdd = CRMDatabase()
message = CreateMessage()
client = CreateClient()
template = CreateTemplate()
if app_type == "Group send":
run_group_send(email_sender, client, template)
elif app_type == "Single send":
run_single_send(email_sender, template)
elif app_type == "Manage Client":
run_manage_client( client)
elif app_type == "Manage Template":
run_manage_template( template)
def main():
st.title("Sending emails with OVH")
add_selectbox = st.sidebar.selectbox(
"Choose your goal",
("Group send", "Single send", "Manage Client","Manage Template")
)
if add_selectbox == "Group send":
run_email_app("Group send")
elif add_selectbox == "Single send":
run_email_app("Single send")
elif add_selectbox == "Manage Client":
run_email_app("Manage Client")
elif add_selectbox == "Manage Template":
run_email_app("Manage Template")
if __name__ == "__main__":
main()
| helmi75/SendMegaEmailsOVH | App/main.py | main.py | py | 1,610 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "EmailSender.EmailSender",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "models.client.message.CreateMessage",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "models.client.client.CreateClient",
"line_number": 21,
"usage_type": "call"
... |
21362127557 | # step1_unpack.py
"""Read the GEMS output directly from .tar archives, compile it into a single
data set of size (num_variables * domain_size)x(num_snapshots), and save it
in HDF5 format.
The GEMS data for this project collects snapshots of the following variables.
* Pressure
* x-velocity
* y-velocity
* Temperature
* CH4 mass fraction
* O2 mass fraction
* H2O mass fraction
* CO2 mass fraction
Each variable is discretized over a domain with 38,523 degrees of freedom
(DOF), so each snapshot has 8 * 38,523 = 308,184 entries.
Examples
--------
# Process the raw .tar data files that are placed in /storage/combustion/.
$ python3 step1_unpack.py /storage/combustion
# Process the raw .tar data files that are placed in the current directory,
# overwriting the resulting HDF5 file if it already exists.
$ python3 step1_unpack.py . --overwrite
# Process the raw .tar data files in /storage/combustion/ serially
# (not in parallel, which is the default).
$ python3 step1_unpack.py /storage/combustion --serial
Loading Results
---------------
>>> import utils
>>> gems_data, time_domain = utils.load_gems_data()
Command Line Arguments
----------------------
"""
import os
import re
import glob
import h5py
import shutil
import logging
import tarfile
import numpy as np
import multiprocessing as mp
import config
import utils
# Regular expressions
_SIMTIME = re.compile(r"_(\d+).dat") # Simulation time from file name
_HEADEREND = re.compile(r"DT=.*?\n") # Last line in .dat headers
_ELEMENTS = re.compile(r"Elements=(\d+)") # DOF listed in .dat headers
def _read_tar_and_save_data(tfile, start, stop, parallel=True):
"""Read snapshot data directly from a .tar archive (without untar-ing it)
and copy the data to the snapshot matrix HDF5 file config.GEMS_DATA_FILE.
Parameters
----------
tfile : str
Name of a .tar file to read data from.
start : int
Index of the first snapshot contained in the .tar file.
stop : int
Index of the last snapshot contained in the .tar file.
parallel : bool
If True, then only print progress if start == 0 and lock / unlock
when writing to the HDF5 file.
"""
# Allocate space for the snapshots in this .tar file.
num_snapshots = stop - start
gems_data = np.empty((config.DOF*config.NUM_GEMSVARS, num_snapshots),
dtype=np.float64)
times = np.empty(num_snapshots, dtype=np.float64)
# Extract the data from the .tar file.
with tarfile.open(tfile, 'r') as archive:
for j,tarinfo in enumerate(archive):
# Read the contents of one file.
with archive.extractfile(tarinfo) as datfile:
contents = datfile.read().decode()
# Get the simulation time from the file name.
simtime = float(_SIMTIME.findall(tarinfo.name)[0]) * config.DT
# Parse and verify the header.
header_end = _HEADEREND.findall(contents)[0]
headersize = contents.find(header_end) + len(header_end)
if int(_ELEMENTS.findall(contents[:headersize])[0]) != config.DOF:
raise RuntimeError(f"{tarinfo.name} DOF != config.DOF")
# Extract and store the variable data.
data = contents[headersize:].split()[:gems_data.shape[0]],
gems_data[:,j] = np.array(data, dtype=np.float64)
times[j] = simtime
if start == 0 or not parallel:
print(f"\rProcessed file {j+1:05d}/{num_snapshots}",
end='', flush=True)
if start == 0 or not parallel:
print()
# Save the data to the appropriate slice.
save_path = config.gems_data_path()
if parallel:
lock.acquire() # Only allow one process to open the file at a time.
with utils.timed_block(f"Saving snapshots {start}-{stop} to HDF5"):
with h5py.File(save_path, 'a') as hf:
hf["data"][:,start:stop] = gems_data
hf["time"][start:stop] = times
print(f"Data saved to {save_path}.", flush=True)
if parallel:
lock.release() # Let other processes resume.
def _globalize_lock(L):
global lock
lock = L
def main(data_folder, overwrite=False, serial=False):
"""Extract snapshot data, in parallel, from the .tar files in the
specified folder of the form Data_<first-snapshot>to<last-snapshot>.tar.
Parameters
----------
data_folder : str
Path to the folder that contains the raw GEMS .tar data files,
preferably as an absolute path (e.g., /path/to/folder).
overwrite : bool
If False and the snapshot matrix file exists, raise an error.
If True, overwrite the existing snapshot matrix file if it exists.
serial : bool
If True, do the unpacking sequentially in 10,000 snapshot chunks.
If False, do the unpacking in parallel with 10,000 snapshot chunks.
"""
utils.reset_logger()
# If it exists, copy the grid file to the Tecplot data directory.
source = os.path.join(data_folder, config.GRID_FILE)
if os.path.isfile(source):
target = config.grid_data_path()
with utils.timed_block(f"Copying {source} to {target}"):
shutil.copy(source, target)
else:
logging.warning(f"Grid file {source} not found!")
# Locate and sort raw .tar files.
target_pattern = os.path.join(data_folder, "Data_*to*.tar")
tarfiles = sorted(glob.glob(target_pattern))
if not tarfiles:
raise FileNotFoundError(target_pattern)
# Get the snapshot indices corresponding to each file from the file names.
starts, stops = [], []
for i,tfile in enumerate(tarfiles):
matches = re.findall(r"Data_(\d+)to(\d+).tar", tfile)
if not matches:
raise ValueError(f"file {tfile} not named with convention "
"Data_<first-snapshot>to<last-snapshot>.tar")
start, stop = [int(d) for d in matches[0]]
if i == 0:
start0 = start # Offset
starts.append(start - start0)
stops.append(stop + 1 - start0)
if i > 0 and stops[i-1] != starts[i]:
raise ValueError(f"file {tfile} not continuous from previous set")
num_snapshots = stops[-1]
# Create an empty HDF5 file of appropriate size for the data.
save_path = config.gems_data_path()
if os.path.isfile(save_path) and not overwrite:
raise FileExistsError(f"{save_path} (use --overwrite to overwrite)")
with utils.timed_block("Initializing HDF5 file for data"):
with h5py.File(save_path, 'w') as hf:
hf.create_dataset("data", shape=(config.DOF*config.NUM_GEMSVARS,
num_snapshots),
dtype=np.float64)
hf.create_dataset("time", shape=(num_snapshots,), dtype=np.float64)
logging.info(f"Data file initialized as {save_path}.")
# Read the files in chunks.
args = zip(tarfiles, starts, stops)
if serial: # Read the files serially (sequentially).
for tf, start, stop in args:
_read_tar_and_save_data(tf, start, stop, parallel=False)
else: # Read the files in parallel.
with mp.Pool(initializer=_globalize_lock, initargs=(mp.Lock(),),
processes=min([len(tarfiles), mp.cpu_count()])) as pool:
pool.starmap(_read_tar_and_save_data, args)
# =============================================================================
if __name__ == '__main__':
# Set up command line argument parsing.
import argparse
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.usage = f""" python3 {__file__} --help
python3 {__file__} DATAFOLDER [--overwrite] [--serial]"""
parser.add_argument("datafolder", type=str,
help="folder containing the raw GEMS .tar data files")
parser.add_argument("--overwrite", action="store_true",
help="overwrite the existing HDF5 data file")
parser.add_argument("--serial", action="store_true",
help="do the unpacking sequentially, not in parallel")
# Do the main routine.
args = parser.parse_args()
main(args.datafolder, args.overwrite, args.serial)
| a04051127/ROM-OpInf-Test | step1_unpack.py | step1_unpack.py | py | 8,380 | python | en | code | null | github-code | 36 | [
{
"api_name": "re.compile",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "numpy.empty",
"line_number": 79,... |
86452991700 | import pandas as pd
import numpy as np
import scanpy as sc
import decoupler as dc
import anndata as ad
import matplotlib.pyplot as plt
import matplotlib.backends.backend_pdf
import argparse
# Init args
parser = argparse.ArgumentParser()
parser.add_argument('-m','--meta_path', required=True)
parser.add_argument('-p','--plot_path', required=True)
args = vars(parser.parse_args())
meta_path = args['meta_path']
plot_path = args['plot_path']
def read_slide(sample_id):
slide = sc.read_h5ad('data/prc/vs/{0}/adata.h5ad'.format(sample_id))
slide.obsm['props'] = pd.read_csv('data/prc/vs/{0}/props.csv'.format(sample_id), index_col=0)
slide.obs['niches'] = pd.read_csv('data/prc/vs/{0}/niches.csv'.format(sample_id), index_col=0)
slide.obs_names = [o + '|' + sample_id for o in slide.obs_names]
return slide
# Read meta
meta = pd.read_csv(meta_path)
# Read slides
vs_samples = meta[~meta['Batch vs'].isnull()]['Sample id'].values.astype('U')
adata = []
for vs_sample in vs_samples:
print(vs_sample)
slide = read_slide(vs_sample)
adata.append(slide)
adata = ad.concat(adata, join='outer')
niches = ['GM', 'WM', 'PPWM', 'LR', 'LC', 'VI']
adata = adata[np.isin(adata.obs['niches'].astype('U'), niches)].copy()
adata.obs['niches'] = pd.Categorical(adata.obs['niches'], categories=niches)
markers = dict(
GM=['SYT1', 'CEND1', 'BRSK2'],
WM=['MBP', 'PLP1', 'CNP'],
PPWM=['SUN2', 'BOK', 'SOX10'],
LR=['CSF1R', 'APOC1', 'CHI3L1'],
LC=['GJA1', 'SORBS1', 'DTNA'],
VI=['VIM', 'CLDN5', 'VWF']
)
fig1 = sc.pl.dotplot(
adata,
markers,
groupby='niches',
standard_scale='var',
dendrogram=False,
categories_order=niches,
figsize=(9, 3),
cmap='Reds',
size_title='Fraction of spots\n in group (%)',
return_fig=True
)
fig1.show()
fig1.fig.set_dpi(150)
fig1 = fig1.fig
vars = ['NEU', 'OPC', 'OL', 'MG', 'AS', 'TC', 'SC', 'EC', 'BC']
pdata = dc.get_pseudobulk(
adata=dc.get_acts(adata, 'props'),
sample_col='Sample id',
groups_col='niches',
mode='mean',
min_cells=0,
min_counts=0
)
df = dc.rank_sources_groups(
adata=pdata,
groupby='niches',
method='wilcoxon'
)
df = df[(df['pvals_adj'] < 0.05) & (df['statistic'] > 0)].reset_index(drop=True)
fig2 = sc.pl.matrixplot(
pdata,
vars,
groupby='niches',
cmap='Purples',
standard_scale='var',
categories_order=niches,
figsize=(6, 3),
colorbar_title='Scaled mean proportion',
return_fig=True
)
fig2.show()
fig2.fig.set_dpi(150)
fig2 = fig2.fig
# Save to pdf
pdf = matplotlib.backends.backend_pdf.PdfPages(plot_path)
for fig in [fig1, fig2]:
pdf.savefig(fig, bbox_inches='tight')
pdf.close()
| saezlab/VisiumMS | workflow/scripts/figures/fig1/niches_markers.py | niches_markers.py | py | 2,695 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "scanpy.read_h5ad",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "pandas.read_... |
73478421545 | import pygame
import random
from pygame.math import Vector2
from triangle import Triangle
from settings import *
from colors import *
class Sierpinski:
def __init__(self):
self.width = WIDTH
self.height = HEIGHT
self.xoff = X_OFF
self.yoff = Y_OFF
self.gameWinWidth = GAMEWIN_WIDTH
self.gameWinHeight = GAMEWIN_HEIGHT
self.fps = FPS
self.clock = None
self.titleFont = None
self.grid = None
self.win = None
self.gameWin = None
self.gameWinRect = None
self.counter = 0
self.level = 0
self.maxLevel = MAXLEVEL
self.triangles = []
self.mainTriangle = None
def grid_init(self):
pygame.init()
pygame.font.init()
self.win = pygame.display.set_mode((self.width, self.height))
pygame.display.set_caption(TITLE)
self.gameWinRect = pygame.Rect(self.xoff, self.yoff, self.gameWinWidth, self.gameWinHeight)
self.gameWin = self.win.subsurface(self.gameWinRect)
self.win.fill(MID_BLACK)
self.gameWin.fill(BLACK)
self.titleFont = pygame.font.SysFont(TITLE_FONT, FONT_SIZE)
title = self.titleFont.render(TITLE, 1, GOLD)
w, h = title.get_size()
blitX = (self.width - w) // 2
blitY = (self.yoff - h) // 2
self.win.blit(title, (blitX, blitY))
self.clock = pygame.time.Clock()
margin = int(GAMEWIN_WIDTH * 0.05)
pointA = Vector2(GAMEWIN_WIDTH // 2, margin)
pointB = Vector2(margin, GAMEWIN_HEIGHT - margin)
pointC = Vector2(GAMEWIN_WIDTH - margin, GAMEWIN_HEIGHT - margin)
self.mainTriangle = Triangle(pointA, pointB, pointC)
self.triangles.append(self.mainTriangle)
pygame.display.update()
def close(self):
pygame.font.quit()
pygame.quit()
def draw(self):
self.mainTriangle.draw(self.gameWin)
pygame.display.update()
def add_level(self):
# print("adding level: ")
for i in range(len(self.triangles)):
triangle = self.triangles.pop(0)
triangle.create_children()
self.triangles.extend(triangle.getChildren())
print(len(self.triangles))
def run(self):
if not pygame.display.init():
self.grid_init()
run = True
while run:
self.clock.tick(self.fps)
for event in pygame.event.get():
if event.type == pygame.QUIT:
run = False
if event.type == pygame.KEYDOWN:
pressed = pygame.key.get_pressed()
if pressed[pygame.K_SPACE]:
self.add_level()
self.draw()
self.close()
if __name__ == "__main__":
print("Hello, World!")
X = Sierpinski()
X.run() | Deadshot96/sierpinski-triangle | main.py | main.py | py | 3,079 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pygame.init",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "pygame.font.init",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "pygame.font",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.set_mode... |
15768186318 | """Test cases for the kallisto methods."""
import numpy as np
import pytest
from kallisto.units import Bohr
from rdkit import Chem
from rdkit.Chem.rdMolDescriptors import CalcNumAtoms
from tests.store import pyridine
from jazzy.core import get_charges_from_kallisto_molecule
from jazzy.core import kallisto_molecule_from_rdkit_molecule
from jazzy.core import rdkit_molecule_from_smiles
from jazzy.utils import KallistoError
def test_kallisto_charges_are_correct_from_molecule():
"""It calculates the correct atomic EEQ partial charges."""
want = [0.04352191, -0.0924591]
# build molecule from store
m = pyridine()
charge = 0
eeq = m.get_eeq(charge)
assert np.isclose(eeq[0], want[0])
assert np.isclose(eeq[1], want[1])
def test_kallisto_charges_are_correct_from_wrapper_function():
"""Calculate the correct atomic EEQ partial charges from wrapper."""
want = [0.04352191, -0.0924591]
# build molecule from store
m = pyridine()
charge = 0
eeq = get_charges_from_kallisto_molecule(m, charge)
assert np.isclose(eeq[0], want[0])
assert np.isclose(eeq[1], want[1])
def test_kallisto_creation_fails_for_nonembedded_molecule() -> None:
"""It raises a KallistoError when a nonembedded molecule is entered."""
with pytest.raises(KallistoError) as error:
smiles = "CC"
m = Chem.MolFromSmiles(smiles)
kallisto_molecule_from_rdkit_molecule(m)
assert (
error.value.args[0]
== "The kallisto molecule was not created for the input 'CC'"
)
def test_kallisto_coordinates_match_rdkit_coordinates():
"""Both molecules have the same coordinates."""
smiles = "C1CC2=C3C(=CC=C2)C(=CN3C1)"
rdkit_molecule = rdkit_molecule_from_smiles(smiles=smiles)
# get all xyz coordinates and split into list of lines
xyz = Chem.rdmolfiles.MolToXYZBlock(rdkit_molecule).split("\n")
# remove empty lines from list
xyz = [line for line in xyz if line != ""]
# remove number of atoms as given in xmol files (first line)
xyz = xyz[1:]
# create kallisto molecule
kallisto_molecule = kallisto_molecule_from_rdkit_molecule(
rdkit_molecule=rdkit_molecule
)
want = kallisto_molecule.get_positions()
# check each coordinate
for i, coord in enumerate(xyz):
_, x, y, z = coord.split()[:4]
position = [float(x) / Bohr, float(y) / Bohr, float(z) / Bohr]
assert np.isclose(position[0], want[i][0])
assert np.isclose(position[1], want[i][1])
assert np.isclose(position[2], want[i][2])
def test_kallisto_from_rdkit_molecule_with_name():
"""A valid kallisto molecule is generated from an RDKit molecule with _Name."""
# create rdkit molecule with a custom name
smiles = "C1CC2=C3C(=CC=C2)C(=CN3C1)"
rdkit_molecule = rdkit_molecule_from_smiles(smiles=smiles)
rdkit_molecule.SetProp("_Name", "test1")
# create kallisto molecule
kallisto_molecule = kallisto_molecule_from_rdkit_molecule(
rdkit_molecule=rdkit_molecule
)
# verify that molecule is created correctly
rdkit_atoms = CalcNumAtoms(rdkit_molecule)
kallisto_atoms = kallisto_molecule.get_number_of_atoms()
assert rdkit_atoms == kallisto_atoms
| AstraZeneca/jazzy | tests/test_kallisto.py | test_kallisto.py | py | 3,257 | python | en | code | 63 | github-code | 36 | [
{
"api_name": "tests.store.pyridine",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.isclose",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "numpy.isclose",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "tests.store.pyridine... |
42893966152 | import json
import boto3
print('creating client')
client = boto3.client(
'dynamodb',
# endpoint_url="http://9.9.9.9:8000",
endpoint_url="http://localhost:8000",
aws_access_key_id='dummyid',
aws_secret_access_key='dummykey',
aws_session_token='dummytoken',
region_name='us-west-2'
)
def lambda_handler(event, context):
"""
Sample lambda function to connect to local dynamo db
"""
try:
# dynamodb = boto3.resource('dynamodb', endpoint_url="http://9.9.9.9:8000")
table_name = 'users_table'
response = client.describe_table(TableName=table_name)
print(response)
item = "test"
except Exception as e:
raise e
else:
return {
"statusCode": 200,
"body": json.dumps({
"message": f"{item}"
}),
} | tthoraldson/PetMatch | src/pet_match_stack/lambdas/dynamo_test/app.py | app.py | py | 878 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "boto3.client",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 36,
"usage_type": "call"
}
] |
36919269029 | from __future__ import unicode_literals, division, absolute_import, print_function
import logging
import base64
import inspect
import re
import enum
import sys
import textwrap
import time
from datetime import datetime, timezone, timedelta
from typing import Callable, Tuple, Optional
from asn1crypto import x509, keys, core, ocsp
from asn1crypto.ocsp import OCSPRequest, OCSPResponse
from oscrypto import asymmetric
from flask import Flask, request, Response
__version__ = '0.10.2'
__version_info__ = (0, 10, 2)
logger = logging.getLogger(__name__)
if sys.version_info < (3,):
byte_cls = str
else:
byte_cls = bytes
def _pretty_message(string, *params):
"""
Takes a multi-line string and does the following:
- dedents
- converts newlines with text before and after into a single line
- strips leading and trailing whitespace
:param string:
The string to format
:param *params:
Params to interpolate into the string
:return:
The formatted string
"""
output = textwrap.dedent(string)
# Unwrap lines, taking into account bulleted lists, ordered lists and
# underlines consisting of = signs
if output.find('\n') != -1:
output = re.sub('(?<=\\S)\n(?=[^ \n\t\\d\\*\\-=])', ' ', output)
if params:
output = output % params
output = output.strip()
return output
def _type_name(value):
"""
:param value:
A value to get the object name of
:return:
A unicode string of the object name
"""
if inspect.isclass(value):
cls = value
else:
cls = value.__class__
if cls.__module__ in set(['builtins', '__builtin__']):
return cls.__name__
return '%s.%s' % (cls.__module__, cls.__name__)
def _writer(func):
"""
Decorator for a custom writer, but a default reader
"""
name = func.__name__
return property(fget=lambda self: getattr(self, '_%s' % name), fset=func)
class OCSPResponseBuilder(object):
_response_status = None
_certificate = None
_certificate_status = None
_revocation_date = None
_certificate_issuer = None
_hash_algo = None
_issuer_hash_algo = None
_key_hash_algo = None
_nonce = None
_this_update = None
_next_update = None
_response_data_extensions = None
_single_response_extensions = None
def __init__(self, response_status, certificate_status_list=[], revocation_date=None):
"""
Unless changed, responses will use SHA-256 for the signature,
and will be valid from the moment created for one week.
:param response_status:
A unicode string of OCSP response type:
- "successful" - when the response includes information about the certificate
- "malformed_request" - when the request could not be understood
- "internal_error" - when an internal error occured with the OCSP responder
- "try_later" - when the OCSP responder is temporarily unavailable
- "sign_required" - when the OCSP request must be signed
- "unauthorized" - when the responder is not the correct responder for the certificate
:param certificate_list:
A list of tuples with certificate serial number and certificate status objects.
certificate_status:
A unicode string of the status of the certificate. Only required if
the response_status is "successful".
- "good" - when the certificate is in good standing
- "revoked" - when the certificate is revoked without a reason code
- "key_compromise" - when a private key is compromised
- "ca_compromise" - when the CA issuing the certificate is compromised
- "affiliation_changed" - when the certificate subject name changed
- "superseded" - when the certificate was replaced with a new one
- "cessation_of_operation" - when the certificate is no longer needed
- "certificate_hold" - when the certificate is temporarily invalid
- "remove_from_crl" - only delta CRLs - when temporary hold is removed
- "privilege_withdrawn" - one of the usages for a certificate was removed
- "unknown" - the responder doesn't know about the certificate being requested
:param revocation_date:
A datetime.datetime object of when the certificate was revoked, if
the response_status is "successful" and the certificate status is
not "good" or "unknown".
"""
self._response_status = response_status
self._certificate_status_list = certificate_status_list
self._revocation_date = revocation_date
self._issuer_hash_algo = 'sha1'
self._key_hash_algo = 'sha1'
self._hash_algo = 'sha256'
self._response_data_extensions = {}
self._single_response_extensions = {}
@_writer
def nonce(self, value):
"""
The nonce that was provided during the request.
"""
if not isinstance(value, byte_cls):
raise TypeError(_pretty_message(
'''
nonce must be a byte string, not %s
''',
_type_name(value)
))
self._nonce = value
@_writer
def certificate_issuer(self, value):
"""
An asn1crypto.x509.Certificate object of the issuer of the certificate.
This should only be set if the OCSP responder is not the issuer of
the certificate, but instead a special certificate only for OCSP
responses.
"""
if value is not None:
is_oscrypto = isinstance(value, asymmetric.Certificate)
if not is_oscrypto and not isinstance(value, x509.Certificate):
raise TypeError(_pretty_message(
'''
certificate_issuer must be an instance of
asn1crypto.x509.Certificate or
oscrypto.asymmetric.Certificate, not %s
''',
_type_name(value)
))
if is_oscrypto:
value = value.asn1
self._certificate_issuer = value
@_writer
def next_update(self, value):
"""
A datetime.datetime object of when the response may next change. This
should only be set if responses are cached. If responses are generated
fresh on every request, this should not be set.
"""
if not isinstance(value, datetime):
raise TypeError(_pretty_message(
'''
next_update must be an instance of datetime.datetime, not %s
''',
_type_name(value)
))
self._next_update = value
@_writer
def issuer_hash_algo(self, value):
"""
String name of the hash algorithm used for hashing the issuer name and
issuer public key.
"""
if not isinstance(value, str):
raise TypeError(_pretty_message(
'''
issuer_hash_algo must be an instance of str, not %s
''',
_type_name(value)
))
self._issuer_hash_algo = value
def build(self, responder_private_key=None, responder_certificate=None):
"""
Validates the request information, constructs the ASN.1 structure and
signs it.
The responder_private_key and responder_certificate parameters are onlystr
required if the response_status is "successful".
:param responder_private_key:
An asn1crypto.keys.PrivateKeyInfo or oscrypto.asymmetric.PrivateKey
object for the private key to sign the response with
:param responder_certificate:
An asn1crypto.x509.Certificate or oscrypto.asymmetric.Certificate
object of the certificate associated with the private key
:return:
An asn1crypto.ocsp.OCSPResponse object of the response
"""
if self._response_status != 'successful':
return ocsp.OCSPResponse({
'response_status': self._response_status
})
is_oscrypto = isinstance(responder_private_key, asymmetric.PrivateKey)
if not isinstance(responder_private_key, keys.PrivateKeyInfo) and not is_oscrypto:
raise TypeError(_pretty_message(
'''
responder_private_key must be an instance ofthe c
asn1crypto.keys.PrivateKeyInfo or
oscrypto.asymmetric.PrivateKey, not %s
''',
_type_name(responder_private_key)
))
cert_is_oscrypto = isinstance(responder_certificate, asymmetric.Certificate)
if not isinstance(responder_certificate, x509.Certificate) and not cert_is_oscrypto:
raise TypeError(_pretty_message(
'''
responder_certificate must be an instance of
asn1crypto.x509.Certificate or
oscrypto.asymmetric.Certificate, not %s
''',
_type_name(responder_certificate)
))
if cert_is_oscrypto:
responder_certificate = responder_certificate.asn1
if self._certificate_status_list is None:
raise ValueError(_pretty_message(
'''
certificate_status_list must be set if the response_status is
"successful"
'''
))
def _make_extension(name, value):
return {
'extn_id': name,
'critical': False,
'extn_value': value
}
responses = []
for serial, status in self._certificate_status_list:
response_data_extensions = []
single_response_extensions = []
for name, value in self._response_data_extensions.items():
response_data_extensions.append(_make_extension(name, value))
if self._nonce:
response_data_extensions.append(
_make_extension('nonce', self._nonce)
)
if not response_data_extensions:
response_data_extensions = None
for name, value in self._single_response_extensions.items():
single_response_extensions.append(_make_extension(name, value))
if self._certificate_issuer:
single_response_extensions.append(
_make_extension(
'certificate_issuer',
[
x509.GeneralName(
name='directory_name',
value=self._certificate_issuer.subject
)
]
)
)
if not single_response_extensions:
single_response_extensions = None
responder_key_hash = getattr(responder_certificate.public_key, self._key_hash_algo)
if status == 'good':
cert_status = ocsp.CertStatus(
name='good',
value=core.Null()
)
elif status == 'unknown':
cert_status = ocsp.CertStatus(
name='unknown',
value=core.Null()
)
else:
reason = status if status != 'revoked' else 'unspecified'
cert_status = ocsp.CertStatus(
name='revoked',
value={
'revocation_time': self._revocation_date,
'revocation_reason': reason,
}
)
issuer = self._certificate_issuer if self._certificate_issuer else responder_certificate
produced_at = datetime.now(timezone.utc).replace(microsecond=0)
if self._this_update is None:
self._this_update = produced_at
response = {
'cert_id': {
'hash_algorithm': {
'algorithm': self._issuer_hash_algo
},
'issuer_name_hash': getattr(issuer.subject, self._issuer_hash_algo),
'issuer_key_hash': getattr(issuer.public_key, self._issuer_hash_algo),
'serial_number': serial,
},
'cert_status': cert_status,
'this_update': self._this_update,
'next_update': self._next_update,
'single_extensions': single_response_extensions
}
responses.append(response)
response_data = ocsp.ResponseData({
'responder_id': ocsp.ResponderId(name='by_key', value=responder_key_hash),
'produced_at': produced_at,
'responses': responses,
'response_extensions': response_data_extensions
})
signature_algo = responder_private_key.algorithm
if signature_algo == 'ec':
signature_algo = 'ecdsa'
signature_algorithm_id = '%s_%s' % (self._hash_algo, signature_algo)
if responder_private_key.algorithm == 'rsa':
sign_func = asymmetric.rsa_pkcs1v15_sign
elif responder_private_key.algorithm == 'dsa':
sign_func = asymmetric.dsa_sign
elif responder_private_key.algorithm == 'ec':
sign_func = asymmetric.ecdsa_sign
if not is_oscrypto:
responder_private_key = asymmetric.load_private_key(responder_private_key)
signature_bytes = sign_func(responder_private_key, response_data.dump(), self._hash_algo)
certs = None
if self._certificate_issuer and getattr(self._certificate_issuer.public_key, self._key_hash_algo) != responder_key_hash:
certs = [responder_certificate]
return ocsp.OCSPResponse({
'response_status': self._response_status,
'response_bytes': {
'response_type': 'basic_ocsp_response',
'response': {
'tbs_response_data': response_data,
'signature_algorithm': {'algorithm': signature_algorithm_id},
'signature': signature_bytes,
'certs': certs,
}
}
})
# Enums
class ResponseStatus(enum.Enum):
successful = 'successful'
malformed_request = 'malformed_request'
internal_error = 'internal_error'
try_later = 'try_later'
sign_required = 'sign_required'
unauthorized = 'unauthorized'
class CertificateStatus(enum.Enum):
good = 'good'
revoked = 'revoked'
key_compromise = 'key_compromise'
ca_compromise = 'ca_compromise'
affiliation_changed = 'affiliation_changed'
superseded = 'superseded'
cessation_of_operation = 'cessation_of_operation'
certificate_hold = 'certificate_hold'
remove_from_crl = 'remove_from_crl'
privilege_withdrawn = 'privilege_withdrawn'
unknown = 'unknown'
# API endpoints
FAULT_REVOKED = "revoked"
FAULT_UNKNOWN = "unknown"
app = Flask(__name__)
class OCSPResponder:
def __init__(self, issuer_cert: str, responder_cert: str, responder_key: str,
fault: str, next_update_seconds: int, response_delay_seconds: int,
include_extraneous_status: bool, issuer_hash_algorithm: str):
"""
Create a new OCSPResponder instance.
:param issuer_cert: Path to the issuer certificate.
:param responder_cert: Path to the certificate of the OCSP responder
with the `OCSP Signing` extension.
:param responder_key: Path to the private key belonging to the
responder cert.
:param validate_func: A function that - given a certificate serial -
will return the appropriate :class:`CertificateStatus` and -
depending on the status - a revocation datetime.
:param cert_retrieve_func: A function that - given a certificate serial -
will return the corresponding certificate as a string.
:param next_update_seconds: The ``nextUpdate`` value that will be written
into the response. Default: 9 hours.
:param response_delay_seconds: Delays the HTTP response by this many seconds.
:param include_extraneous_status: Include status of irrelevant certs in the response.
:param issuer_hash_algorithm: Algorithm to use when hashing the issuer name & key.
"""
# Certs and keys
self._issuer_cert = asymmetric.load_certificate(issuer_cert)
self._responder_cert = asymmetric.load_certificate(responder_cert)
self._responder_key = asymmetric.load_private_key(responder_key)
# Next update
self._next_update_seconds = next_update_seconds
self._fault = fault
self._response_delay_seconds = response_delay_seconds
self._include_extraneous_status = include_extraneous_status
self._issuer_hash_algorithm = issuer_hash_algorithm
def _fail(self, status: ResponseStatus) -> OCSPResponse:
builder = OCSPResponseBuilder(response_status=status.value)
return builder.build()
def parse_ocsp_request(self, request_der: bytes) -> OCSPRequest:
"""
Parse the request bytes, return an ``OCSPRequest`` instance.
"""
return OCSPRequest.load(request_der)
def validate(self):
time = datetime(2018, 1, 1, 1, 00, 00, 00, timezone.utc)
if self._fault == FAULT_REVOKED:
return (CertificateStatus.revoked, time)
elif self._fault == FAULT_UNKNOWN:
return (CertificateStatus.unknown, None)
elif self._fault != None:
raise NotImplemented('Fault type could not be found')
return (CertificateStatus.good, time)
def _build_ocsp_response(self, ocsp_request: OCSPRequest) -> OCSPResponse:
"""
Create and return an OCSP response from an OCSP request.
"""
# Get the certificate serial
tbs_request = ocsp_request['tbs_request']
request_list = tbs_request['request_list']
if len(request_list) < 1:
logger.warning('Received OCSP request with no requests')
raise NotImplemented('Empty requests not supported')
single_request = request_list[0] # TODO: Support more than one request
req_cert = single_request['req_cert']
serial = req_cert['serial_number'].native
# Check certificate status
try:
certificate_status, revocation_date = self.validate()
except Exception as e:
logger.exception('Could not determine certificate status: %s', e)
return self._fail(ResponseStatus.internal_error)
if self._include_extraneous_status:
revocation_date = datetime(2018, 1, 1, 1, 00, 00, 00, timezone.utc)
certificate_status_list = [ (serial+3, CertificateStatus.good.value),
(serial+2, CertificateStatus.unknown.value),
(serial+1, CertificateStatus.revoked.value),
(serial, certificate_status.value) ]
else:
certificate_status_list = [(serial, certificate_status.value)]
# Build the response
builder = OCSPResponseBuilder(**{
'response_status': ResponseStatus.successful.value,
'certificate_status_list': certificate_status_list,
'revocation_date': revocation_date,
})
# Parse extensions
for extension in tbs_request['request_extensions']:
extn_id = extension['extn_id'].native
critical = extension['critical'].native
value = extension['extn_value'].parsed
# This variable tracks whether any unknown extensions were encountered
unknown = False
# Handle nonce extension
if extn_id == 'nonce':
builder.nonce = value.native
# That's all we know
else:
unknown = True
# If an unknown critical extension is encountered (which should not
# usually happen, according to RFC 6960 4.1.2), we should throw our
# hands up in despair and run.
if unknown is True and critical is True:
logger.warning('Could not parse unknown critical extension: %r',
dict(extension.native))
return self._fail(ResponseStatus.internal_error)
# If it's an unknown non-critical extension, we can safely ignore it.
elif unknown is True:
logger.info('Ignored unknown non-critical extension: %r', dict(extension.native))
# Set certificate issuer
builder.certificate_issuer = self._issuer_cert
# Set the issuer hash algorithm
if self._issuer_hash_algorithm:
builder.issuer_hash_algo = self._issuer_hash_algorithm
# Set next update date
if self._next_update_seconds > 0:
now = datetime.now(timezone.utc)
builder.next_update = (now + timedelta(seconds=self._next_update_seconds)).replace(microsecond=0)
return builder.build(self._responder_key, self._responder_cert)
def build_http_response(self, request_der: bytes) -> Response:
global app
response_der = self._build_ocsp_response(request_der).dump()
if self._response_delay_seconds > 0:
logger.warning("Delaying OCSP response by " + str(self._response_delay_seconds) + " seconds")
time.sleep(self._response_delay_seconds)
resp = app.make_response((response_der, 200))
resp.headers['content_type'] = 'application/ocsp-response'
return resp
responder = None
def init_responder(issuer_cert: str, responder_cert: str, responder_key: str, fault: str,
next_update_seconds: int, response_delay_seconds: int, include_extraneous_status: bool,
issuer_hash_algorithm: str):
global responder
responder = OCSPResponder(issuer_cert=issuer_cert, responder_cert=responder_cert, responder_key=responder_key,
fault=fault, next_update_seconds=next_update_seconds, response_delay_seconds=response_delay_seconds,
include_extraneous_status=include_extraneous_status, issuer_hash_algorithm=issuer_hash_algorithm)
def init(port=8080, debug=False, host=None):
logger.info('Launching %sserver on port %d', 'debug' if debug else '', port)
app.run(port=port, debug=debug, host=host)
@app.route('/', methods=['GET'])
def _handle_root():
return 'ocsp-responder'
@app.route('/status/', defaults={'u_path': ''}, methods=['GET'])
@app.route('/status/<path:u_path>', methods=['GET'])
def _handle_get(u_path):
global responder
"""
An OCSP GET request contains the DER-in-base64 encoded OCSP request in the
HTTP request URL.
"""
if "Host" not in request.headers:
raise ValueError ("Required 'Host' header not present")
der = base64.b64decode(u_path)
ocsp_request = responder.parse_ocsp_request(der)
return responder.build_http_response(ocsp_request)
@app.route('/status', methods=['POST'])
def _handle_post():
global responder
"""
An OCSP POST request contains the DER encoded OCSP request in the HTTP
request body.
"""
if "Host" not in request.headers:
raise ValueError ("Required 'Host' header not present")
ocsp_request = responder.parse_ocsp_request(request.data)
return responder.build_http_response(ocsp_request)
| mongodb/mongo | src/third_party/mock_ocsp_responder/mock_ocsp_responder.py | mock_ocsp_responder.py | py | 23,898 | python | en | code | 24,670 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "sys.version_info",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "textwrap.dedent",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "re.sub",
... |
70427123945 |
def outer_function():
num = 20
def inner_function():
global num
num = 30
print("Before calling inner_function(): ", num)
inner_function()
print("After calling inner_function(): ", num)
outer_function()
print("Outside both function: ", num)
import random
print(random.randrange(-10, 20))
list1 = ['a', 'b', 'c', 'd', 'e']
# get random item from list1
print(random.choice(list1))
# Shuffle list1
random.shuffle(list1)
# Print the shuffled list1
print(list1)
# Print random element
print(random.random())
print(list1[-5])
# slicing in python
# start index is inclusive, end index is exclusive
print(list1[-5:-1])
# append
list1.append(1)
print(list1)
# extend
list2 = [2, 3, 4, 5]
list1.extend(list2)
print(list1)
# del
# start index is inclusive, end index is exclusive
del list1[4: 7]
print(list1)
# remove
list1.remove(4)
print(list1)
print(4 in list1)
print('a' in list1)
print(len(list1))
# List Comprehension
numbers = [x*x for x in range(1, 5)]
print(numbers)
evenOdd = ['Even' if x%2==0 else 'Odd' for x in range(2,7)]
print(evenOdd)
# list fns
list3 = ['Python', 'C++', 'C', 'C#']
list3.append('JavaScript')
print(list3)
list3.insert(1, 'Go')
print(list3)
# tuples
list4 = 'Go', 'Goa', 'Gone'
# we can create tuples without using ()
print(list4)
# create a tuple with only one element
list5 = 'Python',
# if we don't add commas it will be considered as string
print(list5)
# Set
list6 = {'Py', 65, 2.0, 65}
# set has no particular order, no duplicates
print(list6)
# create an empty set
empty_set = set()
print(empty_set)
# create an empty dictionary
empty_dictionary = { }
print(empty_dictionary)
# add elements to set
list6.add('Charm')
print(list6)
# update
list7 = ['Py', 'Charm', 69]
list6.update(list7)
print(list6)
# remove
list6.discard(65)
print(list6)
# Dictionary
student_id = {
101: 'Ashutosh',
102: 'Abhishek',
103: 'Saddam',
}
print(student_id)
del student_id[103]
print(student_id.keys())
# file i/o
# try:
# # open
# file1 = open("c:\dev\Python\\test.txt")
# # read
# content = file1.read()
# print(content)
# finally:
# # close
# file1.close()
# # with...open
# with open('c:\dev\Python\\test.txt') as file2:
# content = file2.read(5)
# print(content)
# # using with...open we don't have to close the file
# # write
# with open('c:\dev\Python\\test2.txt', 'w+') as file3:
# file3.write('Hell world')
import os
# get cwd
print(os.getcwd())
# change cwd
# os.chdir('c:\dev\Python\Py')
# list dir
print(os.listdir())
# create dir
# os.mkdir('testPy')
# to remove a non-empty dir
# import shutil
# shutil.rmtree('dirname')
# list all the built-in exceptions
# print(dir(locals()['__builtins__']))
# define Python user-defined exceptions
# class InvalidAgeException(Exception):
# "Raised when the input value is less than 18"
# pass
# # you need to guess this number
# number = 18
# try:
# input_num = int(input("Enter a number: "))
# if input_num < number:
# raise InvalidAgeException
# else:
# print("Eligible to Vote")
# except InvalidAgeException:
# print("Exception occurred: Invalid Age")
class Parrot:
name = ''
age = 0
parrot1 = Parrot()
parrot1.name = "Blu"
parrot1.age = 10
parrot2 = Parrot()
parrot2.name = "WOO"
parrot2.age = parrot1.age
print(parrot1.name, parrot1.age)
print(parrot2.name, parrot2.age)
class Animal:
def eat(self):
print("I eat!")
def sleep(self):
print("I sleep!")
class Dog(Animal):
def bark(self):
print("WOOF WOOF!")
dog1 = Dog()
dog1.bark()
dog1.sleep()
class Computer:
def __init__(self):
self.__maxprice = 1000
def sell(self):
print("Selling Price: {} buying?".format(self.__maxprice))
def setMaxPrice(self, price):
self.__maxprice = price
dell = Computer()
dell.sell()
dell.__maxprice = 999
dell.sell()
dell.setMaxPrice(969)
dell.sell()
class Polygon:
def render(self):
print('Rendering Polygon...')
class Square(Polygon):
def render(self):
print('Rendering Square...')
class Circle(Polygon):
# def render(self):
# print('Rendering Circle...')
pass
obj1 = Square()
obj1.render()
obj2 = Circle()
obj2.render()
class Bike:
def __init__(self, name, speed):
self.name = name
self.speed = speed
def details(self):
print('Name: {}, Speed: {}'.format(self.name, self.speed))
bike1 = Bike('Yamaha R1', 350)
bike2 = Bike('Kawasaki H2', 400)
bike1.details()
bike2.details()
class Poly:
def __init__(self, no_of_sides):
self.n = no_of_sides
self.sides = [0 for i in range(no_of_sides)]
def inputSides(self):
self.sides = [float(input('Enter side '+str(i+1)+' : ')) for i in range(self.n)]
def dispSides(self):
for i in range(self.n):
print('Side', i+1, 'is', self.sides[i])
class Triangle(Poly):
def __init__(self):
Poly.__init__(self, 3)
def findArea(self):
a, b, c = self.sides
s = (a+b+c)/2
area = (s*(s-a)*(s-b)*(s-c)) ** 0.5
print('Area of Triangle is %0.2f' %area)
t = Triangle()
# t.inputSides()
# t.dispSides()
# t.findArea()
class Point:
def __init__(self, x=0, y=0):
self.x = x
self.y = y
def __str__(self):
return "({0}, {1})".format(self.x, self.y)
def __add__(self, other):
x = self.x + other.x
y = self.y + other.y
return Point(x, y)
p1 = Point(1, 3)
p2 = Point(2, 4)
print(p1+p2)
print(p1.__add__(p2))
class Person:
def __init__(self, name, age):
self.name = name
self.age = age
def __gt__(self, other):
return self.age > other.age
person1 = Person('asu', 24)
person2 = Person('zero', 23)
print(person1 > person2)
print(person1 < person2)
#! Iterator
# __iter()__ & __next() are collectively called iterator protocol.
my_list = [1, 3, 5, 7, 11]
iterator = iter(my_list)
print(next(iterator))
print(next(iterator))
# we will get StopIteration exception at last.
for i in iterator:
print(i, end=' ')
# process continues until the iterator is exhausted.
class PowTwo:
def __init__(self, max=0):
self.max = max
def __iter__(self):
self.n = 0
return self
def __next__(self):
if self.n <= self.max:
res = 2 ** self.n
self.n += 1
return res
else:
raise StopIteration
# print('')
for y in PowTwo(1):
print(y)
numbers = PowTwo(3)
pt = iter(numbers)
for x in pt:
print(x)
# Infinite Iterators:
from itertools import count
infinite_iterator = count(1)
for i in range(3):
print(i,":",next(infinite_iterator))
#! Generator
# a generator is a function that returns an iterator -
# that produces a sequence of values when iterated over.
# yield is used to produce value from generator
# yield instead of return is the difference b/w fn & generator
# When the generator function is called, it doesn't
# execute the fn body immediately. Instead, it returns
# a generator object that can be
# iterated over to produce the values.
def my_generator(n):
value = 0
while value < n:
yield value
value += 1
for value in my_generator(3):
print(value)
generator = my_generator(3)
print(next(generator))
print(next(generator))
#* Generator Expression
# Syntax: (expression for item in iterable)
squares_generator = (i*i for i in range(5))
for i in squares_generator:
print(i)
#? We can only iterate over once since generators -
#? can only be used once.
def PowTwoGen(max=0):
n = 0
while n < max:
yield 2 ** n
n += 1
for i in PowTwoGen(3):
print(i)
# since generators produce only one item at a time,
# they can represent an infinite stream of data.
# Pipelining Generators
def fibonacci_numbers(nums):
x, y = 0, 1
for _ in range(nums):
x, y = y, x+y
yield x
# The '_' variable name is used to indicate that, the
# loop variable isn't actually used in the loop body.
# y = y+x
# x = y-x
def square(nums):
for num in nums:
yield num ** 2
print(sum(square(fibonacci_numbers(10))))
#* A function with yield instead of return,
#* when called returns a Generator
# Controlling a Generator Exhaustion
class Bank():
crisis = False
def create_atm(self):
while not self.crisis:
yield "$100"
sbi = Bank()
corner_street_atm = sbi.create_atm()
print(corner_street_atm.__next__())
print([corner_street_atm.__next__() for cash in range(5)])
sbi.crisis = True # crisis is coming, no more money
# print(corner_street_atm.__next__())
wall_street_atm = sbi.create_atm()
# print(wall_street_atm.__next__()) # It's even true for new ATMs
sbi.crisis = False
# print(corner_street_atm.__next__()) # Even post-crisis ATMs remain empty
# print(wall_street_atm.__next__())
brand_new_atm = sbi.create_atm()
# print(brand_new_atm.__next__()) # Build new ATM to get back in buisness
# this is generator exhaustion, when the function runs off.
# Reversing a string
greet = 'HeLlO WoRlD'
reversed_greet = greet[::-1]
print(reversed_greet)
# Check if a string contains a substring
substring_greet = 'HeLlo'
if substring_greet in greet:
print(True)
else:
print(False)
# Find the maximum value in the list
the_list = [1, 4, 9, 3, 0, 4, 2, 1]
max_val = max(the_list)
print(max_val)
# Find the index of max value in the list
idx_max_val = the_list.index(max_val)
print(idx_max_val)
# Removing duplicates from the list
new_list = list(set(the_list))
print(new_list)
# Check if list is empty
empty_list = []
if not empty_list:
print('List is empty')
# Counting occurences of an item
item = 4
no_of_occurence = the_list.count(item)
print(no_of_occurence)
# Check if all item in the list is unique
if len(the_list) == len(set(the_list)):
print('All items are unique')
else:
print('Not Unique!')
# Removing all occurences of an item from the list
list_item = [x for x in the_list if x != item]
print(list_item)
# Flattening a nested list
nested_list = [[1, 3], [5, 7]]
flattened_list = [x for y in nested_list for x in y]
print(flattened_list)
# Remove all whitespaces from a string
white_string = ' Hell o World '
rm_ws_string = ''.join(white_string.split())
print(rm_ws_string)
# Remove duplicates from a string
rm_duplicate = ''.join(set(white_string))
print(rm_duplicate)
# Count no. of words in a string
word_cnt = len(white_string.split())
print(word_cnt)
# Generate a random integer
import random
random_int = random.randint(1, 6)
print(random_int)
# Merging two dictionaries
dict1 = {'apple': 3, 'banana': 4}
dict2 = {'orange': 1, 'mango': 2}
merged_list = [*dict1, *dict2]
print(merged_list)
merged_set = {*dict1, *dict2}
print(merged_set)
merged_dict = {**dict1, **dict2}
print(merged_dict)
#! Sorting a dictionary
my_dict = {
'apple': 4,
'grape': 5,
'watermelon': 1,
'mango': 2,
'litchi': 3,
}
print(my_dict.values())
print(my_dict.keys())
print(my_dict.items())
# Sorting by key
key_dict = dict(sorted(my_dict.items()))
print(key_dict)
# Sorting by value
val_dict = dict(sorted(my_dict.items(), key=lambda item: item[1]))
print(val_dict)
val_dict_comp = {key: value for key, value in sorted(my_dict.items(), key=lambda item: item[1])}
print(val_dict_comp)
#! Closure
# Python closure is a nested function that allows us to access -
# outer function's variables even after outer function is closed.
def outerFunction():
name = 'Ashutosh'
return lambda: 'Hi ' + name
message = outerFunction()
print(message())
def outer_function():
num = 1
def inner_function():
nonlocal num
num += 2
return num
return inner_function
odd = outer_function()
print(odd())
print(odd())
print(odd())
odd2 = outer_function()
print(odd2())
print(odd2.__closure__)
#! Decorators
# a python decorator is a fn, that takes in a fn &
# returns it by adding some functionality.
def make_pretty(func):
def inner():
print('Decorated :)')
func()
return inner
# def ordinary():
# print('Ordinary :(')
# decorated_func = make_pretty(ordinary)
# decorated_func()
# |||
@make_pretty
def ordinary():
print('Ordinary :(')
ordinary()
def smart_divide(func):
def inner(a, b):
print('I am going to divide', a, 'and', b)
if b == 0:
print('Whoops!, Cannot Divide.')
return
return func(a, b)
return inner
@smart_divide
def divide(a, b):
return a/b
divide(2, 0)
def star(func):
def inner(*args, **kwargs):
print('*' * 15)
func(*args, **kwargs)
print('*' * 15)
return inner
def percent(func):
def inner(*args, **kwargs):
print('%' * 15)
func(*args, **kwargs)
print('%' * 15)
return inner
@star
@percent
def printer(msg):
print(msg)
printer('HellBoy')
#! @property Decorator
# a pythonic way to use getters and setters in OOPs
class Celsius1:
def __init__(self, temperature=0):
self.temperature = temperature
def to_fahrenheit(self):
return (self.temperature * 1.8) + 32
human = Celsius1()
human.temperature = 37
print(human.temperature)
print(human.to_fahrenheit())
class Celsius2:
def __init__(self, temperature=0):
self.set_temperature(temperature)
def to_fahrenheit(self):
return (self.get_temperature() * 1.8) + 32
# getter
def get_temperature(self):
return self._temperature
# setter
def set_temperature(self, value):
if value < -273.15:
raise ValueError('Invalid Temperature')
self._temperature = value
human = Celsius2(37)
print(human.get_temperature())
print(human.to_fahrenheit())
# human.set_temperature(-300)
# print(human.to_fahrenheit())
class Celsius3:
def __init__(self, temperature=0):
self.temperature = temperature
def to_fahrenheit(self):
return (self.temperature * 1.8) + 32
def get_temperature(self):
print('Getting Value...')
return self._temperature
def set_temperature(self, value):
print('Setting Value...')
if value < -273.15:
raise ValueError('Invalid Temperature')
self._temperature = value
temperature = property(get_temperature, set_temperature)
# The actual temperature value is stored in the private
# _temperature variable. The temperature attribute is a
# property object which provides an interface to this private variable.
human = Celsius3(37)
print(human.temperature)
print(human.to_fahrenheit())
print('')
class Celsius:
def __init__(self, temperature=0):
self.temperature = temperature
def to_fahrenheit(self):
return (self.temperature * 1.8) + 32
@property
def temperature(self):
print('Setting Value...')
return self._temperature
@temperature.setter
def temperature(self, value):
print('Getting Value...')
if value < -273.15:
raise ValueError('Invalid Temperature!')
self._temperature = value
human = Celsius(37)
print(human.temperature)
print(human.to_fahrenheit())
#! RegEx
# i.e. Regular Expression
# is a sequence of characters that defines a search pattern.
# e.g. ^a...s$
import re
pattern = '^a...s$'
test_string = 'abyss'
result = re.match(pattern, test_string)
if result:
print('Match Found!')
else:
print('No Match!')
#* MetaCharcters
# Metacharacters are characters that are interpreted in a
# special way by RegEx engine.
# $ ^ * () + [] {} \ | . ?
#? [] - Square brackets
# Specifies a set of characters you wish to match.
# [abc] - abc de ca - 5 matches
# [a-e] is the same as [abcde]
# [^0-9] means any non-digit character
#? . - Period
# A period any single character(except '\n')
# .. - abc - 1 match
# .. - abcd - 2 match
#? ^ - Caret
# Used to check if a string starts with a certain character
# ^a - abc - 1 match
# ^a - bca - 0 match
#? $ - Dollar
# Used to check if a string ends with a certain character
# a$ - abc - 0 match
# a$ - bca - 1 match
#? * - Star
# Matches zero or more occurences of the pattern left to it.
# ma*n - mn - 1 match
# ma*n - maaan - 1 match
# ma*n - main - 0 match
#? + - Plus
# Matches one or more occurences of the pattern left to it.
# ma+n - mn - 0 match
# ma+n - maaan - 1 match
# ma+n - main - 0 match
#? ? - Question Mark
# Matches zero or one occurence pattern left to it.
# ma?n - mn - 1 match
# ma?n - maaan - 0 match
# ma?n - main - 1 match
#? {} - Braces
# {n, m} means at least n, and at most m repetitions of
# the pattern left to it.
# a{2, 3} - abc dat - 0 match
# a{2, 3} - abc daat - 1 match
# a{2, 3} - aabc daat - 2 match
#? | - Alternation
# a|b - cde - No match
# a|b - ade - 1 match
#? () - Group
# Parentheses () is used to group sub-patterns.
# (a|b|c)xz match any string that matches
# either a or b or c followed by xz
#? \ - Backslash
# Used to esacape various characters including all metacharacters.
# \$a match if a string contains $ followed by a. Here $,
# is not interpreted by a RegEx engine.
#* Special Sequences:
# makes easier to write commonly used patterns.
#* \A
# Matches if the specified characters are at the
# start of a string.
# \Athe - the sun - match
# \Athe - In the sun - NO match
#* \b
# Matches if the specified characters are at the
# beginning or end of the word.
# \bfoo - football - match
# \bfoo - afootball - NO match
# foo\b - kungfoo - match
#* \B
# opposite of \b
#* \d
# Matches any decimal digit.
# Equivalent to [0-9]
#* \D
# opposite to \d
#* \s
# Matches where a string contains any
# whitespace character.
# Equivalent to [ \t\n\r\f\v]
# \s - Python RegEx - 1 match
# \s - PythonRegEx - NO match
#* \S
# opposite of \s
# Equivalent to [^ \t\n\r\f\v]
#* \w
# Matches any alphanumeric character.
# Equivalent to [a-zA-Z0-9_]
# \w - 6&"_:"c - 3 matches
# \w - %>! - NO match
#* \W
# opposite of \w
#* \Z
# Matches if the specified characters
# are at the end of a string.
# Python\Z - I like Python - 1 match
# Python\Z - Python is fun - NO match
#! Python RegEx
#? re.findall()
# returns a list of strings containing all the matches.
# else an empty list.
string = 'hello 13 hi 69, '
pattern = '\d+'
result = re.findall(pattern, string)
print(result)
#? re.split()
# Splits the string where there is a match, and
# returns a list of strings where the splits have occurred.
# return list containing original string if pattern not found.
string = 'Thirteen:13 Sixty nine:69.'
pattern = '\d+'
result = re.split(pattern, string)
print(result)
result = re.split(pattern, string, maxsplit=1)
print(result)
#? re.sub()
# Returns a string where matched occurences are
# replaced with a content of replace variable.
string = 'abc 13\de 23\n f45'
pattern = '\s+'
replace = 'XXX'
result = re.sub(pattern, replace, string)
print(result)
#? re.subn()
# similar to re.sub() except it return a tuple
# containing the new string and the no. of substitutions
string = 'abc 13\de 23\n f45'
pattern = '\s+'
replace = 'XXX'
result = re.subn(pattern, replace, string)
print(result)
#? re.search()
# Looks for the first location where the RegEx pattern
# produces a match with a string.
# returns a match object; if not, returns None.
string = 'Python is Fun!'
match = re.search('\APython', string)
if match:
print('Pattern found inside the string.')
else:
print('Pattern Not Found!')
#* Match Object
#? match.group()
# returns the part of the string where there is a match.
string = '39801 356, 2102 1111'
pattern = '(\d{3}) (\d{2})'
match = re.search(pattern, string)
if match:
print(match.group())
else:
print('Pattern Not Found!')
#! Python DateTime
import datetime
now = datetime.datetime.now()
print(now)
today = datetime.date.today()
print(today)
print(today.year)
from datetime import timedelta
# timedelta
t = timedelta(days = 5, hours = 1, seconds = 33, microseconds = 233423)
print("Total seconds =", t.total_seconds())
#? strftime()
# Creates a formatted string from a given datetime object.
t = now.strftime("%H:%M:%S")
print(t)
t = now.strftime("%d/%m/%Y")
print(t)
#? strptime()
# Creates a datetime object from a given string.
date_string = '25 December, 2022'
print('Date String:', date_string)
date_object = datetime.datetime.strptime(date_string, '%d %B, %Y')
print('Date Object:', date_object)
# Timezones
import pytz
local = datetime.datetime.now()
print('Local:', local.strftime('%m/%d/%Y, %H:%M:%S%p'))
tz_NY = pytz.timezone('America/New_York')
datetime_NY = datetime.datetime.now(tz_NY)
print('NY:', datetime_NY.strftime('%m/%d/%Y, %H:%M:%S%p'))
# Timestamp
# timestamp is no. of seconds since 1/1/1970
ts = datetime.datetime.timestamp(now)
print(ts)
dt_object = datetime.datetime.fromtimestamp(ts)
print(dt_object)
import time
seconds = time.time()
print('Seconds since epoch:', seconds)
seconds = 1000000000
# time.ctime() === takes seconds as arguments & returns
# a string representing local time
the_time = time.ctime(seconds)
print(the_time)
#? sleep()
# delays execution of the current thread for the
# given no. of seconds
print('Printed Immediately.')
time.sleep(1.69)
print('Printed after 1.69 seconds.')
#! Multithreading in Python
# A program is a collection of instructions.
# A process is the execution of those instructions.
# A thread is a subset of process.
# A process can have one or more threads.
import threading
def print_hello_three_times():
for i in range(3):
print('Hello!')
def print_hi_three_times():
for i in range(3):
print('Hi!')
t1 = threading.Thread(target=print_hello_three_times)
t2 = threading.Thread(target=print_hi_three_times)
t1.start()
t2.start()
def print_hello():
for i in range(4):
time.sleep(0.5)
print('Hello')
def print_hi():
for i in range(4):
time.sleep(0.75)
print('Hi')
t3 = threading.Thread(target=print_hello)
t4 = threading.Thread(target=print_hi)
t3.start()
t4.start() | asu2sh/dev | Python/main.py | main.py | py | 22,131 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "random.randrange",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "random.shuffle",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "random.random",
"li... |
31877057798 | import numpy as np
import cv2 as cv
from pathlib import Path
def get_image():
Class = 'BACKWARD'
Path('DATASET1/'+Class).mkdir(parents=True, exist_ok=True)
width=1200
height=720
cam=cv.VideoCapture(0,cv.CAP_DSHOW)
cam.set(cv.CAP_PROP_FRAME_WIDTH, width)
cam.set(cv.CAP_PROP_FRAME_HEIGHT,height)
cam.set(cv.CAP_PROP_FPS, 30)
cam.set(cv.CAP_PROP_FOURCC,cv.VideoWriter_fourcc(*'MJPG'))
if not cam.isOpened():
print("Cannot open camera")
exit()
i = 0
while True:
ret, frame = cam.read()
frame = cv.resize(frame,(width,height))
if not ret:
print("Can't receive frame (stream end?). Exiting ...")
break
frame = cv.flip(frame,1)
i+= 1
if i % 5==0:
cv.imwrite('DATASET1/'+Class+'/'+str(i)+'.png',frame)
cv.imshow('frame', frame)
if cv.waitKey(1) == ord('q') or i > 500:
break
cam.release()
cv.destroyAllWindows()
if __name__ == "__main__":
get_image()
| Ewon12/Hand-Gesture-Recogniton-Control-for-a-Cylindrical-Manipulator | get_image.py | get_image.py | py | 1,043 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pathlib.Path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "cv2.VideoCapture",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "cv2.CAP_DSHOW",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "cv2.CAP_PROP_FRAME_WI... |
74352477543 | # -*- coding: utf-8 -*-
"""
-------------------------------------------------------------------------------
GUFY - Copyright (c) 2019, Fabian Balzer
Distributed under the terms of the GNU General Public License v3.0.
The full license is in the file LICENSE.txt, distributed with this software.
-------------------------------------------------------------------------------
@author: Fabian Balzer (fabian.balzer@studium.uni-hamburg.de)
A module containing some options for configuring everything.
"""
import inspect
import os
import logging
from configparser import ConfigParser, NoOptionError
import PyQt5.QtWidgets as QW
import PyQt5.QtCore as QC
import PyQt5.QtGui as QG
from yt import YTQuantity
from simgui_modules.buttons import coolButton
from simgui_modules.checkBoxes import coolCheckBox, createAnnotationBoxes
from simgui_modules.comboBoxes import createTimeQuantityBox, createWeightBoxes
from simgui_modules.lineEdits import createColorSchemeEdit, coolEdit, \
validColors
from simgui_modules.logging import LoggingOptionsDialog
GUILogger = logging.getLogger("GUI")
ytLogger = logging.getLogger("yt")
# Load the configuration file
config = ConfigParser()
config.read("simgui_registry/GUIconfig.ini")
class ConfigDialog(QW.QDialog):
"""A Dialog where the user can set all configuration options.
Parameters:
Param_Dict: To receive initial conditions and store the outputs
parent: QWidget: The Dialogs' parent"""
def __init__(self, Param_Dict, Misc_Dict, parent):
super().__init__(parent)
self.Param_Dict = Param_Dict
self.Misc_Dict = Misc_Dict
self.Config_Dict = {}
self.setWindowFlags( # Set minimize, maximize and close button
QC.Qt.Window |
QC.Qt.CustomizeWindowHint |
QC.Qt.WindowTitleHint |
QC.Qt.WindowMinimizeButtonHint |
QC.Qt.WindowMaximizeButtonHint |
QC.Qt.WindowCloseButtonHint
)
self.initUi()
self.signalsConnection()
self.setWindowIcon(QG.QIcon('simgui_registry/CoverIcon.png'))
self.setWindowTitle("Set up configuration")
self.show()
def initUi(self):
"""Initialize the visual elements of the user interface"""
# Buttons for closing the window:
self.buttonBox = QW.QDialogButtonBox(self)
self.buttonBox.addButton("Save", QW.QDialogButtonBox.AcceptRole)
self.buttonBox.addButton("Cancel", QW.QDialogButtonBox.RejectRole)
self.textBrowser = QW.QTextBrowser()
text = """Here you can set up the default settings the GUI is started
with.<br>Except for the home directory, <b>these options will not
change the current session of the GUI</b>.<br>
The current logging settings can be changed by pressing <i>CTRL+L</i>.
"""
self.textBrowser.setHtml(text)
self.textBrowser.setMinimumWidth(150)
self.homeDir = config["Path"]["homedir"]
self.directoryLabel = QW.QLabel(f'Current home directory:\n{self.homeDir}')
self.directoryButton = coolButton(text="Set home directory",
tooltip="Set the new home directory")
layout = QW.QGridLayout()
layout.addWidget(self.textBrowser, 0, 0)
layout.addWidget(self.createCheckBoxes(), 0, 1)
layout.addWidget(self.createLoggings(), 2, 0)
layout.addWidget(self.createMiscs(), 2, 1)
layout.addWidget(self.directoryLabel, 3, 0, 1, 2)
layout.addWidget(self.directoryButton, 4, 0, 1, 2)
layout.addWidget(self.buttonBox, 5, 1)
self.setLayout(layout)
def createCheckBoxes(self):
"""Creates a group box with all relevant Checkboxes on it, and stores
them in CheckBox_Dict"""
wid = QW.QGroupBox("CheckBox defaults")
mainLayout = QW.QHBoxLayout(wid)
mainLayout.setContentsMargins(0, 0, 0, 0)
annoWid = QW.QWidget()
annoLayout = QW.QVBoxLayout(annoWid)
annoLayout.setSpacing(3)
annoLayout.addWidget(QW.QLabel("Annotations: "))
annoKeys = ["Timestamp", "Scale", "Grid",
"VelVectors", "VelStreamlines",
"MagVectors", "MagStreamlines", "Contour", "Particleanno",
"LineAnno"]
annoBoxes = createAnnotationBoxes(width=None, defaultString="default ")
self.CheckBox_Dict = dict(zip(annoKeys, annoBoxes))
for key in sorted(annoKeys):
annoLayout.addWidget(self.CheckBox_Dict[key])
annoLayout.addStretch(1)
miscWid = QW.QWidget()
miscLayout = QW.QVBoxLayout(miscWid)
miscLayout.setSpacing(3)
miscLayout.addWidget(QW.QLabel("Miscellaneous: "))
otherKeys = ["XLog", "YLog", "ZLog", "SetAspect", "QuitDialog"]
otherTexts = ["Log horizontal axis", "Log vertical axis",
"Log color bar axis",
"Ignore aspect ratio",
"Warn me before quitting"]
for key, text in zip(otherKeys, otherTexts):
checkBox = coolCheckBox(text, f"Toggle {text.lower()} default",
width=None)
self.CheckBox_Dict[key] = checkBox
if key == "QuitDialog":
miscLayout.addStretch(1)
miscLayout.addWidget(checkBox)
for key in self.CheckBox_Dict.keys():
self.CheckBox_Dict[key].setChecked(config.getboolean("CheckBoxes", key))
mainLayout.addWidget(annoWid)
mainLayout.addWidget(miscWid)
wid.setMinimumWidth(400)
return wid
def createLoggings(self):
"""Creates a group box for logging options"""
wid = QW.QGroupBox("Logging defaults")
layout = QW.QVBoxLayout(wid)
layout.setSpacing(3)
layout.setContentsMargins(3, 3, 3, 3)
self.LogDialog = LoggingOptionsDialog(self.Misc_Dict, parent=self,
configDialog=True)
layout.addWidget(self.LogDialog)
return wid
def createMiscs(self):
"""Creates a group box with all relevant Misc items on it, and stores
them in Misc_Dict"""
wid = QW.QGroupBox("Miscellaneous defaults")
layout = QW.QFormLayout(wid)
layout.setSpacing(3)
self.Misc_Dict = {}
keys = ["gridunit", "colorscheme", "timequantity", "weightfield"]
self.Misc_Dict["colorscheme"] = createColorSchemeEdit(width=None)
self.Misc_Dict["colorscheme"].setText(config["Misc"]["colorscheme"])
self.Misc_Dict["colorscheme"].setToolTip("Set the default color scheme")
self.Misc_Dict["gridunit"] = coolEdit(config["Misc"]["gridunit"], "au",
"Set the length unit default for"
" the GUI", width=None)
self.Misc_Dict["timequantity"] = createTimeQuantityBox(width=None)
self.Misc_Dict["timequantity"].setCurrentText(config["Misc"]["timequantity"])
self.Misc_Dict["weightfield"] = createWeightBoxes("Color", width=None)
self.Misc_Dict["weightfield"].setToolTip("Select weight field default for "
"phase, profile and projection plots")
self.Misc_Dict["weightfield"].setCurrentText(config["Misc"]["weightfield"])
texts = ["Color scheme: ", "Length unit: ", "Time quantity: ",
"Weight field: "]
keys = ["colorscheme", "gridunit", "timequantity", "weightfield"]
for text, key in zip(texts, keys):
layout.addRow(text, self.Misc_Dict[key])
return wid
def signalsConnection(self):
"""Connect the signals of accept and cancelbutton"""
self.buttonBox.accepted.connect(self.saveSettings)
self.buttonBox.rejected.connect(self.cancelPressed)
self.directoryButton.clicked.connect(self.getHomeDir)
self.Misc_Dict["colorscheme"].textChanged.connect(self.getColorInput)
self.Misc_Dict["gridunit"].textChanged.connect(self.getGridInput)
self.Misc_Dict["timequantity"].currentIndexChanged.connect(lambda: self.getComboInput("timequantity"))
self.Misc_Dict["weightfield"].currentIndexChanged.connect(lambda: self.getComboInput("weightfield"))
self.CheckBox_Dict["Timestamp"].toggled.connect(lambda state: self.getStateInput("Timestamp", state))
self.CheckBox_Dict["Scale"].toggled.connect(lambda state: self.getStateInput("Scale", state))
self.CheckBox_Dict["Grid"].toggled.connect(lambda state: self.getStateInput("Grid", state))
self.CheckBox_Dict["Contour"].toggled.connect(lambda state: self.getStateInput("Contour", state))
self.CheckBox_Dict["VelVectors"].toggled.connect(lambda state: self.getStateInput("VelVectors", state))
self.CheckBox_Dict["VelStreamlines"].toggled.connect(lambda state: self.getStateInput("VelStreamlines", state))
self.CheckBox_Dict["MagVectors"].toggled.connect(lambda state: self.getStateInput("MagVectors", state))
self.CheckBox_Dict["MagStreamlines"].toggled.connect(lambda state: self.getStateInput("MagStreamlines", state))
self.CheckBox_Dict["LineAnno"].toggled.connect(lambda state: self.getStateInput("LineAnno", state))
self.CheckBox_Dict["XLog"].toggled.connect(lambda state: self.getStateInput("XLog", state))
self.CheckBox_Dict["YLog"].toggled.connect(lambda state: self.getStateInput("YLog", state))
self.CheckBox_Dict["ZLog"].toggled.connect(lambda state: self.getStateInput("ZLog", state))
self.CheckBox_Dict["SetAspect"].toggled.connect(lambda state: self.getStateInput("SetAspect", state))
self.CheckBox_Dict["QuitDialog"].toggled.connect(lambda state: self.getStateInput("QuitDialog", state))
def getStateInput(self, key, state):
"""Store the CheckBox input as a string that can be saved in the config
file."""
boolString = "no"
if state:
boolString = "yes"
self.Config_Dict["CheckBoxes_" + key] = boolString
def getColorInput(self, text):
"""Read out the input of the color scheme and give feedback if it is
valid"""
if text not in validColors:
self.Misc_Dict["colorscheme"].turnTextRed()
self.Config_Dict["Misc_colorscheme"] = "viridis"
else:
self.Misc_Dict["colorscheme"].turnTextBlack()
self.Config_Dict["Misc_colorscheme"] = text
def getComboInput(self, key):
"""Read out the input of the time quantity"""
self.Config_Dict[f"Misc_{key}"] = self.Misc_Dict[key].currentText()
def getGridInput(self, text):
"""Read out grid unit input and give feedback"""
# reference unit
fieldUnit = YTQuantity(1, "au").units
from yt.units.unit_object import UnitParseError
lineEdit = self.Misc_Dict["gridunit"]
try:
textUnit = YTQuantity(1, text).units
if fieldUnit.same_dimensions_as(textUnit):
lineEdit.turnTextBlack()
newUnit = lineEdit.text()
else:
lineEdit.turnTextRed()
newUnit = str(fieldUnit)
except (UnitParseError, AttributeError, TypeError):
lineEdit.turnTextRed()
newUnit = str(fieldUnit)
self.Config_Dict["Misc_gridunit"] = newUnit
def getHomeDir(self):
"""Opens a dialog to receive the home directory"""
directory = QW.QFileDialog.getExistingDirectory(self, "Select a home "
"directory",
self.homeDir)
if directory != '':
self.homeDir = directory
self.directoryLabel.setText(f"Current home directory:\n{directory}")
def saveSettings(self):
"""Handles the saving operations"""
config["Path"]["homedir"] = self.homeDir
config["Logging"]["yt"] = str(self.LogDialog.ytInput)
config["Logging"]["GUI"] = str(self.LogDialog.GUIInput)
config["Logging"]["MaxBlocks"] = str(self.LogDialog.blockCount)
if not self.Param_Dict["isValidFile"]: # only if no file is loaded
self.Param_Dict["Directory"] = self.homeDir
self.parent().Status_Dict["Dir"].setText(self.homeDir)
for key in self.Config_Dict.keys():
group = key.split("_")[0]
subkey = key.split("_")[1]
config[group][subkey] = self.Config_Dict[key]
saveConfigOptions(log=True)
self.accept()
def cancelPressed(self):
"""Handles the Button press of 'Cancel'"""
GUILogger.info("No settings saved.")
self.reject()
def getHomeDirectory():
"""Checks whether a home directory is stored in the config file. If not,
determines the directory the GUI is started from and saves it as the home
directory.
Returns:
directory: str: The working directory"""
directory = config["Path"]["homedir"]
if directory == "" or not os.path.isdir(directory):
# https://stackoverflow.com/questions/3718657/how-to-properly-determine-current-script-directory
filename = inspect.getframeinfo(inspect.currentframe()).filename
moduleDir = os.path.dirname(os.path.abspath(filename))
# some string formatting to remove the last bit:
directory = "\\".join(moduleDir.split("\\")[:-1])
GUILogger.info("Couldn't locate home directory. Default directory is "
"set to the directory the program was started in.")
return directory
def loadConfigOptions(Window):
"""Sets the widgets according to the settings given through the config
file"""
text = config["Misc"]["colorscheme"]
Window.Edit_Dict["ColorScheme"].setText(text)
Window.Edit_Dict["ColorScheme"].setPlaceholderText(text)
text = config["Misc"]["gridunit"]
Window.Edit_Dict["GridUnit"].setText(text)
Window.Edit_Dict["GridUnit"].setPlaceholderText(text)
text = config["Misc"]["timequantity"]
Window.ComboBox_Dict["TimeQuantity"].setCurrentText(text)
text = config["Misc"]["weightfield"]
Window.ComboBox_Dict["YWeight"].setCurrentText(text)
Window.Misc_Dict["LogBox"].document().setMaximumBlockCount(config.getint("Logging", "MaxBlocks"))
GUILogger.setLevel(config.getint("Logging", "GUI"))
ytLogger.setLevel(config.getint("Logging", "yt"))
GUILogger.info("Logs and additional Information will be displayed here.")
GUILogger.log(29, "You can <b>change the logging level</b> by pressing <i>ctrl + L</i> or in the <i>Options</i> menu.")
GUILogger.info("Please open a (FLASH-)simulation file or a time series to get started.")
for key in Window.CheckBox_Dict.keys():
try:
Window.CheckBox_Dict[key].setChecked(config.getboolean("CheckBoxes", key))
except NoOptionError:
pass
def saveConfigOptions(log=False):
"""Convenience method to store the config options in the config file."""
with open("simgui_registry/GUIconfig.ini", "w") as configfile:
config.write(configfile)
if log:
GUILogger.log(29, "New configuration settings have successfully been stored.")
| Fabian-Balzer/GUFY | GUFY/simgui_modules/configureGUI.py | configureGUI.py | py | 15,735 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "logging.getLogger",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "configparser.ConfigParser",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "PyQt5.Q... |
73485125223 | # https://programmers.co.kr/learn/courses/30/lessons/72415
from collections import defaultdict
import heapq
dr = (-1, 1, 0, 0)
dc = (0, 0, -1, 1)
def get_cnt(board, r, c, _r, _c):
cnt_map = [[float('inf') for _ in range(4)] for _ in range(4)]
cnt_map[r][c] = 0
pq = [(0, r, c)]
heapq.heapify(pq)
while pq:
curr_cnt, curr_r, curr_c = heapq.heappop(pq)
if curr_r == _r and curr_c == _c:
return curr_cnt
for d in range(4):
r, c, cnt = curr_r, curr_c, 0
while 0 <= r + dr[d] < 4 and 0 <= c + dc[d] < 4:
r += dr[d]
c += dc[d]
cnt += 1
if board[r][c]:
break
if cnt_map[r][c] > curr_cnt + cnt:
cnt_map[r][c] = curr_cnt + cnt
heapq.heappush(pq, (curr_cnt + cnt, r, c))
if cnt_map[r][c] > curr_cnt + 1:
cnt_map[r][c] = curr_cnt + 1
heapq.heappush(pq, (curr_cnt + 1, r, c))
def get_min_cnt(board, r, c):
if not sum(map(sum, board)):
return 0
min_cnt = float('inf')
locations = defaultdict(list)
for row in range(len(board)):
for col in range(len(board[0])):
if board[row][col]:
locations[board[row][col]].append((row, col))
for card in locations.keys():
(r1, c1), (r2, c2) = locations[card]
cnt12 = get_cnt(board, r1, c1, r2, c2) + 2
cnt1 = get_cnt(board, r, c, r1, c1) + cnt12
cnt21 = get_cnt(board, r2, c2, r1, c1) + 2
cnt2 = get_cnt(board, r, c, r2, c2) + cnt21
board[r1][c1] = board[r2][c2] = 0
min_cnt = min(
min_cnt,
cnt1 + get_min_cnt(board, r2, c2),
cnt2 + get_min_cnt(board, r1, c1)
)
board[r1][c1] = board[r2][c2] = card
return min_cnt
def solution(board, r, c):
return get_min_cnt(board, r, c)
| lexiconium/algorithms | programmers/2021_KAKAO_BLIND_RECRUITMENT/72415.py | 72415.py | py | 1,997 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "heapq.heapify",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "heapq.heappop",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "heapq.heappush",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "heapq.heappush",
"line... |
37069874151 | #!/usr/bin/python3
"""
function that queries the Reddit API and prints the titles
of the first 10 hot posts listed for a given subreddit.
"""
import requests
def top_ten(subreddit):
"""
function that queries subredit hot topics
"""
params = {'limit': 10}
url = f"https://www.reddit.com/r/{subreddit}/hot.json"
headers = {'User-Agent': 'RedditSubscriberApp/1.0'}
try:
response = requests.get(url, headers=headers,
params=params, allow_redirects=False)
if response.status_code == 200:
data = response.json()
posts = data['data']['children']
for i, post in enumerate(posts, 1):
title = post['data']['title']
print(f"{title}")
elif response.status_code == 302:
print("None")
else:
print("None")
except requests.exceptions.RequestException as e:
print(f"An error occurred: {str(e)}")
return 0
| wughangar/alx-system_engineering-devops | 0x16-api_advanced/1-top_ten.py | 1-top_ten.py | py | 1,000 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "requests.exceptions",
"line_number": 35,
"usage_type": "attribute"
}
] |
2548310 | import pytest
from comicgeeks import Comic_Geeks
from dotenv import dotenv_values
from pathlib import Path
dotenv_path = Path(".devdata.env")
env = dotenv_values(dotenv_path=dotenv_path)
if "LCG_CI_SESSION" not in env:
import os
env = {
"LCG_CI_SESSION": os.environ.get("LCG_CI_SESSION"),
"LCG_USERNAME": os.environ.get("LCG_USERNAME"),
"LCG_PASSWORD": os.environ.get("LCG_PASSWORD"),
}
__author__ = "Pablo Ruiz"
__copyright__ = "Pablo Ruiz"
__license__ = "GPL-3.0-only"
def test_get_issue_by_id():
"""Get issue by id test"""
# Also test .json() function
client = Comic_Geeks()
data = client.issue_info(3616996).json()
assert data["issue_id"] == 3616996
assert len(data["characters"]) > 0
cover = data["cover"]
assert cover["name"] == "Daredevil #8" and cover["image"] != "#"
community = data["community"]
assert (
(
community["pull"] >= 1
if type(community["pull"]) is int
else community["pull"] == "Unknown"
)
and (
community["collect"] >= 1
if type(community["collect"]) is int
else community["collect"] == "Unknown"
)
and (
community["readlist"] >= 1
if type(community["readlist"]) is int
else community["readlist"] == "Unknown"
)
and (
community["wishlist"] >= 1
if type(community["wishlist"]) is int
else community["wishlist"] == "Unknown"
)
and (
community["rating"] >= 1
if type(community["rating"]) is int
else community["rating"] == "Unknown"
)
)
assert data["description"] != ""
assert data["details"] == {
"format": "comic",
"page_count": "28 pages",
"cover_date": "sep 2019",
"upc": "75960609142300811",
"distributor_sku": "may190864",
}
assert data["name"] == "No Devils, Only God, Part 3"
assert data["number"] == "8"
assert len(data["person_credits"]) > 0
assert data["price"] == 3.99
assert data["publisher"] == "Marvel Comics"
pagination = data["series_pagination"]
assert all(
map(
lambda x: pagination[x] is not None,
pagination.keys(),
)
)
assert data["store_date"] == 1563321600
assert data["url"] == "/comic/3616996/daredevil-8"
assert len(data["variant_covers"]) >= 2
user = data["user"]
assert all(map(lambda x: user[x] is None, user.keys()))
def test_get_issue_by_id_session():
"""Get issue by id test"""
client = Comic_Geeks(env["LCG_CI_SESSION"])
data = client.issue_info(3616996)
assert any(map(lambda x: data.user[x] is not None, data.user.keys()))
def test_get_issue_without_characters():
"""Get issue without characters credits test"""
client = Comic_Geeks()
data = client.issue_info(3943557)
assert len(data.characters) == 0
def test_get_issue_without_variant_covers():
"""Get issue without variant covers test"""
client = Comic_Geeks()
data = client.issue_info(7757146)
assert len(data.variant_covers) == 0
## TODO: issue without creator credits
def test_add_to_collection_error():
client = Comic_Geeks()
issue = client.issue_info(7757146)
data = issue.add_to_collection()
assert data["type"] == "error"
def test_add_to_wishlist_error():
client = Comic_Geeks()
issue = client.issue_info(7757146)
data = issue.add_to_wishlist()
assert data["type"] == "error"
def test_mark_read_error():
client = Comic_Geeks()
issue = client.issue_info(7757146)
data = issue.mark_read()
assert data["type"] == "error"
def test_pull_error():
client = Comic_Geeks()
issue = client.issue_info(7757146)
data = issue.pull()
assert data["type"] == "error"
def test_rate_error():
client = Comic_Geeks()
issue = client.issue_info(7757146)
data = issue.rate(4)
assert data["type"] == "error"
def test_remove_collection_error():
client = Comic_Geeks()
issue = client.issue_info(7757146)
data = issue.remove_from_collection()
assert data["type"] == "error"
def test_remove_readlist_error():
client = Comic_Geeks()
issue = client.issue_info(7757146)
data = issue.remove_from_readlist()
assert data["type"] == "error"
def test_remove_wishlist_error():
client = Comic_Geeks()
issue = client.issue_info(7757146)
data = issue.remove_from_wishlist()
assert data["type"] == "error"
def test_unsubscribe_error():
client = Comic_Geeks()
issue = client.issue_info(7757146)
data = issue.unsubscribe()
assert data["type"] == "error"
def test_add_to_collection():
client = Comic_Geeks(env["LCG_CI_SESSION"])
issue = client.issue_info(7757146)
data = issue.add_to_collection()
assert data["type"] == "success"
def test_add_to_wishlist():
client = Comic_Geeks(env["LCG_CI_SESSION"])
issue = client.issue_info(7757146)
data = issue.add_to_wishlist()
assert data["type"] == "success"
def test_mark_read():
client = Comic_Geeks(env["LCG_CI_SESSION"])
issue = client.issue_info(7757146)
data = issue.mark_read()
assert data["type"] == "success"
def test_pull():
client = Comic_Geeks(env["LCG_CI_SESSION"])
issue = client.issue_info(7757146)
data = issue.pull()
assert data["type"] == "success"
def test_rate():
client = Comic_Geeks(env["LCG_CI_SESSION"])
issue = client.issue_info(7757146)
data = issue.rate(0)
assert data["type"] == "success"
def test_rate_invalid_error():
client = Comic_Geeks(env["LCG_CI_SESSION"])
issue = client.issue_info(7757146)
data = issue.rate(20)
assert data["type"] == "error"
def test_remove_collection():
client = Comic_Geeks(env["LCG_CI_SESSION"])
issue = client.issue_info(7757146)
data = issue.remove_from_collection()
assert data["type"] == "success"
def test_remove_readlist():
client = Comic_Geeks(env["LCG_CI_SESSION"])
issue = client.issue_info(7757146)
data = issue.remove_from_readlist()
assert data["type"] == "success"
def test_remove_wishlist():
client = Comic_Geeks(env["LCG_CI_SESSION"])
issue = client.issue_info(7757146)
data = issue.remove_from_wishlist()
assert data["type"] == "success"
def test_unsubscribe():
client = Comic_Geeks(env["LCG_CI_SESSION"])
issue = client.issue_info(7757146)
data = issue.unsubscribe()
assert data["type"] == "success"
| pruizlezcano/comicgeeks | tests/test_Issue.py | test_Issue.py | py | 6,575 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "pathlib.Path",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "dotenv.dotenv_values",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.environ.get",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line... |
14681878400 | import csv
import math
import os
import sys
import torch
from matplotlib import pyplot as plt
from torch.utils.data import DataLoader
import Feedforward
import Kalman
from BKF_testing import start_test
from CircleGenerator import CirclesDataset, create_directory, save_tensor
from DeviceDataLoader import DeviceDataLoader, get_default_device
from datetime import datetime
device = get_default_device()
def get_val_loss(FFModel, KalmanModel, val_loader, loss_function, random_start_pos, random_start_vel, upper_bound_L_hat,
simplified_cov_update):
with torch.no_grad():
FFModel.eval()
KalmanModel.eval()
total_loss = 0.0
num_batches = 0
for batch in val_loader:
num_batches += 1
loss, _, _ = end_to_end_training_step(FFModel, KalmanModel, batch, loss_function, random_start_pos,
random_start_vel, upper_bound_L_hat, simplified_cov_update)
total_loss += loss
return total_loss / num_batches
def epoch_end(self, epoch, result):
print("Epoch [{}], train_loss: {:.4f}, val_loss: {:.4f}, last_lr: {:.4f}".format(
epoch, result['train_loss'], result['val_loss'], result['lr'][-1]))
def epoch_end_no_lr(self, epoch, result):
print("Epoch [{}], train_loss: {:.4f}, val_loss: {:.4f}".format(
epoch, result['train_loss'], result['val_loss']))
def apply_upper_bound(L_hat_List):
# L_hat_List tensor of shape (N * T, 3)
upper_bound = 2.0
lower_bound = -2.0
reference_tensor_up = torch.tensor([upper_bound, math.exp(upper_bound), upper_bound]) \
.repeat(L_hat_List.shape[0], 1).to(device)
reference_tensor_down = torch.tensor([lower_bound, -math.exp(lower_bound), lower_bound]) \
.repeat(L_hat_List.shape[0], 1).to(device)
condition = L_hat_List > reference_tensor_up
L_hat_List[condition] = upper_bound
condition = L_hat_List < reference_tensor_down
L_hat_List[condition] = lower_bound
def end_to_end_training_step(FFModel, KalmanModel, sample_batched, loss_function, random_start_pos, random_start_vel,
upper_bound_L_hat, simplified_cov_update):
# N is batch size
# T is amt of frames in sequence
# Sample dimensions (images, positions), images: (N, T, channels, width, height), labels: (N, T, (x, y, vx, vy) )
images = sample_batched[0].permute(1, 0, 2, 3, 4)
positions = sample_batched[1].permute(1, 0, 2)[:, :, :2]
vels = sample_batched[1].permute(1, 0, 2)[:, :, 2:]
# Format data
# Images in sequence, not including first image (T-1, N, 3, 128, 128)
images = torch.stack([images[ii] for ii in range(1, len(images))]).float().to(device)
first_frame_positions = None
first_frame_vels = None
if not random_start_pos:
# First frames of every sequence in batch (N, 2)
first_frame_positions = positions[0]
if not random_start_vel:
# First frames of every sequence in batch (N, 2)
first_frame_vels = vels[0]
# (T, N, 2)
positions = torch.stack([positions[ii] for ii in range(1, len(positions))]).float().to(device)
# Reshape images so everything can be processed in parallel by utilizing batch size
T, N, C, W, H = images.shape[0], images.shape[1], images.shape[2], images.shape[3], images.shape[4]
seq_images = images.view(T * N, C, W, H)
# Forward pass
# output (T * N, dim_output)
z_list, L_hat_list = FFModel(seq_images)
if upper_bound_L_hat:
apply_upper_bound(L_hat_list)
# Decompress the results into original images format
z_list = z_list.view(T, N, z_list.shape[1])
L_hat_list = L_hat_list.view(T, N, L_hat_list.shape[1])
# Pass through KF
position_prediction = KalmanModel(z_list, L_hat_list, first_frame_positions, first_frame_vels,
simplified_cov_update)
loss = loss_function(position_prediction, positions)
return loss, position_prediction, L_hat_list
def fit(epochs, lr, FFModel, KalmanModel, train_loader, val_loader, opt_func, loss_function, on_server, save_matrices,
save_predictions, save_lrs, lr_scheduling, random_start_pos, random_start_vel, path_model,
upper_bound_L_hat, simplified_cov_update):
history = []
predictionsDict = dict()
matricesDict = dict()
lrsDict = dict()
optimizer = opt_func(list(FFModel.parameters()) + list(KalmanModel.parameters()), lr)
sched = None
lowest_val_loss = None
loss = None
model_dict = None
if lr_scheduling:
sched = torch.optim.lr_scheduler.OneCycleLR(optimizer, lr, epochs=epochs, steps_per_epoch=len(train_loader))
for epoch in range(epochs):
# Training Phase
FFModel.train()
KalmanModel.train()
train_losses = []
lrs = []
for i, batch in enumerate(train_loader):
loss, predictions, L_hat_list = end_to_end_training_step(FFModel, KalmanModel, batch, loss_function,
random_start_pos,
random_start_vel, upper_bound_L_hat,
simplified_cov_update)
train_losses.append(loss)
loss.backward()
optimizer.step()
if lr_scheduling:
lrs.append(get_lr(optimizer))
sched.step()
if i == int(len(train_loader.dl.dataset) / train_loader.dl.batch_size) - 1:
# Save last tensor of last batch with its label and prediction
if save_predictions:
tensor = batch[0][-1][-1]
label = batch[1][-1][-1]
prediction = predictions[-1][-1]
if "tensor" not in predictionsDict:
predictionsDict["tensor"] = tensor
predictionsDict[epoch] = (label, prediction)
if save_matrices:
matricesDict[epoch] = ((KalmanModel.diag_Q.clone(),
KalmanModel.diag_Q.grad.clone()), L_hat_list)
if save_lrs:
lrsDict[epoch] = get_lr(optimizer)
optimizer.zero_grad()
# Validation phase
mean_val_loss = get_val_loss(FFModel, KalmanModel, val_loader, loss_function, random_start_pos,
random_start_vel, upper_bound_L_hat, simplified_cov_update)
if lr_scheduling:
result = {'train_loss': torch.stack(train_losses).mean().item(), 'val_loss': mean_val_loss, 'lr': lrs}
epoch_end(epoch, result)
else:
result = {'train_loss': torch.stack(train_losses).mean().item(), 'val_loss': mean_val_loss}
epoch_end_no_lr(epoch, result)
history.append(result)
if path_model is not None and epoch > epochs // 2:
# Save model if lowest val loss
val_loss = result['val_loss']
if lowest_val_loss is None or lowest_val_loss > val_loss:
lowest_val_loss = val_loss
model_dict = {
"epoch": epoch,
"ff_state_dict": FFModel.state_dict(),
"kalman_state_dict": KalmanModel.state_dict(),
"optimizer_state_dict": optimizer.state_dict(),
"loss": loss.item()
}
path = ""
if on_server:
path += f"/export/home2/NoCsBack/thesisdt/"
done = datetime.now().strftime("%d-%m-%Y_%H-%M")
path += f"WV/logs/bkf_logs_paper/t_{len(train_loader.dl.dataset)}_v_{len(val_loader.dl.dataset)}_b_{train_loader.dl.batch_size}_" \
f"{done}/"
if save_predictions or save_matrices:
create_directory(path)
if save_matrices:
save_matrices_as_csv(matricesDict, path)
if save_predictions:
save_tensor_label_prediction(predictionsDict, path)
if save_lrs:
save_lrs_as_csv(lrsDict, path)
if path_model is not None:
done = datetime.now().strftime("%d-%m-%Y_%H-%M")
model_dir = f"{path_model}_loss_{lowest_val_loss}_{done}"
create_directory(model_dir)
torch.save(model_dict, f"{model_dir}/lowest_val_loss.tar")
return history, model_dir + "/lowest_val_loss.tar"
def get_lr(optimizer):
for param_group in optimizer.param_groups:
return param_group['lr']
def plot_losses(history):
train_losses = [x.get('train_loss') for x in history]
val_losses = [x.get('val_loss').item() for x in history]
plt.figure()
plt.plot(train_losses, '-bx')
plt.plot(val_losses, '-rx')
plt.xlabel('epoch')
plt.ylabel('loss')
plt.legend(['Training', 'Validation'])
plt.title('Loss vs. No. of epochs')
plt.show()
def save_matrices_as_csv(matricesDict, path):
csv_Q = "matrix_Q.csv"
csv_L_hat = "matrix_L_hat"
save_matrix_and_gradient_as_csv("Q", path, csv_Q, matricesDict, 0)
save_matrix_as_csv("L_hat", path, csv_L_hat, matricesDict, 1)
def save_matrix_as_csv(name, path, csv_name, matricesDict, indexDict):
with open(path + csv_name, "w") as csvfile:
writer = csv.writer(csvfile)
# Header: epoch | matrix name | gradient
writer.writerow(["epoch", "matrix " + name])
for epoch in range(0, len(matricesDict)):
writer.writerow([epoch, matricesDict[epoch][indexDict].cpu().detach().numpy()])
def save_matrix_and_gradient_as_csv(name, path, csv_name, matricesDict, indexDict):
with open(path + csv_name, "w") as csvfile:
writer = csv.writer(csvfile)
# Header: epoch | matrix name | gradient
writer.writerow(["epoch", "matrix " + name, "gradient"])
for epoch in range(0, len(matricesDict)):
writer.writerow([epoch, matricesDict[epoch][indexDict][0].cpu().detach().numpy(),
matricesDict[epoch][indexDict][1].cpu().detach().numpy()])
def save_lrs_as_csv(lrsDict, path):
csv_name = "lrs.csv"
with open(path + csv_name, "w") as csvfile:
writer = csv.writer(csvfile)
# Header: epoch | lr
writer.writerow(["epoch", "lr "])
for epoch in range(0, len(lrsDict)):
writer.writerow([epoch, lrsDict[epoch]])
def save_tensor_label_prediction(predictionDict, path):
save_tensor(predictionDict["tensor"].cpu(), path, "tensor.pt")
csv_name = "tensor_label_pred.csv"
with open(path + csv_name, "w") as csvfile:
writer = csv.writer(csvfile)
# Header: epoch | label x | label y | prediction x | prediction y
writer.writerow(["epoch", "label x", "label y", "prediction x", "prediction y"])
# Skip the tensor entry
for epoch in range(0, len(predictionDict) - 1):
writer.writerow([epoch, predictionDict[epoch][0][0].item(), predictionDict[epoch][0][1].item(),
predictionDict[epoch][1][0].item(), predictionDict[epoch][1][1].item()])
def train_and_validate(nr_of_circles, nr_of_sequences_train, nr_of_sequences_val, nr_of_frames_train, nr_of_frames_val,
nr_of_epochs, batch_size, on_server, save_matrices, save_predictions, save_lrs, lr,
lr_scheduling,
random_start_pos, random_start_vel, static_seq, save_model, upper_bound_L_hat,
simplified_cov_update, change_shape, ff_model_path):
train_csv_file = ''
val_csv_file = ''
if on_server:
train_csv_file += '/export/home2/NoCsBack/thesisdt/WV/'
val_csv_file += '/export/home2/NoCsBack/thesisdt/WV/'
if static_seq:
train_csv_file += f'datasets/seq_nonmoving_circles/'
val_csv_file += f'datasets/seq_nonmoving_circles/'
else:
train_csv_file += f'datasets/linear_moving_circles_bkf/'
val_csv_file += f'datasets/linear_moving_circles_bkf/'
train_csv_file += f'circles={nr_of_circles}_frames={nr_of_frames_train}_noise=None' \
f'/train={nr_of_sequences_train}/train.csv'
val_csv_file += f'circles={nr_of_circles}_frames={nr_of_frames_val}_noise=None' \
f'/validation={nr_of_sequences_val}/validation.csv'
print("Training on: ")
print(train_csv_file)
print("----------------------------------")
train_ds = CirclesDataset(train_csv_file)
val_ds = CirclesDataset(val_csv_file)
train_dl = DataLoader(train_ds, batch_size, pin_memory=True)
val_dl = DataLoader(val_ds, batch_size, pin_memory=True)
train_dl = DeviceDataLoader(train_dl, device)
val_dl = DeviceDataLoader(val_dl, device)
opt_func = torch.optim.Adam
loss_func = torch.nn.MSELoss()
FFModel = Feedforward.FFNetwork().to(device)
KalmanModel = Kalman.KalmanFilter(device, batch_size).to(device)
path_save_model = None
if save_model:
path_save_model = ""
if on_server:
path_save_model += f"/export/home2/NoCsBack/thesisdt/WV/"
changing = ''
if change_shape:
changing = 'changing_'
path_save_model += f"logs/models/bkf/{changing}circles={nr_of_circles}/t_{len(train_dl.dl.dataset)}_v_" \
f"{len(val_dl.dl.dataset)}_b_{batch_size}"
# Load trained feedforward model
if ff_model_path is not None:
checkpoint = torch.load(ff_model_path)
FFModel.load_state_dict(checkpoint['ff_state_dict'])
print("Using trained FFModel")
print(checkpoint['path_training_set'])
print("----------------------------------")
print("ff model", ff_model_path)
print("nr of circles: ", nr_of_circles)
print("nr of sequences train: ", nr_of_sequences_train)
print("nr of sequences val: ", nr_of_sequences_val)
print("lr: ", lr)
print("lr sched: ", lr_scheduling)
print("batch size: ", batch_size)
print("epochs: ", nr_of_epochs)
print("random_start_pos: ", random_start_pos)
print("random_start_vel: ", random_start_vel)
print("change shape: ", change_shape)
history, model_path = fit(nr_of_epochs, lr, FFModel, KalmanModel, train_dl, val_dl, opt_func, loss_func, on_server,
save_matrices, save_predictions, save_lrs, lr_scheduling, random_start_pos,
random_start_vel,
path_save_model, upper_bound_L_hat, simplified_cov_update)
plot_losses(history)
return model_path
if __name__ == "__main__":
on_server = False
save_matrices = False
save_predictions = False
save_lrs = False
random_start_pos = False
random_start_vel = True
static_seq = False
save_model = True
upper_bound_L_hat = False
simplified_cov_update = False
if len(sys.argv) == 12:
nr_of_sequences_train, sequence_length_train, nr_of_sequences_val, sequence_length_val, nr_of_circles, \
change_shape, nr_of_epochs, batch_size, ff_model_path, lr, lr_sched = sys.argv[1:]
train_and_validate(nr_of_circles=int(nr_of_circles), nr_of_sequences_train=int(nr_of_sequences_train),
nr_of_sequences_val=int(nr_of_sequences_val), nr_of_frames_train=int(sequence_length_train),
nr_of_frames_val=int(sequence_length_val), nr_of_epochs=int(nr_of_epochs),
batch_size=int(batch_size), on_server=False, save_matrices=save_matrices,
save_predictions=save_predictions, save_lrs=save_lrs,
change_shape=(change_shape == 'change_shape'), lr=float(lr),
lr_scheduling=(lr_sched == 'lr_sched'), random_start_pos=random_start_pos,
random_start_vel=random_start_vel, static_seq=static_seq, save_model=True,
upper_bound_L_hat=False, simplified_cov_update=False, ff_model_path=ff_model_path)
else:
raise Exception("Wrong number of arguments. Following arguments expected: "
"<nr_of_sequences_train> <sequence_length_train> <nr_of_sequences_val> "
"<sequence_length_val> <nr_of_circles> <change_shape> <nr_of_epochs> <batch_size> "
"<ff_model_path> <lr> <lr_sched>")
| tiboat/BackpropKF_Reproduction | BKF.py | BKF.py | py | 16,374 | python | en | code | 6 | github-code | 36 | [
{
"api_name": "DeviceDataLoader.get_default_device",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "torch.no_grad",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "math.e... |
20579907742 | import numpy as np
import matplotlib.pyplot as plt
from tkinter import *
from tkinter import messagebox
# Own Fuctions
from harmGen import *
from transformLibrary import *
from visualization import *
from preSettings import preSet # Optional: copy another PreSet.py file with new default values to calculate
# === GUI ===
# Window
window = Tk()
# Menu Bar
def aboutMsg():
content = ['Alpha–beta transformation',
'of harmonics v1.0',
' ' ,
'Jose Fernando Diaz Benavides',
'Copyright © 2019',
' ',
'This program is licenced under Apache 2.0']
messagebox.showinfo('About', "\n".join(content))
menubar = Menu(window)
helpmenu = Menu(menubar, tearoff=0)
helpmenu.add_command(label="About", command=aboutMsg)
menubar.add_cascade(label="Help", menu=helpmenu)
window.config(menu=menubar)
# Put Labeles
putLabels(window)
# Put Entries
entries(window)
# Put Grid
putGrid(window)
# Pre-settings
preSet(window)
def calculate():
plt.close()
# Read Inputs
fs = float(window.efs.get()) # Sampling frequency
time_total = float(window.etime_total.get()) # Total sampled time
time = timeVector(fs,time_total)
# Signal 1
#L1
a0_a = float(window.e_a0_a.get())
a0_ph = float(window.e_a0_ph.get())
a0_f = float(window.e_a0_f.get())
a0_dc = float(window.e_a0_dc.get())
#L2
b0_a = float(window.e_b0_a.get())
b0_ph = float(window.e_b0_ph.get())
b0_f = float(window.e_b0_f.get())
b0_dc = float(window.e_b0_dc.get())
#L3
c0_a = float(window.e_c0_a.get())
c0_ph = float(window.e_c0_ph.get())
c0_f = float(window.e_c0_f.get())
c0_dc = float(window.e_c0_dc.get())
# Signal 2
#L1
a1_a = float(window.e_a1_a.get())
a1_ph = float(window.e_a1_ph.get())
a1_f = float(window.e_a1_f.get())
a1_dc = float(window.e_a1_dc.get())
#L2
b1_a = float(window.e_b1_a.get())
b1_ph = float(window.e_b1_ph.get())
b1_f = float(window.e_b1_f.get())
b1_dc = float(window.e_b1_dc.get())
#L3
c1_a = float(window.e_c1_a.get())
c1_ph = float(window.e_c1_ph.get())
c1_f = float(window.e_c1_f.get())
c1_dc = float(window.e_c1_dc.get())
# Signal 3
#L1
a2_a = float(window.e_a2_a.get())
a2_ph = float(window.e_a2_ph.get())
a2_f = float(window.e_a2_f.get())
a2_dc = float(window.e_a2_dc.get())
#L2
b2_a = float(window.e_b2_a.get())
b2_ph = float(window.e_b2_ph.get())
b2_f = float(window.e_b2_f.get())
b2_dc = float(window.e_b2_dc.get())
#L3
c2_a = float(window.e_c2_a.get())
c2_ph = float(window.e_c2_ph.get())
c2_f = float(window.e_c2_f.get())
c2_dc = float(window.e_c2_dc.get())
# === LIST OF PARAMETERS ===
# List of Parameters: amplitude,phase,frequency,time vector, dc
a_0 = [a0_a,a0_ph,a0_f,time,a0_dc] # L1 Signal 1
b_0 = [b0_a,b0_ph,b0_f,time,b0_dc] # L2
c_0 = [c0_a,c0_ph,c0_f,time,c0_dc] # L3
a_1 = [a1_a,a1_ph,a1_f,time,a1_dc] # Signal 2
b_1 = [b1_a,b1_ph,b1_f,time,b1_dc]
c_1 = [c1_a,c1_ph,c1_f,time,c1_dc]
a_2 = [a2_a,a2_ph,a2_f,time,a2_dc] # Signal 3
b_2 = [b2_a,b2_ph,b2_f,time,b2_dc]
c_2 = [c2_a,c2_ph,c2_f,time,c2_dc]
############# CALCULATE ##################
# === SIGNAL 0 ===
a_s_0 = sinusSignal(a_0)
b_s_0 = sinusSignal(b_0)
c_s_0 = sinusSignal(c_0)
# === SIGNAL 1 ===
a_s_1 = sinusSignal(a_1)
b_s_1 = sinusSignal(b_1)
c_s_1 = sinusSignal(c_1)
# === SIGNAL 2 ===
a_s_2 = sinusSignal(a_2)
b_s_2 = sinusSignal(b_2)
c_s_2 = sinusSignal(c_2)
# === ADD OS ===
a_total = a_s_0 + a_s_1 + a_s_2
b_total = b_s_0 + b_s_1 + b_s_2
c_total = c_s_0 + c_s_1 + c_s_2
# === ALPHA BETTA GAMMA ===
alpha, beta, gamma, amplitude, angle = clarkeTransformation(a_total,b_total,c_total)
# === PLOT ===
plotAll(a_total,b_total,c_total,alpha, beta, gamma, amplitude, angle, time)
# === CALCULATE END ===
# Button
window.button = Button( master = window,
text = 'Show plots',
command = calculate)
window.button.grid(row=20, column=3)
window.mainloop() | josediazb/alpha-beta-harmonics | clarkeos.py | clarkeos.py | py | 4,883 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "tkinter.messagebox.showinfo",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "tkinter.messagebox",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "preSettings.preSet",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "mat... |
73850392743 | from django.urls import path
from . import views
# /articles/ ___
app_name = 'articles'
urlpatterns = [
# 입력 페이지 제공
path('', views.index, name='index'),
# /articles/10/ 이런식으로 몇번 게시글 보여주세요 라는 의미이다.
path('<int:article_pk>/', views.detail, name='detail'), # detail
# path('new/', views.new, name='new'),
# create로 들어올 것이기 때문에 new의 url은 없애버렸다.
path('create/', views.create, name='create'),
path('<int:article_pk>/delete/', views.delete, name='delete'),
path('<int:article_pk>/update/', views.update, name='update'),
path('<int:article_pk>/comments/', views.comments_create, name='comments_create'),
# /articles/3/comments/2/delete 이런식으로 url을 만들어주고 싶다.
path('<int:article_pk>/comments/<int:comment_pk>/delete/', views.comments_delete, name='comments_delete'),
]
| blueboy1593/Django | Django_crud/articles/urls.py | urls.py | py | 923 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
... |
1270205424 | import setuptools
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setuptools.setup(
name="PyPolkaSideCar-mickstar", # Replace with your own username
version="0.0.1",
author="Michael Johnston",
author_email="michael.johnston29@gmail.com",
description="A Simple wrapper for the substrate sidecar for polkadot nodes",
license="GPLv3",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/mickstar/PyPolkaSideCar",
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: GPLv3",
"Operating System :: OS Independent",
],
packages=setuptools.find_packages(),
install_requires=[
"requests"
],
python_requires='>=3.6',
)
| mickstar/PyPolkaSideCar | setup.py | setup.py | py | 823 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "setuptools.setup",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "setuptools.find_packages",
"line_number": 21,
"usage_type": "call"
}
] |
29696788586 | import utils
def read_input(path):
return [(l[0], int(l[1]))
for l in [l.split() for l in utils.read_lines(path)]]
def part1(path):
input = read_input(path)
h = 0
v = 0
for m in input:
if m[0] == "forward":
h += m[1]
elif m[0] == "up":
v -= m[1]
else:
v += m[1]
print(h * v)
def part2(path):
input = read_input(path)
h = 0
v = 0
aim = 0
for m in input:
if m[0] == "forward":
h += m[1]
v += aim * m[1]
elif m[0] == "up":
aim -= m[1]
else:
aim += m[1]
print(h * v) | dialogbox/adventofcode | py/2021/day2.py | day2.py | py | 666 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "utils.read_lines",
"line_number": 6,
"usage_type": "call"
}
] |
16097700723 | from player import Player
from typing import List
class LineUp:
    """An ordered roster of players that can be printed, diffed against
    another line-up, and have individual players swapped between line-ups."""
    def __init__(self):
        # Players currently in this line-up, in display order.
        self.players:List[Player] = []
    def addPlayer(self, player: Player):
        """Append *player* to the end of the line-up."""
        self.players.append(player)
    def print(self, otherLineup):
        """Print this line-up.

        When *otherLineup* is 0, just list the players.  Otherwise print a
        three-column table: this line-up, players leaving *otherLineup*
        (present there but not here... NOTE(review): naming of the
        leaving/entering lists looks inverted relative to the diff
        direction — confirm intended semantics with callers), and players
        entering.
        """
        if(otherLineup == 0):
            for player in self.players:
                print(player)
        else:
            mPlayersLeaving = []
            mPlayersEntering = []
            # Players in this line-up that the other line-up lacks.
            for player in self.players:
                if self.doesLineUpContainPlayer(otherLineup, player) == False:
                    mPlayersEntering.append(player)
                    # print(player)
            # Players in the other line-up that this line-up lacks.
            for player in otherLineup.players:
                if otherLineup.doesLineUpContainPlayer(self, player) == False:
                    mPlayersLeaving.append(player)
            # print("\nPlayers Leaving:")
            # for player in mPlayersLeaving:
            # print(player)
            # print("\nPlayers Entering:")
            # for player in mPlayersEntering:
            # print(player)
            print("Line Up                                  Players Leaving                          Players Entering")
            # 40-character columns; blank cells pad rows where one list is shorter.
            for i in range(0, len(self.players)):
                print("{0:40}".format(self.players[i].name), end=" ")
                if i < len(mPlayersLeaving):
                    print("{0:40}".format(mPlayersLeaving[i].name), end=" ")
                else:
                    print("{0:40}".format(""), end=" ")
                if i < len(mPlayersEntering):
                    print("{0:40}".format(mPlayersEntering[i].name))
                else:
                    print("{0:40}".format(""))
            # row2 = "{0:40}{1:40}{2}".format(self.players[0], mPlayersLeaving[0], mPlayersEntering[0])
            # row3 = "{0:40}{1:40}{2}".format(self.players[1], mPlayersLeaving[1], mPlayersEntering[1])
            # row4 = "{0:40}{1:40}{2}".format(self.players[2], mPlayersLeaving[2], mPlayersEntering[2])
            # row5 = "{0:40}{1:40}{2}".format(self.players[3], mPlayersLeaving[3], mPlayersEntering[3])
    def doesLineUpContainPlayer(self, linueup, player:Player):
        """Return True when *player* (matched by name) is in *linueup*.

        NOTE(review): searches the line-up passed as an argument, not
        ``self`` — callers always pass the target line-up explicitly.
        """
        for p in linueup.players:
            if p.name == player.name:
                return True
        return False
    def swapPlayers(self, otherLineup, thisLineupPlayer:Player, thatLineupPlayer:Player):
        """Swap *thisLineupPlayer* (from self) with *thatLineupPlayer*
        (from *otherLineup*), refusing when either player already exists
        in the destination line-up."""
        if otherLineup.doesLineUpContainPlayer(otherLineup, thisLineupPlayer) :
            print("Cannot swap a player into a line that the player already exists in!")
            print("Press enter to continue")
            input()
            return
        if self.doesLineUpContainPlayer(self, thatLineupPlayer) :
            print("Cannot swap a player into a line that the player already exists in!")
            #print("Press enter to continue")
            #input()
            return
        thisIndex = self.players.index(thisLineupPlayer);
        thatIndex = otherLineup.players.index(thatLineupPlayer);
        self.players[thisIndex] = thatLineupPlayer;
        otherLineup.players[thatIndex] = thisLineupPlayer;
        print("Players swapped successfully!")
        # print("Press enter to continue")
        # input()
    def __iter__(self):
        # The line-up object is its own iterator, so nested or concurrent
        # iterations share this single index.
        self.current_player_index = 0
        return self
    def __next__(self):
        if self.current_player_index < len(self.players):
            current_player = self.players[self.current_player_index]
            self.current_player_index += 1
            return current_player
        else:
raise StopIteration | JHarding86/SoccerSubs | lineup.py | lineup.py | py | 3,616 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "typing.List",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "player.Player",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "player.Player",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "player.Player",
"line_number... |
41220371897 | # https://en.wikipedia.org/wiki/Trilateration
# https://electronics.stackexchange.com/questions/83354/calculate-distance-from-rssi
# https://stackoverflow.com/questions/4357799/convert-rssi-to-distance
# https://iotandelectronics.wordpress.com/2016/10/07/how-to-calculate-distance-from-the-rssi-value-of-the-ble-beacon/
# https://math.stackexchange.com/a/1033561/396153
# http://ambrnet.com/TrigoCalc/Circles2/circle2intersection/CircleCircleIntersection.htm
#%%
import math
import numpy as np
from numpy import sqrt, dot, cross
from numpy.linalg import norm
import pandas as pd
import plotly.express as px
# Notebook-style analysis: estimate pairwise distances between Wi-Fi beacons
# from RSSI readings, then place them in 3D via trilateration.
data_file = "../server/data-2023-08-06-wifi-1.csv"
df = pd.read_csv(data_file, encoding='utf-8')
#%% create clean dataframe
net = pd.DataFrame()
net['time'] = df['stime']
net['src'] = df['mac']
net['dst'] = df['ssid'].apply(lambda val: val.split('-')[1])
# net['dist'] = df['rssi']
# Log-distance path-loss model with exponent 2: d = 10^((RSSI_1m - RSSI)/20).
one_m_rssi = -40
net['dist'] = df['rssi'].apply(lambda val: 10 ** ((one_m_rssi - val) / (10 * 2)))
# shorten names
net['src'] = net['src'].apply(lambda val: val[-2:])
net['dst'] = net['dst'].apply(lambda val: val[-2:])
# alphabetize order
uids = set(net['src'].unique().tolist())
uids.update(set(net['dst'].unique().tolist()))
uids = sorted(list(uids))
# replace with means
netsummary = net.groupby(['src', 'dst']).aggregate({'dist': ['mean', 'std']})
netsummary_flat = netsummary.reset_index()
netsummary_flat.columns = netsummary_flat.columns.map("|".join).str.strip("|")
px.scatter_3d(
    netsummary_flat,
    x='src',
    y='dst',
    z='dist|mean',
    error_z='dist|std',
    color='dist|mean',
    category_orders={
        'src': uids,
        'dst': uids,
    }
).update_traces(error_z_color="black")
# Symmetrize: one edge per unordered pair, averaging the two directed means.
finals = []
for i in range(len(uids)):
    for j in range(i+1, len(uids)):
        finals.append({
            'a': uids[i],
            'b': uids[j],
            'edge': f"{uids[i]}-{uids[j]}",
            'dist': (netsummary.loc[uids[i], uids[j]]['dist']['mean'] +
                     netsummary.loc[uids[j], uids[i]]['dist']['mean']) / 2,
            'err': (netsummary.loc[uids[i], uids[j]]['dist']['std'] ** 2 +
                    netsummary.loc[uids[j], uids[i]]['dist']['std'] ** 2) / 2,
        })
finals = pd.DataFrame(finals)
finals.to_csv('wifi-1.csv', index=False)
# px.bar(finals, 'edge', 'dist', error_y='err')
px.bar(finals, 'edge', 'dist')
#%% 1d
locs = {}
# NOTE(review): base-plane anchors chosen by hand (indices 3, 1, 2) — confirm.
a, b, c = uids[3], uids[1], uids[2]
print('Using points as base plane:', a, b, c)
# assume first point at origin
locs[a] = np.array([0, 0, 0])
# assume second point in positive x direction
a_to_b = finals[finals['edge'] == '-'.join(sorted([a, b]))]['dist'].iloc[0]
locs[b] = np.array([a_to_b, 0, 0])
# assume third point in positive y direction
a_to_c = finals[finals['edge'] == '-'.join(sorted([a, c]))]['dist'].iloc[0]
b_to_c = finals[finals['edge'] == '-'.join(sorted([b, c]))]['dist'].iloc[0]
c_x = (a_to_c ** 2 - b_to_c ** 2 + a_to_b ** 2) / (2 * a_to_b)
c_y = math.sqrt(a_to_c ** 2 - c_x ** 2)
locs[c] = np.array([c_x, c_y, 0])
# find remaining points
# https://stackoverflow.com/a/18654302/8305404
# Find the intersection of three spheres
# P1,P2,P3 are the centers, r1,r2,r3 are the radii
# Implementaton based on Wikipedia Trilateration article.
def trilaterate(P1, P2, P3, r1, r2, r3):
    """Return the two intersection points of three spheres.

    P1, P2, P3 are the sphere centers (length-3 numpy arrays) and
    r1, r2, r3 their radii.  Following the Wikipedia "Trilateration"
    article: build an orthonormal frame (e_x, e_y, e_z) from the centers,
    solve for the intersection coordinates (x, y, z) in that frame, and
    map back to world coordinates.

    Returns:
        Tuple of the two candidate points, mirror images across the
        plane through the three centers.

    Raises:
        ValueError: if the spheres do not intersect (negative
            discriminant).  ValueError subclasses Exception, so callers
            catching the original bare ``Exception`` still work.
    """
    temp1 = P2 - P1
    e_x = temp1 / norm(temp1)      # unit vector from P1 toward P2
    temp2 = P3 - P1
    i = dot(e_x, temp2)            # signed distance of P3 along e_x
    temp3 = temp2 - i * e_x
    e_y = temp3 / norm(temp3)      # unit vector toward P3, orthogonal to e_x
    e_z = cross(e_x, e_y)          # completes the right-handed frame
    d = norm(temp1)                # |P2 - P1| (reuse temp1 instead of recomputing P2 - P1)
    j = dot(e_y, temp2)            # distance of P3 along e_y
    x = (r1 * r1 - r2 * r2 + d * d) / (2 * d)
    y = (r1 * r1 - r3 * r3 - 2 * i * x + i * i + j * j) / (2 * j)
    discriminant = r1 * r1 - x * x - y * y
    if discriminant < 0:
        raise ValueError("The three spheres do not intersect!")
    z = sqrt(discriminant)
    p_12_a = P1 + x * e_x + y * e_y + z * e_z
    p_12_b = P1 + x * e_x + y * e_y - z * e_z
    return p_12_a, p_12_b
# Place every remaining beacon by trilateration against the three anchors.
for p in uids:
    if p in locs: continue
    print('finding point', p)
    a_to_p = finals[finals['edge'] == '-'.join(sorted([a, p]))]['dist'].iloc[0]
    b_to_p = finals[finals['edge'] == '-'.join(sorted([b, p]))]['dist'].iloc[0]
    c_to_p = finals[finals['edge'] == '-'.join(sorted([c, p]))]['dist'].iloc[0]
    # trilaterate raises when the spheres don't intersect (noisy RSSI distances).
    opt1, opt2 = trilaterate(locs[a], locs[b], locs[c], a_to_p, b_to_p, c_to_p)
    # NOTE(review): arbitrarily keeps the +z mirror solution — confirm.
    locs[p] = opt1
locs
#%% | mirrorcoloured/picobeacon | analysis/analysis.py | analysis.py | py | 4,235 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.read_csv",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "plotly.express.scatter_3d",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "plotly.exp... |
19249979909 | from functools import reduce
from typing import NamedTuple
Coord = tuple[int, int]
"""X and Y coordinates"""
Views = list[list[int]]
"""
The views from the tree looking outwards.
Each outer list is a direction. The inner list contains tree heights. The start of the list is closest to the tree.
"""
class Tree(NamedTuple):
    # Immutable record for one grid cell: its height plus what it "sees".
    height: int  # single-digit height (0-9, parsed from the input characters)
    views: Views  # [left, right, above, below], each ordered nearest-first
TreeGrid = dict[Coord, Tree]  # full grid of trees keyed by (x, y)
HeightGrid = dict[Coord, int]  # raw heights keyed by (x, y)
def get_height_grid(data: list[str]) -> tuple[HeightGrid, int]:
    """Parse rows of digit strings into a ``{(x, y): height}`` map.

    Returns the map together with the largest valid index; the grid is
    assumed to be square, so ``len(data) - 1`` bounds both axes.
    """
    heights: HeightGrid = {
        (col, row): int(digit)
        for row, line in enumerate(data)
        for col, digit in enumerate(line)
    }
    return heights, len(data) - 1
def get_tree_grid(height_grid: HeightGrid, max_idx: int) -> TreeGrid:
    """Attach the four outward views to every cell of *height_grid*."""
    return {
        coord: Tree(height=height, views=get_views_from_tree(coord, height_grid, max_idx))
        for coord, height in height_grid.items()
    }
def get_views_from_tree(coord: Coord, height_grid: HeightGrid, max_idx: int) -> Views:
    """Collect the tree heights in each direction, ordered nearest-to-farthest."""
    x, y = coord
    left = [height_grid[i, y] for i in range(x - 1, -1, -1)]
    right = [height_grid[i, y] for i in range(x + 1, max_idx + 1)]
    above = [height_grid[x, i] for i in range(y - 1, -1, -1)]
    below = [height_grid[x, i] for i in range(y + 1, max_idx + 1)]
    return [left, right, above, below]
def test_is_tree_visible(coord: Coord, tree: Tree, max_idx: int) -> bool:
    """Return True when the tree can be seen from outside the grid."""
    x, y = coord
    if 0 in (x, y) or max_idx in (x, y):
        # Edge trees are always visible.
        return True
    # Visible if it is strictly taller than everything in some direction.
    return any(tree.height > max(view) for view in tree.views)
def get_score_for_view(tree_height: int, view: list[int]) -> int:
    """Count the trees visible along one view.

    Trees are counted outwards until (and including) the first one at
    least as tall as *tree_height*; an empty view scores 0.
    """
    visible = 0
    for other_height in view:
        visible += 1
        if other_height >= tree_height:
            break
    return visible
def get_total_scenic_score(tree: Tree) -> int:
    """Multiply the per-direction viewing distances (a 0 short-circuits)."""
    direction_scores: list[int] = []
    for direction_view in tree.views:
        view_score = get_score_for_view(tree.height, direction_view)
        if view_score == 0:
            # One blocked direction zeroes the whole product.
            return 0
        direction_scores.append(view_score)
    return reduce(lambda acc, score: acc * score, direction_scores)
def main() -> None:
    """Read the puzzle input and print the part 1 and part 2 answers."""
    with open("day08/data.txt") as file:
        data = [line.strip() for line in file.readlines()]
    height_grid, max_idx = get_height_grid(data)
    tree_grid = get_tree_grid(height_grid, max_idx)
    # Part 1: how many trees are visible from outside the grid.
    print(sum(1 for coord, tree in tree_grid.items() if test_is_tree_visible(coord, tree, max_idx)))
    # Part 2: the best scenic score over all trees.
    print(max((get_total_scenic_score(tree) for tree in tree_grid.values()), default=0))
# Script entry point.
if __name__ == "__main__":
    main()
| dan-osull/python_aoc2022 | day08/code.py | code.py | py | 2,988 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "typing.NamedTuple",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "functools.reduce",
"line_number": 78,
"usage_type": "call"
}
] |
30481483106 | from pathlib import Path
import pyotp
from Model import login, order_book, stats, trades
from time import sleep
import pandas as pd
import json
import datetime
from threading import Thread
import pickle
import os.path
class Controller:
    """Data collector for the Nobitex exchange.

    Spawns one thread per task (price stats, and per-symbol order books and
    trades), buffers responses in memory, and flushes each buffer to
    ``data/...`` (pickle or CSV) once it reaches ``max_data_length``
    entries.  Errors are reported through the injected ``err_log`` callable.

    NOTE(review): these are *class-level* mutable attributes, so state is
    shared across all Controller instances; fine for a singleton, but
    surprising otherwise.
    """
    TOKEN = ""                 # auth token from request_new_token()
    symbols = None             # dict loaded from symbols.json
    order_list = []            # per-symbol buffers of order-book snapshots
    trade_list = []            # per-symbol buffers of raw trades
    stats_list = []            # per-symbol buffers of price stats
    symbol_to_list_dict = {}   # symbol -> index into the three buffer lists
    max_data_length = 1000     # buffer size that triggers a flush to disk
    cool_down_time = 10 # seconds
    trade_data_cool_down = 0   # unix timestamp before which trade polling pauses
    def __init__(self, keys, err_log):
        """Store credentials/err-logger, widen pandas display, and build
        one empty buffer per symbol listed in symbols.json."""
        self.keys = keys
        self.err_log = err_log
        pd.set_option('display.max_rows', 500)
        pd.set_option('display.max_columns', 500)
        pd.set_option('display.width', 1000)
        with open(os.path.dirname(__file__) + "/../symbols.json", "rt") as json_file:
            self.symbols = json.load(json_file)
        symbol_keys = self.symbols.keys()
        for index, key in enumerate(symbol_keys):
            self.symbol_to_list_dict[key] = index
            self.order_list.append([])
            self.trade_list.append([])
            self.stats_list.append([])
    def _get_2fa(self, based32: str):
        """Return the current TOTP one-time password for a base32 secret."""
        totp = pyotp.TOTP(based32)
        return totp.now()
    def request_new_token(self):
        """Log in (retrying every 5s until HTTP 200) and cache the API token."""
        exception = ""
        username = self.keys.key_dict['username']
        password = self.keys.key_dict['password']
        otp = str(self._get_2fa(self.keys.key_dict['2fa-backup']))
        try:
            response = login(username, password, otp)
        except Exception as e:
            response = None
            exception = str(e)
        # Retry until the login endpoint answers 200; regenerate the OTP
        # each attempt since TOTP codes expire.
        while response is None or response.status_code != 200:
            if response is None:
                self.err_log("login requests failed to send!", exception, 1)
            else:
                self.err_log("login error", response.text, 1)
            sleep(5)
            username = self.keys.key_dict['username']
            password = self.keys.key_dict['password']
            otp = str(self._get_2fa(self.keys.key_dict['2fa-backup']))
            try:
                response = login(username, password, otp)
            except Exception as e:
                response = None
                exception = str(e)
        if response is not None and response.status_code == 200:
            self.err_log("login fixed", response.text, 200)
        self.TOKEN = response.json()['key']
    def get_order_data(self, original_symbol: str):
        """Fetch one order-book snapshot for *original_symbol* (IRT market),
        buffer it, and flush the buffer to a pickle file when full."""
        symbol = original_symbol.upper() + "IRT"
        try:
            response = order_book(symbol)
        except Exception as e:
            response = None
            self.err_log("Exception in get_order_data", e)
        if response is not None:
            if response.status_code == 200:
                resp_json = response.json()
                current_time = datetime.datetime.now()
                # Normalize prices to int and volumes to float.
                bids = resp_json['bids']
                for index in range(len(bids)):
                    tmp = [int(bids[index][0]), float(bids[index][1])]
                    bids[index] = tmp
                asks = resp_json['asks']
                for index in range(len(asks)):
                    tmp = [int(asks[index][0]), float(asks[index][1])]
                    asks[index] = tmp
                timestamp = int(current_time.timestamp())
                outcome = [bids, asks, timestamp]
                key = self.symbol_to_list_dict[original_symbol]
                self.order_list[key].append(outcome)
                if len(self.order_list[key]) >= self.max_data_length:
                    # Flush to data/orderData/<symbol>/<Y>/<M>/<D>/<ts>.pickle
                    dir_path = os.path.dirname(__file__) + "/../data/orderData/" + original_symbol + "/" + str(
                        current_time.year) + "/" + str(current_time.month) + "/" + str(current_time.day)
                    Path(dir_path).mkdir(parents=True, exist_ok=True)
                    with open(dir_path + "/" + str(timestamp) + ".pickle", "wb") as file:
                        pickle.dump(self.order_list[key], file, protocol=pickle.HIGHEST_PROTOCOL)
                    self.order_list[key] = []
            else:
                self.err_log("Exception in get_order_data;\nresponse is not OK!", response.text, response.status_code)
    def get_trade_data(self, original_symbol: str):
        """Fetch recent trades, append only those not already seen (deduped
        against the last 6 buffered entries), and flush to CSV when full.
        A 429 response arms the cool-down timestamp instead."""
        symbol = original_symbol.upper() + "IRT"
        try:
            response = trades(symbol)
        except Exception as e:
            response = None
            # NOTE(review): the exception text is captured but never logged.
            exception = str(e)
        if response is not None:
            if response.status_code == 200:
                resp_json = response.json()
                current_time = datetime.datetime.now()
                timestamp = int(current_time.timestamp())
                outcome = resp_json['trades']
                key = self.symbol_to_list_dict[original_symbol]
                outcome1 = []
                if len(self.trade_list[key]) > 5:
                    # Keep only trades absent from the last 6 buffered items.
                    for info in outcome:
                        sw = True
                        for item in self.trade_list[key][len(self.trade_list[key]) - 6:]:
                            if info == item:
                                sw = False
                                break
                        if sw:
                            outcome1.append(info)
                else:
                    # Buffer still warming up: take everything.
                    for info in outcome:
                        self.trade_list[key].append(info)
                for item in outcome1:
                    self.trade_list[key].append(item)
                if len(self.trade_list[key]) >= self.max_data_length:
                    # Flush to data/tradeData/<symbol>/<Y>/<M>/<D>/<ts>.csv
                    dir_path = os.path.dirname(__file__) + "/../data/tradeData/" + original_symbol + "/" + str(
                        current_time.year) + "/" + str(current_time.month) + "/" + str(current_time.day)
                    Path(dir_path).mkdir(parents=True, exist_ok=True)
                    try:
                        df = pd.DataFrame(self.trade_list[key])
                        df.to_csv(dir_path + "/" + str(timestamp) + ".csv")
                        self.trade_list[key] = []
                    except Exception as e:
                        self.err_log("Exception in get_trade_data", e)
            elif response.status_code == 429: # too many requests
                current_time = datetime.datetime.now()
                timestamp = int(current_time.timestamp())
                self.trade_data_cool_down = timestamp + self.cool_down_time
            else:
                self.err_log("Exception in get_trade_data;\nresponse is not OK!", response.text, response.status_code)
    def get_current_price(self):
        """Fetch price stats for all symbols at once, normalize the numeric
        fields to int, buffer per symbol, and flush to CSV when full."""
        symbols = self.symbols.keys()
        # Comma-joined symbol list for the stats endpoint.
        src_string = ""
        for symbol in symbols:
            src_string += symbol + ","
        src_string = src_string[:-1]
        try:
            response = stats(str(src_string), "rls")
        except Exception as e:
            response = None
            self.err_log("Exception in get_current_price", e)
        if response is None:
            pass
        else:
            if response.status_code == 200:
                resp_json = response.json()
                outcome = resp_json["stats"]
                current_time = datetime.datetime.now()
                timestamp = int(current_time.timestamp())
                try:
                    for attribute, value in outcome.items():
                        outcome[attribute]["bestSell"] = int(float(outcome[attribute]["bestSell"]))
                        outcome[attribute]["bestBuy"] = int(float(outcome[attribute]["bestBuy"]))
                        outcome[attribute]["latest"] = int(float(outcome[attribute]["latest"]))
                        outcome[attribute]["dayLow"] = int(float(outcome[attribute]["dayLow"]))
                        outcome[attribute]["dayHigh"] = int(float(outcome[attribute]["dayHigh"]))
                        outcome[attribute]["dayOpen"] = int(float(outcome[attribute]["dayOpen"]))
                        outcome[attribute]["dayClose"] = int(float(outcome[attribute]["dayClose"]))
                    for attribute, value in outcome.items():
                        # Keys look like "<symbol>-rls"; strip the 4-char suffix.
                        symbol = attribute[:-4]
                        key = self.symbol_to_list_dict[symbol]
                        value['timeStamp'] = timestamp
                        self.stats_list[key].append(value)
                        if len(self.stats_list[key]) >= self.max_data_length:
                            # Flush to data/priceData/<symbol>/<Y>/<M>/<D>/<ts>.csv
                            dir_path = os.path.dirname(__file__) + "/../data/priceData/" + symbol + "/" + str(
                                current_time.year) + "/" + str(current_time.month) + "/" + str(current_time.day)
                            Path(dir_path).mkdir(parents=True, exist_ok=True)
                            try:
                                df = pd.DataFrame(self.stats_list[key])
                                df.to_csv(dir_path + "/" + str(timestamp) + ".csv")
                                self.stats_list[key] = []
                            except Exception as e:
                                self.err_log("Exception in get_current_price", e)
                except Exception as e:
                    self.err_log("Exception in get_current_price", e)
            else:
                self.err_log("get current price didn't return 200", response.text, response.status_code)
    def _collect_price_data(self):
        """Thread body: poll price stats once per second, forever."""
        while True:
            try:
                sleep(1)
                self.get_current_price()
            except Exception as e:
                self.err_log("Exception in _collect_price_data", e)
    def _get_orderbook_data(self, symbol):
        """Thread body: poll one symbol's order book once per second, forever."""
        while True:
            try:
                sleep(1)
                self.get_order_data(symbol)
            except Exception as e:
                self.err_log("Exception in _get_orderbook_data", e)
    def _get_trade_data(self, symbol):
        """Thread body: poll one symbol's trades once per second, unless the
        429 cool-down is still active."""
        while True:
            try:
                sleep(1)
                current_time = datetime.datetime.now()
                timestamp = int(current_time.timestamp())
                if timestamp > self.trade_data_cool_down:
                    self.get_trade_data(symbol)
            except Exception as e:
                self.err_log("Exception in _get_trade_data", e)
    def _thread_checker(self, threads_list: list):
        """Watchdog: every minute, report threads that died.

        NOTE(review): ``Thread.start()`` on an already-started (dead) thread
        raises RuntimeError — threads cannot be restarted; a fresh Thread
        must be constructed instead.
        """
        while True:
            sleep(60)
            for thread in threads_list:
                if not thread.is_alive():
                    self.err_log("one Thread is not alive", str(thread.name))
                    thread.start()
    def start(self):
        """Spawn the price-collector plus order/trade threads per symbol,
        then the watchdog thread monitoring them all."""
        price_collector_thread = Thread(target=self._collect_price_data, args=(), name="price_collector")
        threads = [price_collector_thread]
        for symbol in self.symbols.keys():
            thread1 = Thread(target=self._get_orderbook_data, args=(symbol,), name=str(symbol) + "order_collector")
            thread2 = Thread(target=self._get_trade_data, args=(symbol,), name=str(symbol) + "trade_collector")
            threads.append(thread1)
            threads.append(thread2)
        for thread in threads:
            thread.start()
        Thread(target=self._thread_checker, args=(threads,)).start()
| amirhosseinttt/nobitexAutoTrader | dataCollector/Controller.py | Controller.py | py | 11,133 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.set_option",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "pandas.set_option",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "pandas.set_option",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "os.path.path.di... |
28091587486 | from flask import Flask, render_template, redirect
from flask_restful import abort, Api
import news_resources
import projects_resources
import library_recources
import reports_resources
from __all_forms import LoginForm, RegistrationForm, CreateNewsForm, CreateProjectForm, ChangePasswordForm, \
FindErrorForm
from data import db_session
from flask_login import LoginManager, login_user, logout_user, login_required, current_user
from data.users import User
from data.added_games import AddedGames
from requests import get, delete, post
import datetime
import random
# Flask application, REST API, and login-manager setup.
app = Flask(__name__)
api = Api(app)
login_manager = LoginManager()
login_manager.init_app(app)
app.config['JSON_AS_ASCII'] = False  # so Russian characters render properly in the API
app.config['SECRET_KEY'] = '13fth14hg83g93hg13hg1b9h8b13v4n2i'
app.config['PERMANENT_SESSION_LIFETIME'] = datetime.timedelta(hours=12)  # auto-logout from the account after 12 hours
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login hook: load a User by id for session management."""
    session = db_session.create_session()
    return session.query(User).get(user_id)
@app.route('/')  # main page, just information about the site
def main_page():
    """Render the static landing page."""
    return render_template('main.html')
@app.route("/news/<int:id>")  # display a single news item
@login_required  # it can only be viewed if the user is logged in
def full_news(id):
    """Show one news item in full, or 404 if it does not exist."""
    news = get('http://127.0.0.1:8080/api/v2/news/' + str(id)).json()['news']  # fetch one news item as JSON
    if news:  # if there is no such news item, then...
        news['created_date'] = str(news['created_date'])[0:10]  # the date is truncated to day precision
        return render_template("full_news.html", news=news)
    abort(404)  # ...we return a 404 error
@app.route('/support',
           methods=["GET", "POST"])  # page for a user to contact the administrators when an error is found
@login_required  # a report can be written only when logged in
def support_page():
    """Show the bug-report form and store a new report on submit."""
    form = FindErrorForm()  # report-creation form
    if form.validate_on_submit():  # if the button was pressed, store the new report in the DB
        post("http://127.0.0.1:8080/api/v2/reports", json={
            "title": form.title.data,
            "content": form.content.data,
            "author": current_user.username}
             )
        return redirect("/")  # redirect to the main page
    return render_template('support.html', form=form)
@app.route("/reports") # отображение всех репортов
@login_required
def all_reports():
if current_user.is_developer: # страница доступна только для администраторов
reports = get('http://127.0.0.1:8080/api/v2/reports').json()['reports']
for item in reports:
item['id'] = str(item['id']) # тоже преобразование с string для создания ссылки на html форме
reports.reverse() # реверс для отображения сначала новых репортов
return render_template('reports.html', reports=reports)
abort(403) # ошибка: недостаточно прав
@app.route("/full_report/<int:id>") # отображение полностью одного репорта
@login_required
def full_report(id):
if current_user.is_developer: # доступно только администраторам
report = get('http://127.0.0.1:8080/api/v2/reports/' + str(id)).json()['report'] # получения json репорта
if report:
report["id"] = str(report["id"]) # опять преобразование в string для использование в html
return render_template("full_report.html", report=report)
abort(404)
@app.route("/full_report/delete_report/<int:id>",
methods=["GET", "DELETE"]) # на этот адрес будут переадресация при нажатии на ссылку с надписью "удалить"
@login_required
def delete_report(id):
delete('http://127.0.0.1:8080/api/v2/reports/' + str(id)) # удаление репорта
return redirect("/reports")
@app.route('/projects') # страница со всеми проектами (играми), которые сделала компания
def projects_page():
projects = get('http://127.0.0.1:8080/api/v2/projects').json()['projects'] # получение json всех проектов
added_projects = list()
session = db_session.create_session()
session = session.query(AddedGames.project_name).filter(AddedGames.username == current_user.username).all()
# получение список множеств id проектов, которые добавлены в библиотеку у пользователя (пример: [(1,), (2,)])
for i in session:
added_projects.append(str(i[0])) # добавляем эти id без множеств (пример [(1,), (2,)] -> [1, 2])
for i in range(len(projects)):
projects[i]["id"] = str(projects[i]["id"]) # опять то же самое, что было в новостях и репортах...
return render_template('projects.html', projects=projects, added_projects=added_projects)
@app.route('/add_project_lib/<int:id>', methods=['GET', 'POST']) # добавление проекта в библиотеку пользователя
@login_required # можно перейти только если выполнен вход в профиль
def projects_add(id):
post('http://127.0.0.1:8080/api/v2/library', json={
"project_name": id, # id игры, которую добавили
"username": current_user.username # логин пользователя, который добавил игру
})
return redirect("/projects") # возврат на страницу со всеми проектами
@app.route('/delete_project_lib/<int:id>') # удаление проекта из библиотеки пользователя
@login_required
def delete_project(id):
session = db_session.create_session()
game_id = session.query(AddedGames.id).filter(AddedGames.username == current_user.username,
AddedGames.project_name == id).first()
# получение id проекта, который надо удалить
delete('http://127.0.0.1:8080/api/v2/library/' + str(game_id[0]))
return redirect('/projects')
@app.route('/profile/<string:username>')  # profile page
@login_required
def profile_page(username):
    """Show a user's profile and game library (owner or administrators only)."""
    # the statistics are available only to the user they belong to
    if current_user.username == username or current_user.is_developer:
        session = db_session.create_session()
        session = session.query(User).filter(User.username == username).first()
        games = get('http://127.0.0.1:8080/api/v2/library').json()[
            'games']  # fetch all games added by all users
        user_lib = list()
        for i in games:
            if i["username"] == current_user.username:
                user_lib.append(
                    i["project_name"])  # keep only the games added by this particular user
        all_proj = get("http://127.0.0.1:8080/api/v2/projects").json()["projects"]  # fetch all of the company's projects
        games = list()
        for i in all_proj:
            if i["id"] in user_lib:
                # if the game is in the user's library, its info is added to the variable
                i["id"] = str(i["id"])
                games.append(i)
        return render_template('profile.html', user=session, games=games)
    abort(403)
@app.route('/login', methods=['GET', 'POST'])  # account login page
def login():
    """Authenticate the user with login/password and start the session."""
    form = LoginForm()  # wasn't sure it was appropriate to pass the password via the API, so did it the old way
    if form.validate_on_submit():
        session = db_session.create_session()
        user = session.query(User).filter(User.username == form.username.data).first()
        if user and user.check_password(form.password.data):  # if such a user exists and the password matches,
            login_user(user)  # the login is performed
            return redirect("/")
        return render_template('login.html',  # otherwise the page shows "wrong login or password"
                               message="Неправильный логин или пароль",
                               form=form)
    return render_template('login.html', form=form)
@app.route('/registration', methods=['GET', 'POST'])  # sign-up page
def registration():
    """Register a new user and redirect to the VK-linking instructions.

    Rejects duplicate logins, stores the hashed password, and generates an
    8-digit confirmation code used to link the user's VK account.
    """
    form = RegistrationForm()
    if form.validate_on_submit():
        session = db_session.create_session()
        if session.query(User).filter(User.username == form.username.data).first():
            # the user already exists in the database
            return render_template('register.html', message="Такой логин существует", form=form)
        # Build the VK confirmation code. FIX: randint(0, 9) instead of
        # randint(0, 10) — the old bound could emit "10" (two characters,
        # not a digit), inconsistent with change_vk's randint(0, 9) and
        # with the intended 8-digit code.
        code = ""
        for i in range(8):
            code = code + str(random.randint(0, 9))
        user = User(
            username=form.username.data,  # the login is stored in the DB,
            submit_code=code)  # along with the VK confirmation code
        user.set_password(form.password.data)  # the password is hashed and set
        session.add(user)
        session.commit()
        login_user(user)
        return redirect('/confirm_vk/' + form.username.data)  # redirect to the VK-linking instructions page
    return render_template("register.html", form=form)
@app.route('/confirm_vk/<string:name>')  # page with the VK-linking instructions
@login_required
def confirm(name):
    """Show the VK confirmation code for user *name*."""
    session = db_session.create_session()
    user = session.query(User.submit_code).filter(User.username == name)
    # fetch this user's code for linking the VK account
    return render_template('confirm.html', code=user[0][0])
@app.route('/create')  # page to choose what to add to the site - a project or a news item
@login_required
def create():
    """Choice page between creating news and creating a project (admins only)."""
    if current_user.is_developer:  # available to administrators only
        return render_template("create.html")
    abort(403)  # error: insufficient permissions
@app.route("/create_news", methods=['GET', 'POST'])  # news creation
@login_required
def create_news():
    """Create a news item (administrators only)."""
    if current_user.is_developer:  # administrators only
        form = CreateNewsForm()
        if form.validate_on_submit():
            post("http://127.0.0.1:8080/api/v2/news", json={  # store the news data
                'title': form.title.data,
                'content': form.content.data,
                'author': form.author.data})
            return redirect("/create")
        return render_template("create_news.html", form=form)
    abort(403)  # error: insufficient permissions
@app.route("/create_project", methods=['GET', 'POST'])  # add a project
@login_required
def create_project():
    """Add a project (administrators only)."""
    if current_user.is_developer:  # administrators only
        form = CreateProjectForm()
        if form.validate_on_submit():
            post("http://127.0.0.1:8080/api/v2/projects", json={  # add the project
                'title': form.title.data,
                'content': form.content.data,
                'download_link': form.download_link.data  # game download link
            })
            return redirect("/create")
        return render_template("create_project.html", form=form)
    abort(403)
@app.route('/change_password', methods=['GET', 'POST'])  # password change
@login_required
def change_password():
    """Change the current user's password after validating the old one."""
    form = ChangePasswordForm()
    if form.validate_on_submit():
        session = db_session.create_session()
        user = session.query(User).filter(current_user.id == User.id).first()
        # fetch the current user's record
        if not user.check_password(form.exist_password.data):  # the old password failed authenticity check
            return render_template("change_password.html", form=form, message="Старый пароль указан неверно")
        if form.new_password.data != form.repeat_password.data:  # the new password entered in the 2 inputs differs
            return render_template("change_password.html", form=form, message="Пароли не совпадают")
        user.set_password(form.new_password.data)  # all good: set the new password
        session.add(user)
        session.commit()
        return redirect("/profile/" + str(current_user.username))  # redirect to the profile page
    return render_template("change_password.html", form=form)
@app.route('/logout')  # simply log out of the account
@login_required
def logout():
    """Log the current user out and return to the main page."""
    logout_user()
    return redirect("/")
@app.route('/change_vk')  # link a different VK account to the profile
@login_required
def change_vk():
    """Reset the VK confirmation: mark the profile unconfirmed and issue a new code."""
    session = db_session.create_session()
    user = session.query(User).filter(
        User.username == current_user.username).first()  # current user's record
    user.is_submit = 0  # set confirmation to False (the VK profile is no longer confirmed)
    code = ""
    for i in range(8):
        code = code + str(random.randint(0, 9))  # generate the confirmation code
    user.submit_code = code
    session.add(user)
    session.commit()
    return render_template('confirm.html', code=code)  # HTML file with the VK-confirmation instructions
@app.errorhandler(403)  # styling of error 403
def page_not_found(e):
    """Render the custom 403 page."""
    return render_template('error403.html')  # this is how it will look
def main():
    """Register all REST resources, initialize the DB, and run the app."""
    # News API
    api.add_resource(news_resources.NewsListResource, '/api/v2/news')
    api.add_resource(news_resources.NewsResource, '/api/v2/news/<int:news_id>')
    # Projects API
    api.add_resource(projects_resources.ProjectsListResource, '/api/v2/projects')
    api.add_resource(projects_resources.ProjectsResource, '/api/v2/projects/<int:projects_id>')
    # User libraries API
    api.add_resource(library_recources.AddedGamesListResource, '/api/v2/library')
    api.add_resource(library_recources.AddedGamesResource, '/api/v2/library/<int:game_id>')
    # Reports API
    api.add_resource(reports_resources.ReportsListResource, '/api/v2/reports')
    api.add_resource(reports_resources.ReportsResource, '/api/v2/reports/<int:report_id>')
    db_session.global_init("db/blogs.sqlite")
    app.run(port=8080, host='127.0.0.1')
if __name__ == '__main__':
    main()
| InfiRRiar/YL_proj_web | run.py | run.py | py | 16,572 | python | ru | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "flask_restful.Api",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "flask_login.LoginManager",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "datetime.timed... |
29659709532 | #Import modules
from bs4 import BeautifulSoup
from urllib.request import urlopen
import math
import pandas as pd
import numpy as np
from sklearn.linear_model import LinearRegression
#Lottery ticket expectation value calculator
class lottery_expectation_calculator:
    """Expectation-value calculator for a UK Lotto ticket.

    Scrapes the National Lottery website for the next-draw jackpot and the
    current rollover count, then combines hypergeometric win probabilities
    with the prize tiers (including the "must be won" rolldown rules at
    5 rollovers) to compute and print the expected winnings of one ticket.
    """
    def __init__(self):
        self.current_jackpot = None  # next-draw jackpot in GBP, set by get_current_jackpot()
        self.rollovers = None  # consecutive rollovers, set by get_num_rollovers()
        self.expectation_value = None  # computed expectation in GBP
    # Method which uses BeautifulSoup to scrape national-lottery webpage and find current jackpot
    def get_current_jackpot(self):
        """Scrape the lotto page and store the next-draw jackpot (int, GBP)."""
        url = "https://www.national-lottery.co.uk/games/lotto?icid=-:mm:-:mdg:lo:dbg:pl:co"
        page = urlopen(url)
        html = page.read().decode("utf-8")
        soup = BeautifulSoup(html, "html.parser")
        for meta in soup.find_all('meta'):
            if 'name="lotto-next-draw-jackpot"' in str(meta):
                # Content looks like "£5,000,000" -> strip formatting and parse.
                jackpot = (meta['content']).replace(',', '')
                self.current_jackpot = int(jackpot.replace('£', ''))
    # Method which uses BeautifulSoup to scrape national-lottery webpage and find current number of rollovers
    def get_num_rollovers(self):
        """Scrape the lotto page and store the current rollover count (int)."""
        url = "https://www.national-lottery.co.uk/games/lotto?icid=-:mm:-:mdg:lo:dbg:pl:co"
        page = urlopen(url)
        html = page.read().decode("utf-8")
        soup = BeautifulSoup(html, "html.parser")
        for meta in soup.find_all('meta'):
            if 'name="lotto-roll-count"' in str(meta):
                self.rollovers = int(meta['content'])
    def result_probability(self, num_correct):
        """Hypergeometric probability of matching exactly *num_correct* of the
        6 drawn balls when 6 numbers are picked from 59."""
        return ((math.comb(6, num_correct)) * (math.comb(53, (6 - num_correct)))) / (math.comb(59, 6))
    def get_expectation_value(self):
        """Compute, store, and print the expected value (GBP) of one ticket."""
        # Refresh the current jackpot and number of rollovers.
        self.get_current_jackpot()
        self.get_num_rollovers()
        # Linear regression (log sales vs log jackpot) to predict ticket
        # sales from the current jackpot; only needed for rolldown draws.
        lottery_sales_data = pd.read_csv('Lottery_sales_2021.csv')
        lottery_sales_data['log_sales'] = np.log10(lottery_sales_data['Sales'])
        lottery_sales_data['log_jackpot'] = np.log10(lottery_sales_data['Jackpot'])
        # Prepare training data.
        y = np.array(lottery_sales_data['log_sales'].copy()).reshape(-1, 1)
        X = np.array(lottery_sales_data['log_jackpot'].copy()).reshape(-1, 1)
        regression_model = LinearRegression()
        regression_model.fit(X, y)
        # Probabilities of matching n numbers (bb = bonus ball).
        p_0 = self.result_probability(0)
        p_1 = self.result_probability(1)
        p_2 = self.result_probability(2)
        p_3 = self.result_probability(3)
        p_4 = self.result_probability(4)
        p_5_nobb = ((math.comb(6, 5)) * (math.comb(52, 1))) / (math.comb(59, 6))
        p_5_bb = ((math.comb(6, 5)) * (math.comb(52, 0))) / (math.comb(59, 6))
        p_6 = self.result_probability(6)
        if self.rollovers != 5:
            # Normal draw: standard fixed prize tiers.
            w_0 = 0
            w_1 = 0
            w_2 = 2
            w_3 = 30
            w_4 = 140
            w_5_nobb = 1750
            w_5_bb = 1000000
            w_6 = self.current_jackpot
        else:
            # "Must be won" rolldown draw: the jackpot (minus the Lucky Dip
            # payout to 2-match winners) is shared across the lower tiers.
            # Predict sales from the current jackpot using the regression model.
            current_jackpot = np.log10(self.current_jackpot).reshape(-1, 1)
            predicted_sales = int(10 ** (regression_model.predict(current_jackpot)))
            current_jackpot = int(10 ** (current_jackpot))
            # Prizes calculated as per https://www.lottery.co.uk/lotto/must-be-won-draws
            w_0 = 0
            w_1 = 0
            w_2 = 7
            money_paid_to_2 = predicted_sales * p_2 * 5
            w_3 = (((current_jackpot - money_paid_to_2) * 0.85) / (predicted_sales * p_3)) + 30
            w_4 = (((current_jackpot - money_paid_to_2) * 0.07) / (predicted_sales * p_4)) + 140
            w_5_nobb = (((current_jackpot - money_paid_to_2) * 0.05) / (predicted_sales * p_5_nobb)) + 1750
            w_5_bb = (((current_jackpot - money_paid_to_2) * 0.03) / (predicted_sales * p_5_bb)) + 1000000
            # BUG FIX: was `get_current_jackpot()` — an undefined global name
            # (NameError at runtime; the method also returns None).  The
            # jackpot was already fetched and cached on self above.
            w_6 = self.current_jackpot
        # Expectation = sum over tiers of probability * prize.
        self.expectation_value = (p_0 * w_0) + (p_1 * w_1) + (p_2 * w_2) + (p_3 * w_3) + (p_4 * w_4) + (p_5_nobb * w_5_nobb) + (p_5_bb * w_5_bb) + (p_6 * w_6)
        print(f'The expectation value of the UK lottery is currently: £{self.expectation_value}')
print(f'The expectation value of the UK lottery is currently: £{self.expectation_value}')
# Script entry point: build the calculator and print the current expectation
# value (scrapes the National Lottery site and reads Lottery_sales_2021.csv).
lottery_exp_calc = lottery_expectation_calculator()
lottery_exp_calc.get_expectation_value()
| StanleyKubricker/Lottery-Expectation-Value | lottery_expectation_value.py | lottery_expectation_value.py | py | 4,744 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "urllib.request.urlopen",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "urllib.request.urlopen",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "bs4.B... |
31727663113 | """
This module should contain helper functionality that assists for Jira.
"""
import logging
from collections import namedtuple
from typing import Dict, List, Union
import jira
# (check_cert: bool - verify the server certificate; truststore: str - CA bundle path)
SSLOptions = namedtuple("SSLOptions", "check_cert truststore")


class JiraUtils:
    """
    This class contains the shared functions that will enable scripts to interact with JIRA.
    """

    def __init__(self, jira_url: str, pat_token: str, ssl_options: SSLOptions):
        """
        Default constructor that initializes the object.

        Authentication is only possible doing PAT. For more information follow up on the Atlassian Documentation:
        https://confluence.atlassian.com/enterprise/using-personal-access-tokens-1026032365.html

        :param jira_url: URL to access the JIRA instance.
        :param pat_token: The token to access the JIRA instance. Will define if a script can access the required
                          resources.
        :param ssl_options: The NamedTuple that contains the options to configure the SSL setup.
        """
        self.logger = logging.getLogger()
        self.jira_url = jira_url
        options = self.__prepare_ssl_options(ssl_options)
        self.jira_obj = jira.JIRA(
            self.jira_url,
            options=options,
            token_auth=pat_token,
        )

    @staticmethod
    def __prepare_ssl_options(ssl_options: SSLOptions) -> dict:
        """
        Prepares the SSL options dict for the JIRA Client.

        :param ssl_options: The NamedTuple that contains the options to configure the SSL setup.
        :return: The dictionary that will be passed to the JIRA library and in the end to requests.
        """
        result: Dict[str, Union[str, bool]] = {}
        if ssl_options.check_cert:
            # Verify against the given CA bundle/truststore.
            result["verify"] = ssl_options.truststore
        else:
            result["verify"] = False
        return result

    def jira_get_field_values(self, field_id: str, issue: str) -> Dict[str, str]:
        """
        Retrieves a list of all available field values in a select or multi-select.

        :param field_id: The ID of the field that the values should be retrieved for.
        :param issue: The issue that decides the field values that are available to search for.
        :return: The dict of possible field values or an empty dict. Keys represent the names and values are the IDs.
        """
        result = {}
        issue_obj = self.jira_obj.issue(issue)
        meta = self.jira_obj.editmeta(issue_obj.key)
        for option in meta["fields"][field_id]["allowedValues"]:
            result[option.get("value")] = option.get("id")
        return result

    def jira_get_field_name(self, name: str) -> str:
        """
        Retrieve the field ID by the name of the field that an end user sees.

        :param name: The name of the field.
        :return: The field ID or an empty string.
        """
        result = ""
        jira_fields = self.jira_obj.fields()
        for field in jira_fields:
            if field.get("name") == name:
                field_id = field.get("id")
                if isinstance(field_id, str):
                    result = field_id
                    break
                # Should never happen since the ID is always
                # a str but mypy requires this logic.
                continue
        return result

    def jira_get_version_obj(self, issue: str, name: str):
        """
        Get the version object that represents a version in JIRA:

        :param issue: The issue that decides the versions that are available to search for.
        :param name: The name of the version that should be retrieved
        :return: The full version object as returned by the JIRA library.
        """
        issue_obj = self.jira_obj.issue(issue)
        project = issue_obj.get_field("project")
        for version in self.jira_obj.project_versions(project):
            if version.name == name:
                return version
        return None

    def jira_get_transition_id(self, jsc: str, transition_name: str) -> str:
        """
        Retrieve the transition ID of a ticket by the transition name.

        :param jsc: The Jira ticket number.
        :param transition_name: Name of the transition.
        :return: The target transition ID or an empty str.
        """
        transitions = self.jira_obj.transitions(jsc)
        target_transition_id = ""
        for transition in transitions:
            if transition.get("name") == transition_name:
                target_transition_id = transition.get("id")
        return target_transition_id

    def jira_transition_tickets(self, jsc: str) -> None:
        """
        Transition an issue in the workflow if it is in the correct state. If not log a message.

        :param jsc: The Jira ticket number.
        """
        target_transition_id = self.jira_get_transition_id(jsc, "Integrated")
        if target_transition_id == "":
            self.logger.error(
                'Issue "%s" could not be transitioned to the state "QE Open" because the transition could not be'
                " identified!",
                jsc,
            )
            return
        self.jira_obj.transition_issue(jsc, target_transition_id)

    def jira_do_search(self, jql: str, max_results: int = 50) -> List[str]:
        """
        Perform a JIRA search.

        JQL documentation: https://confluence.atlassian.com/jiracoreserver073/advanced-searching-861257209.html

        :param jql: The JQL that should be used for searching.
        :param max_results: The number of results that should be
        :return: The list of issue keys that match the filter. The number of results is limited by ``max_results``.
        """
        result: List[str] = []
        for issue in self.jira_obj.search_issues(jql, maxResults=max_results):
            # BUG FIX: the original checked `isinstance(jira.Issue, str)`, which
            # is always False (a class object is never a str instance), so every
            # search silently returned an empty list. search_issues yields Issue
            # objects; collect each issue's key.
            result.append(issue.key)
        return result
| openSUSE/sle-prjmgr-tools | sle_prjmgr_tools/utils/jira.py | jira.py | py | 5,957 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "collections.namedtuple",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "logging.getLogger",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "jira.JIRA",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "typing.Dict",
... |
6394563903 | import jax
import jax.numpy as jnp
import numpy as np
import pyscf
import chex
from jaxtyping import Float, Array, Int
from jsonargparse import CLI, Namespace
from functools import partial
from collections import namedtuple
from icecream import ic
from pyscf_ipu.nanoDFT import utils
from pyscf_ipu.exchange_correlation.b3lyp import b3lyp
from pyscf_ipu.electron_repulsion.direct import (prepare_electron_repulsion_integrals, electron_repulsion_integrals, ipu_einsum)
HARTREE_TO_EV = 27.2114079527
EPSILON_B3LYP = 1e-20
HYB_B3LYP = 0.2
def energy(density_matrix, H_core, diff_JK, E_xc, E_nuc, _np=jax.numpy):
    """Evaluate the DFT total energy and its additive terms.

    DFT minimises energy(density_matrix, ...) over density_matrix, so this
    function plays the role of a loss. density_matrix, H_core and diff_JK are
    all (N, N), N being the number of atomic/molecular orbitals (N=66 for
    C6H6); diff_JK is already J - K/2*HYB_B3LYP, and both diff_JK and E_xc
    depend on density_matrix. The electron density is
    rho(r) = sum_{ij} density_matrix_{ij} X_i(r) X_j(r) with GTOs X_i(r)~exp(-|r|^2).

    Returns ``_np.array([E_total, E_core, E_J_K/2, E_xc, E_nuc])``.
    """
    core_term = _np.sum(density_matrix * H_core)           # one-electron contribution
    two_electron = _np.sum(density_matrix * diff_JK) / 2   # Coulomb/exchange contribution
    total = core_term + two_electron + E_xc + E_nuc        # -232.04[Ha] for C6H6 at convergence
    return _np.array([total, core_term, two_electron, E_xc, E_nuc])
def nanoDFT_iteration(i, vals, opts, mol):
    """Each call updates density_matrix attempting to minimize energy(density_matrix, ... ).

    Runs inside jax.lax.fori_loop (see _nanoDFT), so `vals` is the loop carry:
    six (N, N) matrices followed by scalars/tensors of varying shape. Returns
    the updated carry with the same structure.
    """
    density_matrix, V_xc, diff_JK, O, H_core, L_inv = vals[:6]                  # All (N, N) matrices
    E_nuc, occupancy, ERI, grid_weights, grid_AO, diis_history, log = vals[6:]  # Varying types/shapes.
    # Debug-only memory report of the loop carry (opts.v = verbose).
    if opts.v:
        print("---------- MEMORY CONSUMPTION ----------")
        MB = 0
        for t in vals:
            try:
                if type(t) != type(()) and len(np.shape(t)) > 0:
                    print(t.nbytes/10**6, t.shape, t.dtype)
                    MB += t.nbytes/10**6
            except:
                print(type(t))
        print("ERI")
        for a in ERI: # prints weird in dense_ERI case
            print( a.nbytes/10**6, a.shape)
            MB += a.nbytes / 10**6
        print("__________")
        print("Total: ", MB)
        print("----------------------------------------")
        print("")

    # Step 1: Update Hamiltonian (optionally use DIIS to improve DFT convergence).
    H = H_core + diff_JK + V_xc                                                 # (N, N)
    if opts.diis: H, diis_history = DIIS(i, H, density_matrix, O, diis_history, opts)  # H_{i+1}=c_1H_i+...+c9H_{i-9}.

    # Step 2: Solve eigh (L_inv turns generalized eigh into eigh).
    eigvects = L_inv.T @ linalg_eigh(L_inv @ H @ L_inv.T, opts)[1]              # (N, N)

    # Step 3: Use result from eigenproblem to update density_matrix.
    density_matrix = (eigvects*occupancy*2) @ eigvects.T                        # (N, N)
    E_xc, V_xc     = exchange_correlation(density_matrix, grid_AO, grid_weights)  # float (N, N)
    diff_JK        = get_JK(density_matrix, ERI, opts.dense_ERI, opts.backend)    # (N, N) (N, N)

    # Log SCF matrices and energies (not used by DFT algorithm).
    #log["matrices"] = log["matrices"].at[i].set(jnp.stack((density_matrix, J, K, H))) # (iterations, 4, N, N)
    N = density_matrix.shape[0]
    log["matrices"] = jax.lax.dynamic_update_slice(log["matrices"], density_matrix.reshape(1, 1, N, N), (i, 0, 0, 0))
    log["matrices"] = jax.lax.dynamic_update_slice(log["matrices"], diff_JK.       reshape(1, 1, N, N), (i, 1, 0, 0))
    log["matrices"] = jax.lax.dynamic_update_slice(log["matrices"], H.             reshape(1, 1, N, N), (i, 2, 0, 0))
    log["energy"] = log["energy"].at[i].set(energy(density_matrix, H_core, diff_JK, E_xc, E_nuc))  # (iterations, 6)

    # Optionally dump every intermediate tensor of this iteration to disk for
    # numerical-error analysis (host side effect via jax.debug.callback).
    if opts.vis_num_error is True:
        import os
        dir_label = opts.molecule_name
        num_error_dir = f'num_error/{dir_label}/'
        os.makedirs(num_error_dir , exist_ok=True)
        def host_callback(data, i):
            # labels are adjusted to the `data` that will be passed to the callback - keep that in mind when passing different list of tensors
            labels = ["density_matrix", "V_xc", "diff_JK", "O", "H_core", "L_inv", "E_nuc", "occupancy", "ERI", "grid_weights", "grid_AO", "diis_history", "E_xc", "eigvects", "H"]
            for l, d in zip(labels, data):
                if l == "diis_history" or l == "ERI":
                    for idx, arr in enumerate(d):
                        np.savez(f'{num_error_dir}{i}_{l}{idx}.npz', v = np.array(arr))
                else:
                    np.savez(f'{num_error_dir}{i}_{l}.npz', v = d)
        jax.debug.callback(host_callback, vals[:-1] + [E_xc, eigvects, H], i)

    return [density_matrix, V_xc, diff_JK, O, H_core, L_inv, E_nuc, occupancy, ERI, grid_weights, grid_AO, diis_history, log]
def exchange_correlation(density_matrix, grid_AO, grid_weights):
    """Compute exchange correlation integral using atomic orbitals (AO) evaluated on a grid.

    grid_AO is (4, gsize, N): AO values (index 0) and their x/y/z derivatives.
    Returns (E_xc, V_xc) with V_xc of shape (N, N). Uses jax.lax.psum over
    axis_name "p", so it must run under pmap (see _nanoDFT/nanoDFT).
    """
    # Perfectly SIMD parallelizable over grid_size axis.
    # Only need one reduce_sum in the end.
    grid_AO_dm = grid_AO[0] @ density_matrix                                    # (gsize, N) @ (N, N) -> (gsize, N)
    grid_AO_dm = jnp.expand_dims(grid_AO_dm, axis=0)                            # (1, gsize, N)
    mult = grid_AO_dm * grid_AO
    rho = jnp.sum(mult, axis=2)                                                 # (4, grid_size)=(4, 45624) for C6H6.
    E_xc, vrho, vgamma = b3lyp(rho, EPSILON_B3LYP)                              # (gridsize,) (gridsize,) (gridsize,)
    E_xc = jax.lax.psum(jnp.sum(rho[0] * grid_weights * E_xc), axis_name="p")   # float=-27.968[Ha] for C6H6 at convergence.
    rho = jnp.concatenate([vrho.reshape(1, -1)/2, 4*vgamma*rho[1:4]], axis=0) * grid_weights  # (4, grid_size)=(4, 45624)
    grid_AO_T = grid_AO[0].T                                                    # (N, gsize)
    rho = jnp.expand_dims(rho, axis=2)                                          # (4, gsize, 1)
    grid_AO_rho = grid_AO * rho                                                 # (4, gsize, N)
    sum_grid_AO_rho = jnp.sum(grid_AO_rho, axis=0)                              # (gsize, N)
    V_xc = grid_AO_T @ sum_grid_AO_rho                                          # (N, N)
    V_xc = jax.lax.psum(V_xc, axis_name="p")                                    # (N, N)
    V_xc = V_xc + V_xc.T                                                        # (N, N)
    return E_xc, V_xc                                                           # (float) (N, N)
def get_JK(density_matrix, ERI, dense_ERI, backend):
    """Return diff_JK = J - K/2*HYB_B3LYP as an (N, N) matrix.

    With ``dense_ERI`` the full (N, N, N, N) tensor is contracted directly;
    otherwise ``ERI`` is a (nonzero values, packed indices) pair handled by
    the sparse symmetric einsum kernel.
    """
    num_orbitals = density_matrix.shape[0]
    if not dense_ERI:
        from pyscf_ipu.nanoDFT.sparse_symmetric_ERI import sparse_symmetric_einsum
        combined = sparse_symmetric_einsum(ERI[0], ERI[1], density_matrix, backend)
    else:
        coulomb = jnp.einsum('ijkl,ji->kl', ERI, density_matrix)    # J, (N, N)
        exchange = jnp.einsum('ijkl,jk->il', ERI, density_matrix)   # K, (N, N)
        combined = coulomb - (exchange / 2 * HYB_B3LYP)
    return combined.reshape(num_orbitals, num_orbitals)
def _nanoDFT(state, ERI, grid_AO, grid_weights, opts, mol):
    """Per-device DFT program: precompute V_xc/diff_JK/H_core, then run
    opts.its SCF iterations with jax.lax.fori_loop. Intended to be pmapped
    (axis_name "p"); returns (logged matrices, H_core, logged energies)."""
    # Utilize the IPUs MIMD parallism to compute the electron repulsion integrals (ERIs) in parallel.
    #if opts.backend == "ipu": state.ERI = electron_repulsion_integrals(state.input_floats, state.input_ints, mol, opts.threads_int, opts.intv)
    #else: pass # Compute on CPU.
    grid_AO = jnp.transpose(grid_AO, (1,0,2)) # (padded_gsize/16, 4, N) -> (4, pgsize, N)

    # Precompute the remaining tensors.
    E_xc, V_xc = exchange_correlation(state.density_matrix, grid_AO, grid_weights) # float (N, N)
    diff_JK = get_JK(state.density_matrix, ERI, opts.dense_ERI, opts.backend)      # (N, N) (N, N)
    H_core = state.kinetic + state.nuclear                                         # (N, N)

    # Log matrices from all DFT iterations (not used by DFT algorithm).
    N = H_core.shape[0]
    log = {"matrices": np.zeros((opts.its, 4, N, N)), "E_xc": np.zeros((opts.its)), "energy": np.zeros((opts.its, 5))}

    # Perform DFT iterations.
    log = jax.lax.fori_loop(0, opts.its, partial(nanoDFT_iteration, opts=opts, mol=mol), [state.density_matrix, V_xc, diff_JK, state.O, H_core, state.L_inv, # all (N, N) matrices
                                                            state.E_nuc, state.mask, ERI, grid_weights, grid_AO, state.diis_history, log])[-1]

    return log["matrices"], H_core, log["energy"]
# jaxtyping shape-annotated aliases (N = number of orbitals).
FloatN = Float[Array, "N"]            # orbital-length vector (e.g. occupation mask)
FloatNxN = Float[Array, "N N"]        # square orbital matrix
Grid = Float[Array, "4 grid_size N"]  # AO values + x/y/z derivatives on the XC grid
FloatArray = Float[Array, "..."]      # arbitrary-shape float tensor
IntArray = Int[Array, "..."]          # arbitrary-shape int tensor
@chex.dataclass
class IterationState:
    """State tensors used during self-consistent field (SCF) iterations

    FloatN: Vector used to store the electron occupation mask.
    FloatNxN: Square matrix used for storing the one-electron integrals and density matrix.
    FloatNxNxNxN: 4-d matrix representing the two-electron repulsion integrals.
    Grid [4, grid_size, N] (float): Numerical grid used to evaluate the
        exchange-correlation energy integral.

    Attributes:
        E_nuc (float): Energy of the nuclear-nuclear electrostatic interactions.
        density_matrix (FloatNxN): Electron density in the LCAO basis set.
        kinetic (FloatNxN): Kinetic energy integrals in the LCAO basis set.
        nuclear (FloatNxN): nuclear attraction integrals in the LCAO basis set.
        O (FloatNxN): Overlap integrals in the LCAO basis set.
        mask (FloatN): Orbital occupation mask.
        input_floats (FloatArray): Supplementary vector of floats for ERI evaluation with libcint
        input_ints (IntArray): Supplementary vector of ints for ERI evaluation with libcint
        L_inv (FloatNxN): Defined as the inverse of the Cholesky decomposition of the overlap matrix.
            Used to change generalised eig problem into an eigh one.
        diis_history (FloatArray): Direct Inversion of Iterative Subspace (DIIS) is an optional method that
            can accelerate convergence of the SCF iterations. Maintains a history of how the Hamiltonian
            is evolving across the SCF iterations.
    """
    E_nuc: float                 # scalar nuclear-nuclear repulsion energy
    density_matrix: FloatNxN     # (N, N)
    kinetic: FloatNxN            # (N, N)
    nuclear: FloatNxN            # (N, N)
    O: FloatNxN                  # (N, N) overlap
    mask: FloatN                 # (N,) occupation mask (1 for occupied, 0 otherwise)
    input_floats: FloatArray     # libcint inputs (0 when unused)
    input_ints: IntArray         # libcint inputs (0 when unused)
    L_inv: FloatNxN              # (N, N) inv(cholesky(O))
    diis_history: FloatArray     # (errvecs, Hs, DIIS_H) tuple, or None when DIIS disabled
def init_dft_tensors_cpu(mol, opts, DIIS_iters=9):
    """Build all SCF input tensors on CPU with PySCF.

    Constructs the XC integration grid, evaluates AOs on it (optionally
    sparsified via opts.ao_threshold), computes one-electron integrals and the
    MINAO density-matrix guess, and initialises the DIIS history buffers.

    Returns (state, n_electrons_half, E_nuc, N, L_inv, grid_weights,
    grid_coords, grid_AO).
    """
    N                = mol.nao_nr()                                 # N=66 for C6H6 (number of atomic **and** molecular orbitals)
    print("-----> [ %i ] <-----"%N)
    n_electrons_half = mol.nelectron//2                             # 21 for C6H6
    E_nuc            = mol.energy_nuc()                             # float = 202.4065 [Hartree] for C6H6. TODO(): Port to jax.

    # TODO(): port grid/eval_gto to Jax.
    grids            = pyscf.dft.gen_grid.Grids(mol)
    grids.level      = opts.level
    grids.build()
    grid_weights    = grids.weights                                 # (grid_size,) = (45624,) for C6H6
    coord_str       = 'GTOval_cart_deriv1' if mol.cart else 'GTOval_sph_deriv1'
    grid_AO         = mol.eval_gto(coord_str, grids.coords, 4)      # (4, grid_size, N) = (4, 45624, 9) for C6H6.
    if opts.ao_threshold > 0.0:
        # Zero tiny AO values, then drop grid points whose AO row is all zero.
        grid_AO[np.abs(grid_AO)<opts.ao_threshold] = 0
        sparsity_mask = np.where(np.all(grid_AO == 0, axis=0), 0, 1)
        sparse_rows = np.where(np.all(sparsity_mask == 0, axis=1), 0, 1).reshape(-1, 1)
        print(f"axis=( , ) sparsity in grid_AO: {np.sum(grid_AO==0) / grid_AO.size:.4f}")
        print(f"axis=(0, ) sparsity in grid_AO: {np.sum(sparsity_mask==0) / sparsity_mask.size:.4f}")
        print(f"axis=(0, 2) sparsity in grid_AO: {np.sum(sparse_rows==0) / sparse_rows.size:.4f}")
        grid_AO = jnp.delete(grid_AO, jnp.where(sparse_rows == 0)[0], axis=1)
        grid_weights = jnp.delete(grid_weights, jnp.where(sparse_rows == 0)[0], axis=0)
        grid_coords = jnp.delete(grids.coords, jnp.where(sparse_rows == 0)[0], axis=0)
    else:
        grid_coords = grids.coords
    density_matrix  = pyscf.scf.hf.init_guess_by_minao(mol)         # (N,N)=(66,66) for C6H6.

    # TODO(): Add integral math formulas for kinetic/nuclear/O/ERI.
    kinetic         = mol.intor_symmetric('int1e_kin')              # (N,N)
    nuclear         = mol.intor_symmetric('int1e_nuc')              # (N,N)
    O               = mol.intor_symmetric('int1e_ovlp')             # (N,N)
    L_inv           = np.linalg.inv(np.linalg.cholesky(O))          # (N,N)
    input_floats, input_ints = 0,0#prepare_electron_repulsion_integrals(mol)[:2]

    mask = np.concatenate([np.ones(n_electrons_half), np.zeros(N-n_electrons_half)])

    diis_history = None

    if opts.diis:
        # DIIS is an optional technique to improve DFT convergence.
        DIIS_H       = np.zeros((DIIS_iters+1, DIIS_iters+1))
        DIIS_H[0,1:] = DIIS_H[1:,0] = 1
        diis_history = (np.zeros((DIIS_iters, N**2)), np.zeros((DIIS_iters, N**2)), DIIS_H)

    state = IterationState(E_nuc=E_nuc, density_matrix=density_matrix, kinetic=kinetic,
                           nuclear=nuclear, O=O, mask=mask,
                           input_floats=input_floats, input_ints=input_ints,
                           L_inv=L_inv, diis_history=diis_history)

    return state, n_electrons_half, E_nuc, N, L_inv, grid_weights, grid_coords, grid_AO
def nanoDFT(mol, opts):
    """End-to-end DFT driver.

    Initialises tensors on CPU (PySCF), pads/reshapes the XC grid across
    opts.ndevices, builds either the dense or sparse-symmetric ERI
    representation, pmaps _nanoDFT, and post-processes the logged matrices in
    float64 on CPU. Returns
    (energies_eV, (logged_energies, hlgaps, mo_energy, mo_coeff, grid_coords, grid_weights)).
    """
    # Init DFT tensors on CPU using PySCF.
    state, n_electrons_half, E_nuc, N, L_inv, _grid_weights, grid_coords, grid_AO = init_dft_tensors_cpu(mol, opts)

    grid_AO = jnp.transpose(grid_AO, (1, 0, 2)) # (4,gsize,N) -> (gsize,4,N)
    grid_weights = _grid_weights
    gsize = grid_AO.shape[0]
    remainder = gsize % opts.ndevices
    # Pad the grid so it splits evenly across devices.
    if remainder != 0:
        grid_AO = jnp.pad(grid_AO, ((0,remainder), (0,0), (0,0)) )
        grid_weights = jnp.pad(grid_weights, ((0,remainder)) )
    grid_AO = grid_AO.reshape(opts.ndevices, -1, 4, N)
    grid_weights = grid_weights.reshape(opts.ndevices, -1)

    # Run DFT algorithm (can be hardware accelerated).
    if opts.dense_ERI:
        assert opts.ndevices == 1, "Only support '--dense_ERI True' for `--ndevices 1`. "
        eri_in_axes = 0
        ERI = mol.intor("int2e_sph")
        ERI = np.expand_dims(ERI, 0)
        below_thr = np.abs(ERI) <= opts.eri_threshold
        ERI[below_thr] = 0.0
        ic(ERI.size, np.sum(below_thr), np.sum(below_thr)/ERI.size)
    else:
        from pyscf_ipu.nanoDFT.sparse_symmetric_ERI import get_i_j, num_repetitions_fast
        # s8 symmetry: only distinct ERI entries are stored; keep nonzeros,
        # divide by their multiplicity, and pack indices for the sparse kernel.
        distinct_ERI = mol.intor("int2e_sph", aosym="s8")
        print(distinct_ERI.size)
        below_thr = np.abs(distinct_ERI) <= opts.eri_threshold
        distinct_ERI[below_thr] = 0.0
        ic(distinct_ERI.size, np.sum(below_thr), np.sum(below_thr)/distinct_ERI.size)
        nonzero_indices = np.nonzero(distinct_ERI)[0].astype(np.uint64)
        nonzero_distinct_ERI = distinct_ERI[nonzero_indices].astype(np.float32)

        ij, kl               = get_i_j(nonzero_indices)
        rep                  = num_repetitions_fast(ij, kl)
        nonzero_distinct_ERI = nonzero_distinct_ERI / rep
        batches  = int(opts.batches) # perhaps make 10 batches?
        nipu = opts.ndevices
        remainder = nonzero_indices.shape[0] % (nipu*batches)

        if remainder != 0:
            print(nipu*batches-remainder, ij.shape)
            ij = np.pad(ij, ((0,nipu*batches-remainder)))
            kl = np.pad(kl, ((0,nipu*batches-remainder)))
            nonzero_distinct_ERI = np.pad(nonzero_distinct_ERI, (0,nipu*batches-remainder))

        ij = ij.reshape(nipu, batches, -1)
        kl = kl.reshape(nipu, batches, -1)
        nonzero_distinct_ERI = nonzero_distinct_ERI.reshape(nipu, batches, -1)

        i, j = get_i_j(ij.reshape(-1))
        k, l  = get_i_j(kl.reshape(-1))
        nonzero_indices = np.vstack([i,j,k,l]).T.reshape(nipu, batches, -1, 4).astype(np.int16)
        nonzero_indices = jax.lax.bitcast_convert_type(nonzero_indices, np.float16)
        ERI = [nonzero_distinct_ERI, nonzero_indices]
        eri_in_axes = [0,0]
    #jitted_nanoDFT = jax.jit(partial(_nanoDFT, opts=opts, mol=mol), backend=opts.backend)
    jitted_nanoDFT = jax.pmap(partial(_nanoDFT, opts=opts, mol=mol), backend=opts.backend,
                in_axes=(None, eri_in_axes, 0, 0),
                axis_name="p")
    print(grid_AO.shape, grid_weights.shape)
    vals = jitted_nanoDFT(state, ERI, grid_AO, grid_weights)
    logged_matrices, H_core, logged_energies = [np.asarray(a[0]).astype(np.float64) for a in vals] # Ensure CPU

    # It's cheap to compute energy/hlgap on CPU in float64 from the logged values/matrices.
    logged_E_xc = logged_energies[:, 3].copy()
    print(logged_energies[:, 0] * HARTREE_TO_EV)
    density_matrices, diff_JKs, H = [logged_matrices[:, i] for i in range(3)]
    energies, hlgaps = np.zeros((opts.its, 5)), np.zeros(opts.its)
    for i in range(opts.its):
        energies[i] = energy(density_matrices[i], H_core, diff_JKs[i], logged_E_xc[i], E_nuc, np)
        hlgaps[i]   = hlgap(L_inv, H[i], n_electrons_half, np)
    energies, logged_energies, hlgaps = [a * HARTREE_TO_EV for a in [energies, logged_energies, hlgaps]]
    mo_energy, mo_coeff = np.linalg.eigh(L_inv @ H[-1] @ L_inv.T)
    mo_coeff = L_inv.T @ mo_coeff

    return energies, (logged_energies, hlgaps, mo_energy, mo_coeff, grid_coords, _grid_weights)
def DIIS(i, H, density_matrix, O, diis_history, opts):
    """One DIIS step: record the current error vector and Hamiltonian in the
    circular history buffers and return the extrapolated Hamiltonian plus the
    updated history. Safe to call inside jit/fori_loop (uses dynamic slices)."""
    # DIIS is an optional technique which improves DFT convergence by computing:
    #   H_{i+1} = c_1 H_i + ... + c_8 H_{i-8}  where  c=pinv(some_matrix)[0,:]
    # We thus like to think of DIIS as "fancy momentum".
    _V, _H, DIIS_H     = diis_history       # (diis_iters, N**2), (diis_iters, N**2), (diis_iters+1, diis_iters+1)
    diis_iters, d = _V.shape
    DIIS_head = i % _V.shape[0]             # int in {0, ..., diis_iters-1}
    sdf       = O @ density_matrix @ H      # (N, N)=(66,66) for C6H6.
    errvec    = sdf - sdf.T                 # (N, N)

    # Store current (errvec, H) at the ring-buffer head.
    _V = jax.lax.dynamic_update_slice(_V, errvec.reshape(1, d), (DIIS_head, 0)) # (diis_iters, N**2)=(9, 4356) for C6H6.
    _H = jax.lax.dynamic_update_slice(_H, H.reshape(1, d),      (DIIS_head, 0)) # (diis_iters, N**2)

    # Mask out history slots that have not been filled yet (first iterations).
    mask = jnp.where(np.arange(_V.shape[0]) < jnp.minimum(i+1, _V.shape[0]), jnp.ones(_V.shape[0], dtype=_V.dtype), jnp.zeros(_V.shape[0], dtype=_V.dtype))
    tmps = (_V.reshape(diis_iters, 1, d) @ errvec.reshape(1, d, 1)).reshape(-1) * mask # (diis_iters, )

    # Shapes in initial code depended on min(i, _V.shape[0]).
    # To allow jax.jit, we always use maximum dimension of _V.shape[0].
    # To get the same result, we need to mask out the additional rows/cols.
    # Mask alternative: pad with one.
    DIIS_H = jax.lax.dynamic_update_slice( DIIS_H, tmps.reshape(1, -1), (DIIS_head+1, 1) ) # (diis_iters+1, diis_iters+1)
    DIIS_H = jax.lax.dynamic_update_slice( DIIS_H, tmps.reshape(-1, 1), (1, DIIS_head+1) ) # (diis_iters+1, diis_iters+1)

    mask_         = jnp.concatenate([jnp.ones(1, dtype=mask.dtype), mask]) # (diis_iters+1,)
    masked_DIIS_H = DIIS_H * mask_.reshape(-1, 1) * mask_.reshape(1, -1)

    if opts.backend == "ipu":  c = pinv0(masked_DIIS_H, opts)                 # (diis_iters+1,)=10 for C6H6.
    else:                      c = jnp.linalg.pinv(masked_DIIS_H)[0, :]       # (diis_iters+1,)=10 for C6H6.

    H = (c[1:] @ _H).reshape(H.shape)                                         # (N, N)
    return H, (_V, _H, DIIS_H)                                                # (N, N)
def hlgap(L_inv, H, n_electrons_half, _np):
    """Return the HOMO-LUMO gap (Hartree) of Hamiltonian ``H``.

    ``L_inv`` whitens the generalized eigenproblem into a plain one; the gap
    is |e[n_electrons_half] - e[n_electrons_half - 1]| of the sorted orbital
    energies. ``_np`` selects the numpy-compatible backend.
    """
    whitened = L_inv @ H @ L_inv.T
    orbital_energies, _ = _np.linalg.eigh(whitened)
    lumo = orbital_energies[n_electrons_half]
    homo = orbital_energies[n_electrons_half - 1]
    return _np.abs(lumo - homo)
def linalg_eigh(x, opts):
    """Symmetric eigendecomposition returning (eigvals, eigvects).

    On the IPU backend (for matrices >= 6x6) uses tessellate_ipu's ipu_eigh,
    which requires an even dimension: odd inputs are zero-padded by one
    row/column and the spurious eigenpair is rolled out of the result
    afterwards. Otherwise falls back to jnp.linalg.eigh.
    """
    if opts.backend == "ipu" and x.shape[0] >= 6:
        from tessellate_ipu.linalg import ipu_eigh
        n = x.shape[0]
        pad = n % 2
        if pad:
            x = jnp.pad(x, [(0, 1), (0, 1)], mode='constant')
            #assert False
        eigvects, eigvals = ipu_eigh(x, sort_eigenvalues=True, num_iters=12)
        if pad:
            # Locate the eigenvector introduced by the padding (it has its
            # weight in the padded row) and remove it, preserving order.
            e1 = eigvects[-1:]
            col = jnp.argmax(e1)
            eigvects = jnp.roll(eigvects, -col-1)
            eigvects = eigvects[:, :-1]
            eigvects = jnp.roll(eigvects, -(-col))
            eigvects = eigvects[:-1]
            #assert False
    else:
        eigvals, eigvects = jnp.linalg.eigh(x)
    return eigvals, eigvects
def pinv0(a, opts): # take out first row
    """Return the first row of pinv(a), computed from an eigendecomposition.

    Eigenvalues with |lambda| <= cond are treated as zero; cond is
    9 * 1.1920929e-07, i.e. 9x float32 machine epsilon (presumably tied to
    the DIIS history size of 9 — TODO confirm).
    """
    cond = 9*1.1920929e-07
    eigvals, eigvecs = linalg_eigh(a, opts)
    inv_vals = jnp.where(jnp.abs(eigvals) > cond, 1/eigvals, 0)
    return eigvecs @ (inv_vals * eigvecs[0, :])
def grad_elec(weight, grid_AO, eri, s1, h1aos, natm, aoslices, mask, mo_energy, mo_coeff, mol):
    """Electronic part of the RHF/RKS nuclear gradient, (natm, 3).

    grid_AO here carries AOs with up to second derivatives (10 components);
    s1/h1aos are derivative overlap and core-Hamiltonian integrals, eri the
    first-derivative two-electron integrals; aoslices maps atoms to their AO
    ranges, mask/mo_* define the occupied orbitals.
    """
    # Electronic part of RHF/RKS gradients
    dm0  = 2 * (mo_coeff*mask) @ mo_coeff.T                                 # (N, N) = (66, 66) for C6H6.
    dme0 = 2 * (mo_coeff * mask*mo_energy) @ mo_coeff.T                     # (N, N) = (66, 66) for C6H6.

    # Code identical to exchange correlation.
    rho             = jnp.sum( grid_AO[:1] @ dm0 * grid_AO, axis=2)         # (10, grid_size) = (10, 45624) for C6H6.
    _, vrho, vgamma = b3lyp(rho, EPSILON_B3LYP)                             # (grid_size,) (grid_size,)
    V_xc            = jnp.concatenate([vrho.reshape(1, -1)/2, 4*vgamma.reshape(1, -1)*rho[1:4]], axis=0)  # (4, grid_size)

    vmat = grid_AO[1:4].transpose(0, 2, 1) @ jnp.sum(grid_AO[:4] * jnp.expand_dims(weight * V_xc, axis=2), axis=0) # (3, N, N)
    aos = jnp.concatenate([jnp.expand_dims(grid_AO[np.array([1,4,5,6])], 0), jnp.expand_dims(grid_AO[np.array([2,5,7,8])], 0), jnp.expand_dims(grid_AO[np.array([3,6,8,9])], 0)], axis=0) # (3, N, N)
    V_xc = - vmat - jnp.transpose(jnp.einsum("snpi,np->spi", aos, weight*V_xc), axes=(0,2,1)) @ grid_AO[0]  # (3, 4, grid_size, N)

    vj = - jnp.einsum('sijkl,lk->sij', eri, dm0)             # (3, N, N)
    vk = - jnp.einsum('sijkl,jk->sil', eri, dm0)             # (3, N, N)
    vhf = V_xc + vj - vk * .5 * HYB_B3LYP                    # (3, N, N)

    de = jnp.einsum('lxij,ij->lx', h1aos, dm0)               # (natm, 3)
    for k, ia in enumerate(range(natm)):
        # Accumulate per-atom contributions over that atom's AO slice.
        p0, p1 = aoslices[ia][2], aoslices[ia][3]
        de = de.at[k].add(jnp.einsum('xij,ij->x', vhf[:, p0:p1], dm0[p0:p1]) * 2)
        de = de.at[k].add(-jnp.einsum('xij,ij->x', s1[:, p0:p1], dme0[p0:p1]) * 2)
    return de
def grad_nuc(charges, coords):
    """Gradient of the nuclear-nuclear repulsion energy w.r.t. nuclear coordinates.

    Args:
        charges: (natm,) nuclear charges.
        coords: (natm, 3) nuclear positions.

    Returns:
        (natm, 3) array; row i is dE_nuc/d(coords[i]).
    """
    natm = charges.shape[0]
    pairwise_charges    = charges.reshape(natm, 1) * charges.reshape(1, natm)        # (natm, natm)
    pairwise_difference = coords.reshape(1, natm, 3) - coords.reshape(natm, 1, 3)    # (natm, natm, 3)
    pairwise_distances  = jnp.linalg.norm(pairwise_difference, axis=2) ** 3          # (natm, natm) |r_ij|^3
    # Guard self-interaction: set zero distances to inf so the division is finite.
    pairwise_distances  = jnp.where(pairwise_distances == 0, jnp.inf, pairwise_distances)  # (natm, natm)
    # `forces` renamed from `all`, which shadowed the builtin.
    forces = - pairwise_charges.reshape(natm, natm, 1) * pairwise_difference         # (natm, natm, 3)
    forces = forces / pairwise_distances.reshape(natm, natm, 1)                      # (natm, natm, 3)
    forces = forces.at[jnp.diag_indices(natm)].set(0)                                # (natm, natm, 3)
    return jnp.sum(forces, axis=0)                                                   # (natm, 3)
def grad(mol, coords, weight, mo_coeff, mo_energy):
    """Total nuclear gradient: electronic part (grad_elec) + nuclear repulsion
    part (grad_nuc). Integrals and derivative integrals are built on CPU with
    PySCF; returns an (natm, 3) array."""
    print(coords.shape, weight.shape)
    # Initialize DFT tensors on CPU using PySCF.
    ao = pyscf.dft.numint.NumInt().eval_ao(mol, coords, deriv=2)
    eri = mol.intor("int2e_ip1")
    s1  = - mol.intor('int1e_ipovlp', comp=3)
    kin = - mol.intor('int1e_ipkin', comp=3)
    nuc = - mol.intor('int1e_ipnuc', comp=3)
    mask = np.ones(mol.nao_nr())
    mask[mol.nelectron//2:] = 0
    aoslices = mol.aoslice_by_atom()
    h1 = kin + nuc
    def hcore_deriv(atm_id, aoslices, h1): # <\nabla|1/r|>
        _, _, p0, p1 = aoslices[atm_id]
        with mol.with_rinv_at_nucleus(atm_id):
            vrinv = mol.intor('int1e_iprinv', comp=3) #
            vrinv *= -mol.atom_charge(atm_id)
        vrinv[:,p0:p1] += h1[:,p0:p1]
        return vrinv + vrinv.transpose(0,2,1)
    N = h1.shape[1] # (3, N , N)
    h1aos = np.zeros((mol.natm, 3, N, N))
    for k, ia in enumerate(range(mol.natm)):
        p0, p1 = aoslices[ia,2:]
        h1aos[k] = hcore_deriv(ia, aoslices, h1)
    charges = np.zeros((mol.natm))
    coords = np.zeros((mol.natm,3))
    for j in range(mol.natm):
        charges[j] = mol.atom_charge(j)
        coords[j]= mol.atom_coord(j)
    #_grad_elec = jax.jit(grad_elec, static_argnames=["aoslices", "natm"], backend="cpu")
    _grad_elec = grad_elec
    _grad_nuc = jax.jit(grad_nuc, backend="cpu")
    return _grad_elec(weight, ao, eri, s1, h1aos, mol.natm, tuple([tuple(a) for a in aoslices.tolist()]), mask, mo_energy, mo_coeff, mol)  + _grad_nuc(charges, coords)
def pyscf_reference(mol_str, opts):
    """Run reference RKS DFT with plain PySCF for comparison against nanoDFT.

    Returns (energies_eV, hlgaps_eV, forces) as numpy arrays, one entry per
    SCF cycle for the first two.
    """
    from pyscf import __config__
    __config__.dft_rks_RKS_grids_level = opts.level
    mol = build_mol(mol_str, opts.basis)
    mol.max_cycle = opts.its
    mf = pyscf.scf.RKS(mol)
    mf.max_cycle = opts.its
    mf.xc = opts.xc
    mf.diis_space = 9
    if not opts.diis:  #
        mf.diis_space = 0
        mf.diis = False
    pyscf_energies = []
    pyscf_hlgaps = []
    lumo = mol.nelectron//2
    homo = lumo - 1
    # Collect per-cycle energy and HOMO-LUMO gap from PySCF's SCF callback.
    def callback(envs):
        pyscf_energies.append(envs["e_tot"]*HARTREE_TO_EV)
        hl_gap_hartree = np.abs(envs["mo_energy"][homo] - envs["mo_energy"][lumo]) * HARTREE_TO_EV
        pyscf_hlgaps.append(hl_gap_hartree)
        print("\rPYSCF: ", pyscf_energies[-1] , end="")
    mf.callback = callback
    mf.kernel()
    print("")
    forces = mf.nuc_grad_method().kernel()
    return np.array(pyscf_energies), np.array(pyscf_hlgaps), np.array(forces)
def print_difference(nanoDFT_E, nanoDFT_forces, nanoDFT_logged_E, nanoDFT_hlgap, pyscf_E, pyscf_forces, pyscf_hlgap):
    """Print a comparison of nanoDFT vs PySCF: energies, HOMO-LUMO gaps,
    per-iteration errors, energy terms, and force agreement (max abs
    difference and per-atom cosine similarity). Output only; returns None."""
    #TODO(HH): rename to match caller variable names
    print("pyscf_hlgap\t%15f"%( pyscf_hlgap[-1]))
    print("us_hlgap\t%15f"%(    nanoDFT_hlgap[-1]))
    print("err_hlgap\t%15f"%np.abs(pyscf_hlgap[-1]  - nanoDFT_hlgap[-1]))
    print("pyscf:\t\t%15f"%pyscf_E[-1])
    print("us:\t\t%15f"%nanoDFT_E[-1, 0])
    print("mus:\t\t%15f"%np.mean(nanoDFT_E[-10:, 0]))
    print("diff:\t\t%15f"%np.abs(pyscf_E[-1]-nanoDFT_E[-1, 0]))
    print("mdiff:\t\t%15f"%np.abs(pyscf_E[-1]-np.mean(nanoDFT_E[-10:, 0])), np.std(nanoDFT_E[-10:, 0]))
    print("chemAcc: \t%15f"%0.043)
    print("chemAcc/diff: \t%15f"%(0.043/np.abs(pyscf_E[-1]-nanoDFT_E[-1, 0])))
    print("chemAcc/mdiff: \t%15f"%(0.043/np.abs(pyscf_E[-1]-np.mean(nanoDFT_E[-10:, 0]))))
    print("")

    # Pad the (shorter) PySCF series with its final value so the
    # per-iteration comparison lines up with nanoDFT's iteration count.
    pyscf_E = np.concatenate([pyscf_E, np.ones(nanoDFT_E.shape[0]-pyscf_E.shape[0])*pyscf_E[-1]])
    pyscf_hlgap = np.concatenate([pyscf_hlgap, np.ones(nanoDFT_hlgap.shape[0]-pyscf_hlgap.shape[0])*pyscf_hlgap[-1]])
    print("%18s"%"", "\t".join(["%10s"%str("iter %i "%i) for i in np.arange(1, nanoDFT_E.shape[0]+1)[1::3]]))
    print("%18s"%"Error Energy [eV]", "\t".join(["%10s"%str("%.2e"%f) for f in (pyscf_E[1::3] - nanoDFT_E[1::3, 0]).reshape(-1)]))
    print("%18s"%"Error HLGAP [eV]", "\t".join(["%10s"%str("%.2e"%f) for f in (pyscf_hlgap[1::3]   - nanoDFT_hlgap[1::3]).reshape(-1)]))

    print()
    print("%18s"%"E_core [eV]", "\t".join(["%10s"%str("%.5f"%f) for f in (nanoDFT_E[1::3, 1]).reshape(-1)]))
    print("%18s"%"E_J_K [eV]", "\t".join(["%10s"%str("%.5f"%f) for f in (nanoDFT_E[1::3, 2]).reshape(-1)]))
    print("%18s"%"E_xc [eV]", "\t".join(["%10s"%str("%.5f"%f) for f in (nanoDFT_E[1::3, 3]).reshape(-1)]))
    print("%18s"%"E_nuc [eV]", "\t".join(["%10s"%str("%.5f"%f) for f in (nanoDFT_E[1::3, 4]).reshape(-1)]))

    # Forces
    print()
    print("np.max(|nanoDFT_F-PySCF_F|):", np.max(np.abs(nanoDFT_forces-pyscf_forces)))

    norm_X = np.linalg.norm(nanoDFT_forces, axis=1)
    norm_Y = np.linalg.norm(pyscf_forces, axis=1)
    dot_products = np.sum(nanoDFT_forces * pyscf_forces, axis=1)
    cosine_similarity = dot_products / (norm_X * norm_Y)
    print("Force cosine similarity:",cosine_similarity)
def build_mol(mol_str, basis_name):
    """Construct a neutral singlet PySCF Mole from an Angstrom geometry string."""
    molecule = pyscf.gto.mole.Mole()
    molecule.build(atom=mol_str, unit="Angstrom", basis=basis_name, spin=0, verbose=0)
    return molecule
def nanoDFT_options(
its: int = 20,
mol_str: str = "benzene",
float32: bool = False,
basis: str = "sto-3g",
xc: str = "b3lyp",
backend: str = "cpu",
level: int = 0,
multv: int = 2,
intv: int = 1,
threads: int = 1,
threads_int: int = 1,
diis: bool = True,
structure_optimization: bool = False, # AKA gradient descent on energy wrt nuclei
eri_threshold : float = 0.0,
ao_threshold: float = 0.0,
batches: int = 32,
ndevices: int = 1,
dense_ERI: bool = False,
v: bool = False, # verbose
profile: bool = False, # if we only want profile exit after IPU finishes.
vis_num_error: bool = False,
molecule_name: str = None
):
"""
nanoDFT
Args:
its (int): Number of Kohn-Sham iterations.
mol_str (str): Molecule string, e.g., "H 0 0 0; H 0 0 1; O 1 0 0;" or one of:
'benzene', 'methane', 'TRP', 'LYN', 'TYR', 'PHE', 'LEU', 'ILE', 'HIE', 'MET', 'GLN', 'HID', 'GLH', 'VAL', 'GLU', 'THR', 'PRO', 'ASN', 'ASH', 'ASP', 'SER', 'CYS',
'CYX', 'ALA', 'GLY'
float32 (bool) : Whether to use float32 (default is float64).
basis (str): Which Gaussian basis set to use.
xc (str): Exchange-correlation functional. Only support B3LYP
backend (str): Accelerator backend to use: "--backend cpu" or "--backend ipu".
level (int): Level of grids for XC numerical integration.
gdb (int): Which version of GDP to load {10, 11, 13, 17}.
multv (int): Which version of our einsum algorithm to use;comptues ERI@flat(v). Different versions trades-off for memory vs sequentiality
intv (int): Which version to use of our integral algorithm.
threads (int): For -backend ipu. Number of threads for einsum(ERI, dm) with custom C++ (trades-off speed vs memory).
threads_int (int): For -backend ipu. Number of threads for computing ERI with custom C++ (trades off speed vs memory).
eri_threshold (float): Zero out ERIs that are below the threshold in absolute value. Not supported for '--backend ipu'.
ao_threshold (float): Zero out grid_AO that are below the threshold in absolute value.
dense_ERI (bool): Whether to use dense ERI (s1) or sparse symmtric ERI.
"""
if molecule_name is None:
# use mol_str as a molecule name (in case it has not been provided)
# before mol_str CLI arg is preprocessed and overwritten
molecule_name = mol_str
# From a compound name or CID, get a list of its atoms and their coordinates
mol_str = utils.process_mol_str(mol_str)
if mol_str is None:
exit(1)
print(f"Minimum interatomic distance: {utils.min_interatomic_distance(mol_str)}") # TODO: dies for --mol_str methane
args = locals()
mol_str = args["mol_str"]
del args["mol_str"]
args = Namespace(**args)
from pyscf_ipu.experimental.device import has_ipu
import os
if has_ipu() and "JAX_IPU_USE_MODEL" in os.environ:
args.dense_ERI = True
args = namedtuple('DFTOptionsImmutable',vars(args).keys())(**vars(args)) # make immutable
if not args.float32:
jax.config.update('jax_enable_x64', not float32)
return args, mol_str
def main():
# Limit PySCF threads to mitigate problem with NUMA nodes.
jax.config.FLAGS.jax_platform_name = 'cpu'
import os
opts, mol_str = CLI(nanoDFT_options)
assert opts.xc == "b3lyp"
print("Precision: float32") if opts.float32 else print("Precision: float64")
if not opts.structure_optimization:
# Test Case: Compare nanoDFT against PySCF.
mol = build_mol(mol_str, opts.basis)
print(f"Number of Atomic Orbitals\t{mol.nao_nr():15d}")
print(f"Number of electrons\t{mol.nelectron:15d}")
nanoDFT_E, (nanoDFT_logged_E, nanoDFT_hlgap, mo_energy, mo_coeff, grid_coords, grid_weights) = nanoDFT(mol, opts)
nanoDFT_forces = grad(mol, grid_coords, grid_weights, mo_coeff, mo_energy)
pyscf_E, pyscf_hlgap, pyscf_forces = pyscf_reference(mol_str, opts)
print_difference(nanoDFT_E, nanoDFT_forces, nanoDFT_logged_E, nanoDFT_hlgap, pyscf_E, pyscf_forces, pyscf_hlgap)
if opts.vis_num_error is True:
from utils import save_plot
import sys
_plot_title = f"Created with: python {' '.join(sys.argv)}"
save_plot("num_error/", opts.molecule_name, opts.its, _plot_title)
else:
# pip install mogli imageio[ffmpeg] matplotlib
import mogli
import imageio
import matplotlib.pyplot as plt
opts = opts._replace(basis="6-31G")
p = np.array([[0,1,1], [0,2,2], [0,3,3],
[0,4,4], [0,5,5], [0,6,6]])
np.random.seed(42)
p = p + np.random.normal(0, 0.3, p.shape) # slightly break symmetry
A = ["H", "O", "H", "H", "O", "H"]
natm = p.shape[0]
os.makedirs("_tmp/", exist_ok=True)
E = []
ims = []
for i in range(20):
mol_str = "".join([f"{A[i]} {p[i]};".replace("[", "]").replace("]", "") for i in range(natm)])
mol = build_mol(mol_str, opts.basis)
nanoDFT_E, (nanoDFT_logged_E, nanoDFT_hlgap, mo_energy, mo_coeff, grid_coords, grid_weights) = nanoDFT(mol, opts)
f = open(f"_tmp/{i}.xyz", "w")
f.write(f"""{natm}\n{mol_str} {nanoDFT_E[-1, 0]}\n"""+"".join([f"{A[i]} {p[i]}\n".replace("[", "").replace("]", "") for i in range(natm)]))
f.close()
molecules = mogli.read(f'_tmp/{i}.xyz')
mogli.export(molecules[0], f'_tmp/{i}.png', width=400, height=400,
bonds_param=1.15, camera=((9, 0, 0),
(0, 0, 0),
(0, 9, 0)))
ims.append(imageio.v2.imread(f"_tmp/{i}.png/"))
E.append(nanoDFT_E[-1, 0])
nanoDFT_forces = grad(mol, grid_coords, grid_weights, mo_coeff, mo_energy)
p = p - nanoDFT_forces
print(nanoDFT_E[-1, 0], i)
writer = imageio.get_writer('_tmp/test.gif', loop=0, duration=3)
fig, ax = plt.subplots(1, 2, figsize=(10, 4))
for c, i in enumerate(ims):
for a in ax: a.cla()
ax[0].axis("off")
ax[1].set_ylabel("Energy [eV]")
ax[1].set_xlabel("Step Number in Structure Optimization")
ax[0].imshow(i)
ax[1].plot(E, label="energy [eV]")
ax[1].legend()
ax[1].plot([c, c], [np.min(E), np.max(E)], '-k')
plt.tight_layout()
plt.savefig("_tmp/tmp.jpg")
writer.append_data(imageio.v2.imread("_tmp/tmp.jpg"))
writer.close()
if __name__ == "__main__":
main()
| graphcore-research/pyscf-ipu | pyscf_ipu/nanoDFT/nanoDFT.py | nanoDFT.py | py | 36,015 | python | en | code | 31 | github-code | 36 | [
{
"api_name": "jax.numpy",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "numpy.shape",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "jax.lax.dynamic_update_slice",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "jax.lax",
... |
10731339024 | from db import model
from api import api
from datetime import datetime
import json
import webbrowser
from vkinder.settings import Settings
class APP:
def __init__(self, token, _id):
self.vk = api.VK(token)
self.user_id = str(self.vk.resolve_screen_name(_id))
self.db = model.DB(self.user_id)
self.settings = Settings(self.user_id)
@staticmethod
def user_input():
url = 'https://oauth.vk.com/authorize?client_id=7331062&display=page&redirect_uri=https://oauth.vk.com/blank.html&scope=friends,photos,groups&response_type=token&v=5.103'
webbrowser.open_new_tab(url)
token = input('Введите токен: ')
_id = input('Введите ваш id: ')
return APP(token, _id)
@staticmethod
def progress_bar(data, func, name, one=False, bar_len=50):
start = datetime.now()
data_len = len(data)
result = []
for i, value in enumerate(data):
p_bar = round(i / data_len * bar_len)
bar = f'[{"*"*p_bar + " "*(bar_len - p_bar)}]'
print(
f'{name:^15} {bar} {i} of {data_len} {datetime.now() - start}\r',
end=''
)
if one:
func(value)
else:
result.extend(func(**value))
print(f'{name} [{"*"*bar_len}] {data_len} of {data_len} {datetime.now() -start} done')
return result
def get_top_three_profile_photos(self, _id):
photos = self.vk.get_top_three_profile_photos(_id)
if not photos:
return 'Профиль закрыт или без фото'
def parse(photo):
return {
'w': 6,
'z': 5,
'y': 4,
'x': 3,
'm': 2,
's': 1,
}.get(photo['type'], 0)
return [max(photo['sizes'], key=parse)['url'] for photo in photos]
def out(self, users):
file_name = f'out_{self.user_id}.json'
with open(file_name, mode='w', encoding='utf-8') as file:
json.dump(
[
{f'https://vk.com/{user["domain"]}':
self.get_top_three_profile_photos(user['id'])}
for user in users
],
file, ensure_ascii=False, indent=4)
print(f'результат записан в {file_name}')
def check_settings(self):
obligatory_search_params = [
'age_from', 'age_to', 'sex', 'country', 'city'
]
for param in obligatory_search_params:
if param not in self.settings.search:
self.settings.search[param] =\
input(f'Нехватает {param}. Введите значение: ')
if 'group_id' not in self.settings.search:
load = input('Введите id групп через запятую или "load" для загрузки.')
if load == 'load':
self.settings.search['group_id'] =\
self.vk.get_groups(self.user_id)
else:
self.settings.search['group_id'] = load.split(', ')
| rychanya/vkinder | src/vkinder/app.py | app.py | py | 3,208 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "api.api.VK",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "api.api",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "db.model.DB",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "db.model",
"line_number": 14,
... |
69886416746 | import requests
import os
CHAT_ID = os.getenv('CHAT_ID')
TOKEN = os.getenv('TG_TOKEN')
print(f"TOKEN is {TOKEN}, CHAT_ID is {CHAT_ID}")
if not CHAT_ID or not TOKEN:
raise ValueError("Char ID or TOKEN was not specified")
TOKEN = TOKEN.replace("\n", "")
async def send_message(data: str):
"""
Send data to the telegrame bot
:param data: Data to send
:return: True if sent successfully
"""
send_text = 'https://api.telegram.org/bot' + TOKEN + '/sendMessage?chat_id=' \
+ CHAT_ID + '&parse_mode=Markdown&text=' + data
print(f"Send text was {send_text}")
response = requests.get(send_text)
return response.ok
| dievskiy/devops-kubernetes-course | part-5/502/broadcaster/send.py | send.py | py | 666 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "os.getenv",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 24,
"usage_type": "call"
}
] |
5495864560 | import pickle
from flask import Flask, request, app, jsonify,url_for, render_template
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
app=Flask(__name__)
## load the model
regmodel=pickle.load(open('regmodel.pkl','rb'))
scalar=pickle.load(open('scaling.pkl','rb'))
@app.route('/')
def home():
return render_template('home.html')
@app.route('/predict_api',methods=['POST'])
def predict_api():
data=request.json['data']
print(data)
## the data values receieved must be transformed into a list which is then changed into np array
## the numpy array is then reshaped to a single row, multiple columns(1,13)
print(np.array(list(data.values())).reshape(1,-1))
new_data=scalar.transform(np.array(list(data.values())).reshape(1,-1))
output=regmodel.predict(new_data)
# since this is a two dimensional array, we need output[0]
print(output[0])
return jsonify(output[0])
if __name__=="__main__":
app.run(debug=True)
| MithunMiranda/bostonhousepricing | app.py | app.py | py | 997 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.app",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "flask.Flask",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 11,
... |
74552235623 | import os
import json
import sqlite3
import datetime, time
import itertools
from common import util
import queue
import threading
from threading import Thread
import logging
import sqlite3
import datetime, time
import itertools
from common import util
class IDEBenchDriver:
def init(self, options, schema, driver_arg):
self.time_of_latest_request = 0
self.isRunning = False
self.requests = queue.LifoQueue()
self.config = json.load(open(os.path.join(os.path.dirname(__file__),'..','sqlite.config.json')))
def create_connection(self):
sqlite_file = self.config['dbFilename']
conn = sqlite3.connect(sqlite_file)
return conn
def sqlitefix(self, sql_statement):
if not "FLOOR" in sql_statement:
return sql_statement
else:
sql_statement=sql_statement.replace("FLOOR", "ROUND")
x=sql_statement.find("ROUND")
y=sql_statement.find(")",x)
output=sql_statement[:y]+" -0.5 "+sql_statement[y:]
#print(output,flush=True)
return output
def execute_vizrequest(self, viz_request, options, schema, result_queue):
viz = viz_request.viz
sql_statement = viz.get_computed_filter_as_sql(schema)
#calculate connection time
connection = self.conn
cursor = connection.cursor()
viz_request.start_time = util.get_current_ms_time()
#print(sql_statement,flush=True,end = '')
cursor.execute(self.sqlitefix(sql_statement))
data = cursor.fetchall()
viz_request.end_time = util.get_current_ms_time()
cursor.close()
results = {}
for row in data:
keys = []
for i, bin_desc in enumerate(viz.binning):
if "width" in bin_desc:
bin_width = bin_desc["width"]
keys.append(str(int(row[i])))
else:
keys.append(str(row[i]))
key = ",".join(keys)
results[key] = row[len(viz.binning):]
viz_request.result = results
result_queue.put(viz_request)
def process_request(self, viz_request, options, schema, result_queue):
self.requests.put((viz_request, options, schema, result_queue))
def process(self):
self.conn = self.create_connection()
while self.isRunning:
try:
request = self.requests.get(timeout=1)
viz_request = request[0]
options = request[1]
schema = request[2]
result_queue = request[3]
# only execute requests that are newer than the last one we processed (drops old/no longer needed queries)
if viz_request.expected_start_time < self.time_of_latest_request:
viz_request.dropped = True
result_queue.put(viz_request)
continue
self.time_of_latest_request = viz_request.expected_start_time
self.execute_vizrequest(viz_request, options, schema, result_queue)
except Exception as e:
# ignore queue-empty exceptions
pass
# close connection when done
self.conn.close()
def workflow_start(self):
self.isRunning = True
thread = Thread(target = self.process)
thread.start()
def workflow_end(self):
self.isRunning = False
| leibatt/crossfilter-benchmark-public | drivers/sqlite.py | sqlite.py | py | 3,498 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "queue.LifoQueue",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 2... |
31604178727 | import json
ores = [
'iron',
'coal',
'gold',
'copper',
'silver',
'redstone',
'diamond',
'lapis',
'emerald',
'quartz',
'tin',
'lead',
'nickel',
'zinc',
'aluminum',
'cobalt',
'osmium',
'iridium',
'uranium',
'ruby',
'sapphire',
'sulfur',
'cinnabar',
'potassium_nitrate',
'apatite',
'fluorite'
]
stones = [
'andesite',
'basalt',
'blackstone',
'calcite',
'deepslate',
'diorite',
'end_stone',
'granite',
'netherrack',
'stone',
'tuff',
'create_limestone',
'create_scoria',
'create_scorchia',
'quark_jasper',
'quark_limestone',
'quark_shale'
]
forge_ores = {
"replace": False,
"values" : []
}
forge_ores_specific = {
"raplace": False,
"values": []
}
for ore in ores :
for stone in stones :
forge_ores["values"].append('omniores:' + ore + '_ore_' + stone)
forge_ores_specific["values"].append('omniores:' + ore + '_ore_' + stone)
block_forge_ores_specific = open('F:/Creative/diversityMods/omniores/src/main/resources/data/forge/tags/blocks/ores/'+ore+'.json', 'w')
block_forge_ores_specific.write(json.dumps(forge_ores_specific, indent=2))
block_forge_ores_specific.close()
item_forge_ores_specific = open('F:/Creative/diversityMods/omniores/src/main/resources/data/forge/tags/items/ores/'+ore+'.json', 'w')
item_forge_ores_specific.write(json.dumps(forge_ores_specific, indent=2))
item_forge_ores_specific.close()
forge_ores_specific['values'] = []
block_forge_ores_file = open('F:/Creative/diversityMods/omniores/src/main/resources/data/forge/tags/blocks/ores.json', 'w')
block_forge_ores_file.write(json.dumps(forge_ores, indent=2))
block_forge_ores_file.close()
item_forge_ores_file = open('F:/Creative/diversityMods/omniores/src/main/resources/data/forge/tags/items/ores.json', 'w')
item_forge_ores_file.write(json.dumps(forge_ores, indent=2))
item_forge_ores_file.close() | N-Wither/omniores | src/main/python_data_generate/tag/ores.py | ores.py | py | 2,012 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "json.dumps",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 79,
... |
18873086609 | from django.shortcuts import redirect, render
from . import models
def index(req):
return render(req, 'home.html')
def listKategori(req):
if req.method == 'POST':
kategori_id = req.POST.get('kategori_id')
data = models.KategoriModel.objects.get(id=kategori_id)
data.delete()
return redirect('/kategori/')
return render(req, 'kategori/list-kategori.html', context={
'kategories': models.KategoriModel.objects.all()
})
def addKategori(req):
if req.method == 'POST':
nama = req.POST.get('nama')
data = models.KategoriModel()
data.nama = nama
data.save()
return redirect('/kategori/')
return render(req, 'kategori/add-form.html')
def editKategori(req, id):
if req.method == 'POST':
nama = req.POST.get('nama')
data = models.KategoriModel.objects.get(id=id)
data.nama = nama
data.save()
return redirect('/kategori/')
return render(req, 'kategori/edit-form.html', context={
'kategori': models.KategoriModel.objects.get(id=id)
})
def listToko(req):
if req.method == 'POST':
toko_id = req.POST.get('toko_id')
data = models.TokoModel.objects.get(id=toko_id)
data.delete()
return redirect('/toko/')
return render(req, 'toko/list-toko.html', context={
'tokos': models.TokoModel.objects.all()
})
def addToko(req):
if req.method == 'POST':
nama = req.POST.get('nama')
pemilik = req.POST.get('pemilik')
email = req.POST.get('email')
no_telp = req.POST.get('no_telp')
logo = req.FILES.get('logo')
alamat = req.POST.get('alamat')
data = models.TokoModel()
data.nama = nama
data.pemilik = pemilik
data.email = email
data.no_telp = no_telp
data.logo = logo
data.alamat = alamat
data.save()
return redirect('/toko/')
return render(req, 'toko/add-form.html')
def editToko(req, id):
if req.method == 'POST':
data = models.TokoModel.objects.get(id=id)
if req.FILES:
logo = req.FILES.get('logo')
data.logo = logo
nama = req.POST.get('nama')
pemilik = req.POST.get('pemilik')
email = req.POST.get('email')
no_telp = req.POST.get('no_telp')
alamat = req.POST.get('alamat')
data.nama = nama
data.pemilik = pemilik
data.email = email
data.no_telp = no_telp
data.alamat = alamat
data.save()
return redirect('/toko/')
return render(req, 'toko/edit-form.html', context={
'toko': models.TokoModel.objects.get(id=id)
})
def listProduct(req):
if req.method == 'POST':
product_id = req.POST.get('product_id')
data = models.ProdukModel.objects.get(id=product_id)
data.delete()
return redirect('/product/')
return render(req, 'product/list-product.html', context={
'products': models.ProdukModel.objects.all()
})
def addProduct(req):
if req.method == 'POST':
nama = req.POST.get('nama')
harga = req.POST.get('harga')
berat = req.POST.get('berat')
stok = req.POST.get('stok')
gambar = req.FILES.get('gambar')
kategori = req.POST.getlist('kategori')
deskripsi = req.POST.get('deskripsi')
data = models.ProdukModel()
data.nama = nama
data.harga = harga
data.berat = berat
data.stok = stok
data.gambar = gambar
data.deskripsi= deskripsi
data.save()
for i in kategori:
data.kategori.add(i)
return redirect('/products/')
return render(req, 'product/add-form.html', context={
'kategories': models.KategoriModel.objects.all()
})
def editProduct(req, id):
if req.method == 'POST':
data = models.ProdukModel.objects.get(id=id)
if req.FILES:
gambar = req.FILES.get('gambar')
data.gambar = gambar
nama = req.POST.get('nama')
harga = req.POST.get('harga')
berat = req.POST.get('berat')
stok = req.POST.get('stok')
kategori = req.POST.getlist('kategori')
deskripsi = req.POST.get('deskripsi')
data.nama = nama
data.harga = harga
data.berat = berat
data.stok = stok
data.deskripsi= deskripsi
data.save()
for i in kategori:
data.kategori.add(i)
return redirect('/product/')
productKategories = models.ProdukModel.objects.filter(id=id).values('kategori')
productKategoriID = []
for i in productKategories:
productKategoriID.append(i.get('kategori'))
return render(req, 'product/edit-form.html',context={
'product': models.ProdukModel.objects.get(id=id),
'kategories': models.KategoriModel.objects.all(),
'productKategori': productKategoriID
}) | zakiyul/2022django | bookstore/views.py | views.py | py | 4,596 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.shortcuts.render",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 14,
"usage_type": "call"
},
{
"api_name... |
7522166332 | import cv2
from mtcnn import MTCNN
capture = cv2.VideoCapture(0)
detector = MTCNN()
while True:
ret, frame = capture.read()
faces = detector.detect_faces(frame)
for single_faces in faces:
x, y, width, height = single_faces["box"]
left_eyeX, left_eyeY = single_faces["keypoints"]["left_eye"]
right_eyeX, right_eyeY = single_faces["keypoints"]["right_eye"]
noseX, noseY = single_faces["keypoints"]["nose"]
mouth_leftX, mouth_leftY = single_faces["keypoints"]["mouth_left"]
mouth_rightX, mouth_rightY = single_faces["keypoints"]["mouth_right"]
cv2.rectangle(frame, pt1=(x, y), pt2=(x + width, y + height), color=(255, 0, 0), thickness=3)
cv2.circle(frame, center=(left_eyeX, left_eyeY), color=(255, 0, 0), thickness=3, radius=2)
cv2.circle(frame, center=(right_eyeX, right_eyeY), color=(255, 0, 0), thickness=3, radius=2)
cv2.circle(frame, center=(noseX, noseY), color=(255, 0, 0), thickness=3, radius=2)
cv2.circle(frame, center=(mouth_leftX, mouth_leftY), color=(255, 0, 0), thickness=3, radius=2)
cv2.circle(frame, center=(mouth_rightX, mouth_rightY), color=(255, 0, 0), thickness=3, radius=2)
cv2.rectangle(frame, pt1=(x, y), pt2=(x + width, y + height), color=(255, 0, 0), thickness=3)
cv2.imshow("front_cam", frame)
if cv2.waitKey(1) & 0xFF == ord("b"):
break
cv2.destroyAllWindows() | iamSobhan/deep_learning | face_detection/face_video_detect.py | face_video_detect.py | py | 1,466 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "cv2.VideoCapture",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "mtcnn.MTCNN",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "cv2.rectangle",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "cv2.circle",
"line_numbe... |
6601688520 | from flask import render_template,request,redirect
from models import *
from forms import CardForm, StoryForm, NewsForm
from upload import save_file
from app import app
from extensions import db
import requests
import xml.etree.ElementTree as ET
@app.route('/')
def index():
story = Story.query.all()
card = Cards.query.all()
currency = ForeignCurrency.query.all()
news = News.query.all()
return render_template('index.html',story=story,cards=card,currency=currency,news=news)
@app.route('/cards/')
def card():
card = Cards.query.all()
story = Story.query.all()
return render_template('card.html',cards=card,story=story, page_name='cards')
@app.route("/loans/")
def credits():
card = Cards.query.all()
story = Story.query.all()
return render_template('kredit.html',page_name='credits',cards=card,story=story,)
@app.route('/add-card/', methods=['GET', 'POST'])
def addcard():
form = CardForm()
if request.method == 'POST':
print('post')
if form.validate:
card_title = form.title.data
card_desc = form.description.data
card_term = form.term.data
card_currency = form.currency.data
card_cashback = form.cashback.data
image_data = form.image.data
print(image_data)
card_image = save_file(image_data)
card = Cards(card_title,card_desc,card_term,card_currency,card_cashback,card_image)
card.save()
print(card)
return redirect('/cards')
return render_template('card_add.html', form=form)
@app.route('/add-story/', methods=['GET', 'POST'])
def addStory():
form = StoryForm()
if request.method == 'POST':
if form.validate_on_submit():
story_tip = form.tip.data
story_description = form.description.data
story_image = save_file(form.image.data)
story_color = form.color.data
storys = Story(story_tip,story_description,story_image,story_color)
storys.save()
return redirect('/cards')
return render_template('story_add.html', form=form)
@app.route('/valyuta/')
def loadRSS():
url = f'https://www.cbar.az/currencies/{datetime.datetime.now().strftime("%d.%m.%Y")}.xml'
response = requests.get(url, stream=True)
tree = ET.fromstring(response.content)
dom = tree.findall('ValType')
db.session.query(ForeignCurrency).delete()
db.session.commit()
for attr_type in dom:
name = attr_type.get('Type')
if name == 'Xarici valyutalar':
valute = attr_type.findall('Valute')
for code in valute:
code_name = code.get('Code')
if code_name == 'USD' or code_name == "AZN" or code_name == "EUR":
nominal = code.find('Nominal').text
name = code.find('Name').text
course = code.find('Value').text
currency = ForeignCurrency(code_name, nominal, name, course)
currency.save()
return redirect('/cards')
@app.route('/add-news/', methods=['GET', 'POST'])
def addnews():
form = NewsForm()
if request.method == 'POST':
if form.validate_on_submit():
news_title = form.title.data
news = News(news_title)
news.save()
return render_template('news_add.html', form=form) | Emrahgs/Yelo-Bank-Flask | controllers.py | controllers.py | py | 3,427 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "flask.render_template",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "app.app.route",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "app.app",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
... |
2116048955 | from mirl.agents.td3_agent import TD3Agent
import mirl.torch_modules.utils as ptu
import torch.nn.functional as F
import copy
from mirl.utils.logger import logger
class TD3BCAgent(TD3Agent):
def __init__(
self,
env,
policy,
qf,
qf_target,
pool=None,
normalize_obs=True,
bc_alpha=2.5,
**td3_kwargs
):
super().__init__(
env,
policy,
qf,
qf_target,
**td3_kwargs
)
self.bc_alpha = bc_alpha
self.normalize_obs = normalize_obs
if normalize_obs:
assert pool is not None
assert pool.compute_mean_std
self.obs_mean, self.obs_std = pool.get_mean_std()['observations']
self.obs_mean = ptu.from_numpy(self.obs_mean)
self.obs_std = ptu.from_numpy(self.obs_std)
def process_obs(self, obs):
if self.normalize_obs:
obs = obs = (obs-self.obs_mean)/(self.obs_std+1e-6)
return obs
def step_explore(self, o, **kwargs):
raise NotImplementedError
def step_exploit(self, o, **kwargs):
with self.policy.deterministic_(True):
o = ptu.from_numpy(o)
o = self.process_obs(o)
a,_ = self.policy.action(o, **kwargs)
a = ptu.get_numpy(a)
return a, {}
def compute_policy_loss(
self, obs, new_action,
origin_action, v_pi_kwargs={}
):
q_new_action, _ = self.qf.value(
obs,
new_action,
return_ensemble=False,
**v_pi_kwargs
)
q_pi_mean = q_new_action.mean()
policy_loss = -q_pi_mean
policy_info = self._log_policy_info(
new_action, policy_loss, q_pi_mean)
### for td3bc ###
bc_lambda = self.bc_alpha / q_pi_mean.detach().abs().mean()
bc_loss = F.mse_loss(new_action, origin_action)
policy_loss = policy_loss*bc_lambda + bc_loss
if self._log_tb_or_not():
logger.tb_add_scalar("policy/bc_loss", bc_loss, self.num_train_steps)
###### end ######
return policy_loss, policy_info
def train_from_torch_batch(self, batch):
rewards = batch['rewards']
terminals = batch['terminals']
obs = batch['observations']
actions = batch['actions']
next_obs = batch['next_observations']
obs = self.process_obs(obs)
next_obs = self.process_obs(next_obs)
self.log_batch(rewards, terminals)
################
# update critic #
################
q_target = self.compute_q_target(next_obs, rewards, terminals, self.next_v_pi_kwargs)
qf_loss, train_qf_info = self.compute_qf_loss(obs, actions, q_target)
self.qf_optimizer.zero_grad()
qf_loss.backward()
self.log_critic_grad_norm()
self.qf_optimizer.step()
if self.num_train_steps % self.target_update_freq == 0:
self._update_target(self.soft_target_tau)
self.train_info.update(train_qf_info)
################
# update actor #
################
if self.num_train_steps % self.policy_update_freq == 0:
new_action, action_info = self.policy.action(obs, **self.current_sample_kwargs)
policy_loss, train_policy_info = self.compute_policy_loss(
obs,
new_action,
actions, #NOTE: origin action for bc
self.current_v_pi_kwargs
)
self.policy_optimizer.zero_grad()
policy_loss.backward()
self.policy_optimizer.step()
self.train_info.update(train_policy_info)
#####################
# update statistics #
#####################
if self._need_to_update_eval_statistics:
self._need_to_update_eval_statistics = False
self.eval_statistics.update(self.train_info)
self.log_train_info()
self.num_train_steps += 1
return copy.deepcopy(self.train_info) | QiZhou1997/MIRL | mirl/agents/td3bc_agent.py | td3bc_agent.py | py | 4,104 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "mirl.agents.td3_agent.TD3Agent",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "mirl.torch_modules.utils.from_numpy",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "mirl.torch_modules.utils",
"line_number": 33,
"usage_type": "name"
},... |
12267770021 | # coding=utf8
from youtubesearchpython import Search
import pandas as pd
import numpy as np
search_query_test = 'Кончится Лето Kino'
def find_first_vid(search_query):
allSearch = Search(search_query, limit = 1)
result_dict = allSearch.result()
result_dict2 = list(result_dict.values())[-1]
result_dict3 = result_dict2[-1]
result_dict4 = list(result_dict3.values())[-2]
return result_dict4
df_songs = pd.read_csv('song_links_results.csv', encoding='utf8')
df_list_songs = df_songs.values.tolist()
song_list = []
for i in df_list_songs:
song_list.append(i[1])
from pytube import YouTube
import os
from pathlib import Path
def youtube2mp3 (url,outdir):
# url input from user
yt = YouTube(url)
##@ Extract audio with 160kbps quality from video
video = yt.streams.filter(abr='160kbps').last()
##@ Downloadthe file
out_file = video.download(output_path=outdir)
base, ext = os.path.splitext(out_file)
new_file = Path(f'{base}.mp3')
os.rename(out_file, new_file)
##@ Check success of download
if new_file.exists():
print(f'{yt.title} has been successfully downloaded.')
else:
print(f'ERROR: {yt.title}could not be downloaded!')
'''
for song in song_list[1619:]:
print(song_list.index(song))
try:
youtube2mp3(song,'dirpath')
except FileExistsError:
continue
except AttributeError:
continue
except KeyError:
continue
df_convert = pd.read_csv('converted_spotify_music_list.csv', encoding='utf8')
#df.apply(lambda x: pd.lib.infer_dtype(x.values))
df_titles_artists = df_convert[['Track Name','Artist Name(s)']]
df_titles_artists.astype(str)
df_titles_artists["separator"] = '-'
df_titles_artists["combined_col"] = df_titles_artists["Track Name"] + df_titles_artists["separator"] + df_titles_artists["Artist Name(s)"]
df_combined = df_titles_artists["combined_col"]
print(df_combined)
df_combined = df_combined.dropna(axis=0)
titles_artists_list = df_combined.values.tolist()
#print(titles_artists_list)
song_link_list = []
#titles_artists_list_subset = titles_artists_list[0:50]
titles_artists_list_subset = titles_artists_list
for song in titles_artists_list_subset:
print('trying:',song)
result_link = find_first_vid(song)
song_link_list.append(result_link)
df_song_links = pd.DataFrame(song_link_list)
df_song_links.to_csv('song_links_results.csv', encoding='utf-8')
''' | WildLegolas/spotify_csv_to_mp3 | youtube_vid_finder.py | youtube_vid_finder.py | py | 2,573 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "youtubesearchpython.Search",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "pytube.YouTube",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "os.path.spl... |
39156552883 | import re
from math import log
from time import sleep
import subprocess
from io import StringIO
from pathlib import Path
from operator import itemgetter
from Bio import SearchIO, SeqIO
from Bio.Blast import NCBIXML, NCBIWWW
from RecBlast import print, merge_ranges
from RecBlast.WarningsExceptions import *
class Search(object):
    """Callable dispatcher for sequence-similarity searches (BLAST or BLAT).

    An instance is bound to one search type at construction ('blastn',
    'blastp', 'blastx', 'tblastx', 'tblastn', 'blat', 'tblat',
    'blat-transcript', or 'tblat-transcript'). Calling the instance either
    runs the search, or — when `database` is a path to previously saved
    results — loads and returns those results instead.
    """

    def __init__(self, search_type):
        # The search program to dispatch to (see class docstring for options).
        self.search_type = search_type

    def __call__(self, seq_record, species, database, database_path, local,
                 indent, perc_ident, verbose, database_port=None,
                 expect=None, megablast=True, n_threads=1, write=False, filetype=None,
                 **kwargs):
        """Run the configured search, or load saved results from `database`.

        :param seq_record: Query sequence (SeqRecord, or path to a sequence file).
        :param str species: Species whose sequence database will be queried.
        :param database: One of: a Path/str pointing at saved results to load;
            the string 'stop' (aborts via StopRecBlast); 'auto' or
            'auto-transcript' (auto-detect the database); or a species->database
            dict (BLAST only).
        :param database_path: Folder containing the search databases.
        :param bool local: Run BLAST locally rather than on NCBI servers.
        :param int indent: Indent level for pretty-printed output.
        :param perc_ident: Minimum percent identity for hits.
        :param int verbose: Verbosity level.
        :param database_port: gfServer port, or a species->port dict (BLAT only).
        :param expect: Maximum e-value (BLAST only).
        :param bool megablast: Use megablast for blastn searches.
        :param int n_threads: Threads to allocate to a local BLAST.
        :param bool write: Unused; retained for interface compatibility.
        :param filetype: Format of seq_record when it is a file path.
        :return: (record, error) tuple from the underlying search, or the
            loaded record when `database` points at a saved results file.
        :raises StopRecBlast: When database == 'stop'.
        :raises SearchEngineNotImplementedError: For unknown search types.
        """
        if isinstance(database, Path):
            return self.load(database)
        elif isinstance(database, str) and database not in ('stop', 'auto', 'auto-transcript'):
            # FIX: 'auto'/'auto-transcript' were previously swallowed by this
            # branch and treated as a results-file path, which made database
            # auto-detection unreachable; they must fall through to blast_prep.
            return self.load(Path(database))
        elif database == 'stop':
            raise StopRecBlast()
        elif self.search_type in ["blastn", "blastp", "blastx", "tblastx", "tblastn"]:
            if verbose > 1:
                print(self.search_type, 'was selected.', indent=indent)
            dt = self.blast_prep(search_type=self.search_type, db_loc=database_path, database=database,
                                 species=species, verbose=verbose, indent=indent)
            # blast_prep may return a Path (auto-detected) or a plain database
            # name (e.g. from a species->database dict); only Paths have .name.
            db_name = dt.name if isinstance(dt, Path) else str(dt)
            return self.blast_run(seq_record=seq_record, species=species, database=db_name, filetype=filetype,
                                  blast_type=self.search_type, local_blast=local, expect=expect, megablast=megablast,
                                  use_index=False, perc_ident=perc_ident, verbose=verbose, indent=indent,
                                  n_threads=n_threads, blastdb=database_path, outtype=5, return_raw=False,
                                  **kwargs)
        elif self.search_type in ['blat', 'tblat', 'blat-transcript', 'tblat-transcript']:
            if verbose > 1:
                print(self.search_type, 'was selected.', indent=indent)
            port = self.blat_prep(database_port=database_port, species=species, verbose=verbose, indent=indent)
            return self.blat_run(seq_record=seq_record, local=local, port=port,
                                 filetype=filetype, blat_type=self.search_type, perc_ident=perc_ident,
                                 verbose=verbose, indent=indent, blatdb=database_path, outtype='pslx')
        else:
            raise SearchEngineNotImplementedError('Invalid selection for search type!')

    @staticmethod
    def blast_run(seq_record, species, database, blast_type, filetype="fasta",
                  local_blast=False, expect=0.005, megablast=True, use_index=False, perc_ident=75,
                  verbose=True, indent=0, n_threads=1, blastdb='/usr/db/blastdb/', outtype=5,
                  return_raw=False, **kwargs):
        """A wrapper function for BLAST searches.

        :param seq_record: The record containing the query sequence for the search. Can be either a SeqIO.SeqRecord or
                           a string with the file location.
        :param str species: The species whose sequence database will be queried.
        :param Union[dict, str, Path] database: The name of the database to be used in the search.
        :param str blast_type: Type of BLAST search being performed.
        :param str filetype: Filetype of seq_record (if seq_record is a SeqRecord object, leave as default).
                             [default: 'fasta']
        :param bool local_blast: Should the search be conducted locally or on remote servers? (BLAT searches are always
                                 local.) [Default: False]
        :param float expect: Highest expect value of BLAST results to be returned. [Default: 0.005]
        :param bool megablast: Should MegaBLAST be used for nucleotide searches? [Default: True]
        :param bool use_index: Should BLAST use indexes associated with the database files? [Default: False]
        :param int perc_ident: Minimum percent identity required of results to be returned [Default: 75]
        :param bool verbose: Verbose output? [Default: True]
        :param int indent: Indent level for pretty print. [Default: 0]
        :param int n_threads: Number of threads to allocate for BLAST [Default: 1]
        :param str blastdb: Path of databases for either BLAST or BLAT. [Default: '/usr/db/blastdb']
        :param int outtype: Output type. (see options for BLAST and BLAT) [Default: 5]
        :param bool return_raw: Return raw output rather than processed BioBlastRecord? [Default: False]
        :param kwargs: Additional keyword arguments to pass on to BLAST/BLAT.
        :return: blast_record, blast_err
        """
        if isinstance(seq_record, SeqIO.SeqRecord):
            pass
        else:
            seq_record = SeqIO.read(seq_record, filetype)
        args = dict()
        if verbose:
            print("Now starting BLAST...", indent=indent)
        if local_blast:
            # Build up the BLAST command-line arguments:
            args.update({'-db': str(database), '-evalue': expect,
                         '-outfmt': str(outtype),
                         '-num_threads': n_threads})
            if blast_type == 'blastn':
                if megablast:
                    args['-task'] = 'megablast'
                if use_index:
                    args['-use_index'] = use_index
                # -perc_identity is only accepted by blastn, not the other flavors.
                args['-perc_identity'] = perc_ident
            # Flatten the {flag: value} dict into an argv-style list.
            args_expanded = []
            for flag, value in args.items():
                args_expanded.extend((flag, value))
            if verbose:
                print('Running BLAST locally...', indent=indent)
                print('Options:', indent=indent)
                print(args_expanded, indent=indent + 1)
            if blast_type in ["blastn", "blastp", "blastx", "tblastx", "tblastn"]:
                blast_cline = [blast_type] + args_expanded
                try:
                    # The query is fed on stdin (BLAST+ reads the query from
                    # stdin when -query is omitted); check_output returns a str
                    # because universal_newlines=True.
                    blast_result = subprocess.check_output([str(i) for i in blast_cline],
                                                           input=seq_record.format('fasta'),
                                                           universal_newlines=True, cwd=blastdb)
                    blast_err = None
                except subprocess.CalledProcessError:
                    raise
            else:
                raise SearchError("Invalid blast choice!")
        else:
            args.update(dict(program=str(blast_type), database=str(database), sequence=seq_record.format('fasta'),
                             entrez_query='"{}"[ORGN]'.format(species), expect=expect, perc_ident=perc_ident))
            # FIX: use 'and' instead of bitwise '&', which raises TypeError for
            # non-bool truthy values of megablast.
            if megablast and blast_type == 'blastn':
                args['megablast'] = 'True'
            if kwargs:
                args.update(**kwargs)
            if verbose:
                print('Submitting Remote BLAST! Options passed:', indent=indent)
                for k, v in args.items():
                    print('{0}\t=\t{1}'.format(k, v), indent=indent + 1)
            try:
                blast_result = NCBIWWW.qblast(**args)
                blast_err = None
            except Exception as err:
                print(type(err), err)
                raise err
        if verbose:
            print('Done with Blast!', indent=indent)
        if return_raw:
            return blast_result, blast_err
        # Parse the raw XML into a Bio.Blast record.
        if isinstance(blast_result, StringIO):
            blast_record = NCBIXML.read(blast_result)
        else:
            try:
                with StringIO(''.join(blast_result)) as fin:
                    blast_record = NCBIXML.read(fin)
            except Exception as err:
                print('Error reading Blast Results! Aborting!', indent=indent)
                print('Error details:\n', err, indent=indent)
                raise err
        return blast_record, blast_err

    @staticmethod
    def blat_run(seq_record, port, local="localhost", filetype="fasta", blat_type='blat', perc_ident=None,
                 verbose=True, indent=0, blatdb='/usr/db/blastdb/', outtype='pslx'):
        """A wrapper function for BLAT searches via a running gfServer.

        :param seq_record: The record containing the query sequence for the search. Can be either a SeqIO.SeqRecord or
                           a string with the file location.
        :param int port: Port of the gfServer to be queried.
        :param str local: Host address of the gfServer.
        :param str filetype: Filetype of seq_record (if seq_record is a SeqRecord object, leave as default).
                             [default: 'fasta']
        :param str blat_type: 'blat' for DNA queries, 'tblat' for protein-vs-translated-DNA queries.
        :param int perc_ident: Minimum percent identity required of results to be returned [Default: None -> 0]
        :param bool verbose: Verbose output? [Default: True]
        :param int indent: Indent level for pretty print. [Default: 0]
        :param str blatdb: Working directory for gfClient. [Default: '/usr/db/blastdb']
        :param str outtype: Output type (pslx, psl, blast8, blast9, or blast). [Default: 'pslx']
        :return: blat_record, blat_err
        :raises SearchError: On gfClient errors or an invalid outtype.
        :raises NoHitsError: When the query produced no results.
        """
        if isinstance(seq_record, SeqIO.SeqRecord):
            pass
        elif isinstance(seq_record, str):
            seq_record = SeqIO.read(seq_record, filetype)
        else:
            raise TypeError('seq_record was of type {}, must be either '
                            'a str with filepath or a SeqRecord object!'.format(type(seq_record)))
        if verbose:
            print("Now starting BLAT...", indent=indent)
        if verbose > 1:
            print('Search Type: ', blat_type, indent=indent)
        # gfClient reads the query from stdin and writes results to stdout.
        args_expanded = ['gfClient', local, str(port), '/', '/dev/stdin', '/dev/stdout']
        args_expanded += ['-t=dnax', '-q=prot'] if blat_type.lower() == 'tblat' else []
        # FIX: minIdentity previously lacked its leading dash, so gfClient
        # treated it as a positional argument instead of an option.
        args_expanded += ['-minIdentity={}'.format(perc_ident if perc_ident else 0),
                          '-out={}'.format(outtype)]
        try:
            if verbose > 1:
                print('BLAT command:', indent=indent)
                print(' '.join(args_expanded), indent=indent + 1)
            blat = subprocess.Popen(args_expanded, stdout=subprocess.PIPE,
                                    universal_newlines=True, cwd=blatdb,
                                    stdin=subprocess.PIPE, stderr=subprocess.PIPE)
            blat_raw, blat_raw_err = blat.communicate(input=seq_record.format('fasta'))
            if blat_raw_err:
                raise SearchError(blat_raw_err)
            # Strip the trailing line of the raw output before parsing.
            head = subprocess.Popen(["head", "-n", "-1"], universal_newlines=True, stdin=subprocess.PIPE,
                                    stdout=subprocess.PIPE)
            # head's stderr is not piped, so blat_err is always None here.
            blat_result, blat_err = head.communicate(input=blat_raw)
            if verbose > 2:
                print(blat_result, indent=indent)
            if verbose:
                print('Done!', indent=indent)
        except subprocess.CalledProcessError:
            raise
        blat_record = None
        with StringIO(blat_result) as fin:
            try:
                if outtype == 'pslx':
                    blat_record = SearchIO.read(fin, format='blat-psl', pslx=True)
                elif outtype == 'psl':
                    blat_record = SearchIO.read(fin, format='blat-psl')
                elif outtype == 'blast8':
                    blat_record = SearchIO.read(fin, format='blast-tab')
                elif outtype == 'blast9':
                    blat_record = SearchIO.read(fin, format='blast-tab', comments=True)
                elif outtype == 'blast':
                    blat_record = SearchIO.read(fin, format='blast-xml')
                else:
                    raise SearchError('Invalid out type')
            except ValueError:
                if verbose:
                    print('No Query Results were found in handle for seq_record {}!'.format(seq_record.id))
                raise NoHitsError('No Query Results were found in handle for seq_record {}!'.format(seq_record.id))
            except Exception as err:
                print('Error reading BLAT results! Aborting!')
                print('Error details:\n')
                raise err
        return blat_record, blat_err

    @staticmethod
    def blat_prep(database_port, species, verbose, indent):
        """Resolve the gfServer port to query for the given species.

        :param database_port: A species->port dict, an int port, or a numeric string.
        :param str species: Species used to index a dict of ports.
        :param int verbose: Verbosity level.
        :param int indent: Indent level for pretty-printed output.
        :return int: The resolved port.
        :raises SearchError: If no port can be resolved.
        """
        if isinstance(database_port, dict):
            try:
                blat_port = database_port[species]
                if verbose > 1:
                    print('Using port {0} for gfServer of species {1}.'.format(blat_port, species), indent=indent)
            except KeyError:
                raise SearchError('No 2bit found for species {}!'.format(species))
        elif isinstance(database_port, int):
            blat_port = database_port
        elif isinstance(database_port, str):
            try:
                blat_port = int(database_port)
            except ValueError:
                raise SearchError('Invalid option "{}" was passed to database_port! database_port must be '
                                  'either a dictionary of species-port pairs or an integer!'.format(database_port))
        else:
            raise SearchError('Invalid option of type "{}" was passed to database_port! database_port must be '
                              'either a dictionary of species-port pairs or an '
                              'integer!'.format(str(type(database_port))))
        return blat_port

    @staticmethod
    def blast_prep(search_type, database, species, verbose, indent, db_loc):
        """Resolve the BLAST database to use for the given species.

        :param str search_type: BLAST flavor, used to pick a database during auto-detection.
        :param database: 'auto'/'auto-transcript' (auto-detect), a species->database
            dict, or an explicit database name/Path passed through unchanged.
        :param str species: Species whose database should be used.
        :param int verbose: Verbosity level.
        :param int indent: Indent level for pretty-printed output.
        :param db_loc: Folder searched during auto-detection.
        :return: The resolved database (str or Path).
        :raises SearchError: If no database can be resolved.
        """
        if database == 'auto' or database == 'auto-transcript':
            if verbose > 1:
                print('Blast type set to auto!', indent=indent)
            try:
                blast_db = get_searchdb(search_type=search_type, species=species, db_loc=db_loc,
                                        verbose=verbose, indent=indent + 1)
            except Exception:
                raise SearchError('No BLAST database was found for species {}!'.format(species))
        elif isinstance(database, dict):
            try:
                blast_db = database[species]
            except KeyError:
                raise SearchError('No BLAST database was found for species {}!'.format(species))
            if verbose > 1:
                print('Using {} as BLAST database!'.format(blast_db), indent=indent)
        elif isinstance(database, (str, Path)):
            blast_db = database
        else:
            raise SearchError('Invalid type given for database!')
        return blast_db

    @staticmethod
    def load(database):
        """Load previously saved search results from a PSL, PSLX, or BLAST-XML file.

        :param Path database: Path to the saved results file; its suffix selects the parser.
        :return: The parsed SearchIO record.
        :raises SearchError: If the file is missing or has an unrecognized suffix.
        """
        try:
            if database.exists() and database.is_file():
                rec = None
                with database.open('r') as forward_blasthits:
                    if database.suffix == '.psl':
                        rec = SearchIO.read(forward_blasthits, 'blat-psl')
                    elif database.suffix == '.pslx':
                        rec = SearchIO.read(forward_blasthits, 'blat-psl', pslx=True)
                    elif database.suffix == '.xml':
                        rec = SearchIO.read(forward_blasthits, 'blast-xml')
                    else:
                        raise SearchError('Database file "{}" could not be loaded - '
                                          'Must be either a PSL, PSLX, or BLAST-XML file!'.format(str(database)))
            else:
                raise FileNotFoundError()
        except FileNotFoundError:
            raise SearchError('Database file "{}" was not found!'.format(str(database)))
        return rec
def id_search(id_rec, id_type='brute', verbose=2, indent=0, custom_regex=None, regex_only=False):
    """Parse a sequence identifier string into its component parts.

    Recognizes several common identifier formats (GI numbers, RefSeq-style
    accessions such as 'XP_010883249.1', scaffold/chromosome names such as
    'scaffold_145' or 'chrX', assembly IDs such as 'KN678312.1', and plain
    gene symbols such as 'TP53'), optionally followed by a sequence range
    '[:start-end]', a strand '(+)', '(-)', '(0)' or '(N)', and a numeric score.

    :param str id_rec: The identifier string to parse.
    :param str id_type: 'brute' to try all known patterns in order, or one of
        'gi', 'accession', 'scaffold', 'id', 'chr', 'assembly',
        'assembly_broad', 'symbol' to force a specific pattern.
    :param int verbose: Verbosity level (0 silences all output).
    :param int indent: Indent level for pretty-printed output.
    :param custom_regex: A pre-compiled regex used instead of the built-in
        patterns (forces id_type='custom').
    :param bool regex_only: If True, return only the matching compiled pattern.
    :return: (pattern_dict, identifier, (start, end, strand, score), id_type),
        or just the compiled pattern when regex_only=True.
    :raises IDError: If the identifier cannot be parsed.
    """
    # All patterns are raw strings. FIX: in the original, the non-raw '\b' in
    # 'assembly_broad' was interpreted by Python as a literal backspace
    # character (\x08) rather than the intended regex word boundary.
    p = dict(gi=re.compile(r'(\Agi[| _:]+[0-9.]+)'
                           r'([| \t:_])?\[?(:?\d+-?\d+)?\]?([| \t:_])?(.*)'),
             accession=re.compile(r'(\A[AXNYZ][MWRCPGTZ][| _:]+[0-9.]+|\Aref[| _:]+[0-9.]+)'
                                  r'([| \t:_])?\[?(:?\d+-?\d+)?\]?([| \t:_])?(.*)'),
             scaffold=re.compile(r'(\Ascaffold[| _:]+[0-9.]+)'
                                 r'([| \t:_])?\[?(:?\d+-?\d+)?\]?([| \t:_])?(.*)'),
             id=re.compile(r'(\Aid[| _:]*[0-9.]+)'
                           r'([| \t:_])?\[?(:?\d+-?\d+)?\]?([| \t:_])?(.*)'),
             chr=re.compile(r'(\Achr[| _:]*[A-Za-z0-9.]+)'
                            r'([| \t:_])??\[?(:?\d+-?\d+)?\]?([| \t:_])?(.*)'),
             assembly=re.compile(r'(\A[A-Za-z]+[0-9.]+)'
                                 r'([| \t:_])?\[?(:?\d+-?\d+)?\]?([| \t:_])?(.*)'),
             assembly_broad=re.compile(r'(\b[ALYB]+[0-9.]+)'
                                       r'([| \t:_])?\[?(:?\d+-?\d+)?\]?([| \t:_])?(.*)'),
             symbol=re.compile(r'(\A\S+)([| \t:_])?\[?(:?\d+-?\d+)?\]?([| \t:_])?(.*)'),
             seq_range=re.compile(r':?(\d+)-(\d+)'),
             strand=re.compile(r'(\([-+0N]\))'),
             score=re.compile(r'\d+')
             )
    if custom_regex is not None:
        p = {'custom': custom_regex}
        id_type = 'custom'

    # Begin search:
    if verbose > 1:
        print('ID Loaded, performing regex search for identifiers...', indent=indent)
        print('ID type: ', id_type, indent=indent)
    if id_type == 'brute':
        # Try the more specific patterns first; 'symbol' is the catch-all.
        for tmp_type in ['accession', 'gi', 'scaffold', 'id', 'chr', 'assembly', 'assembly_broad', 'symbol']:
            if bool(p[tmp_type].findall(id_rec)):
                if verbose > 1:
                    print('Brute Force was set, tested strings for all pre-registered IDs.', indent=indent)
                    print('ID was selected as type {0}!'.format(tmp_type), indent=indent + 1)
                if regex_only:
                    return p[tmp_type]
                else:
                    return id_search(id_rec=id_rec, id_type=tmp_type, verbose=verbose, indent=indent)
        raise IDError('Couldn\'t identify the id type of line: {}!'.format(id_rec))
    else:
        try:
            item_parts = p[id_type].findall(id_rec)[0]
            if verbose > 1:
                print('Successfully found {0}, compiling list!'.format(id_type), indent=indent)
                print('Item:\t', '\t'.join(item_parts), indent=indent + 1)
        except IndexError:
            raise IDError('Could not identify patterns in {0} with id_type={1}, '
                          'is the id_search sequence correct?'.format(id_rec, id_type))
        try:
            # item_parts groups: (identifier, sep, range, sep, trailer).
            item_parts = list(item_parts)
            item_parts[0] = item_parts[0] if not isinstance(item_parts[0], str) else ''.join(item_parts[0])
            if item_parts[2]:
                try:
                    sr_tuple = p['seq_range'].findall(item_parts[2])[0]
                    if verbose > 1:
                        print('Found sequence delimiters in IDs!', indent=indent)
                        print(sr_tuple, indent=indent + 1)
                except IndexError:
                    raise IDError('A positive match for a sequence range was found '
                                  '({0}), yet no hits were identified! Confirm that '
                                  'the regex is correct and try again!'.format(item_parts[2]))
            else:
                # No explicit range: span the whole sequence.
                sr_tuple = (0, -1)
            if item_parts[4]:
                # Strand and score, if present, live in the trailing group.
                try:
                    strand = p['strand'].findall(item_parts[4])[0]
                except IndexError:
                    strand = '(N)'
                try:
                    score = p['score'].findall(item_parts[4])[0]
                except IndexError:
                    score = 0
            else:
                strand = '(N)'
                score = '0'
            if verbose > 1:
                if strand != '(N)':
                    print('Strand info found: {0}'.format(strand), indent=indent)
                if score != '0':
                    print('Score info found: {0}'.format(score), indent=indent)
            seq_range = (int(sr_tuple[0]), int(sr_tuple[1]), strand, int(score))
            return p, item_parts[0], seq_range, id_type
        except IndexError:
            raise IDError('Could not identify patterns in {0} with id_type={1}, '
                          'is the id_search sequence correct?'.format(id_rec, id_type))
def percent_identity_searchio(hit, is_protein=True):
    """Calculate percent identity over an entire hit (all HSPs combined).

    Adapted from the UCSC BLAT FAQ percent-identity formula and Biopython.
    Overlapping HSP intervals are merged so shared positions count once;
    protein queries are scaled by 3 to work in nucleotide units.

    :param hit: A SearchIO Hit whose HSPs carry BLAT PSL statistics
        (match_num, match_rep_num, mismatch_num, query_gap_num).
    :param bool is_protein: Treat the query as protein. [Default: True]
    :return float: Percent identity in the range 0-100 (0 if no aligned span).
    :raises ValueError: If the HSP statistics sum to zero despite a non-empty
        aligned span (would otherwise divide by zero).
    """
    size_mul = 3 if is_protein else 1
    qali_size = size_mul * sum(r[-1] - r[0] for r in
                               merge_ranges([(hsp.query_start, hsp.query_end) for hsp in hit]))
    tali_size = sum(r[-1] - r[0] for r in
                    merge_ranges([(hsp.hit_start, hsp.hit_end) for hsp in hit]))
    ali_size = min(qali_size, tali_size)
    if ali_size <= 0:
        return 0
    # BLAT's "sizeDif" penalty: only penalize when the query span exceeds the target span.
    size_dif = max(qali_size - tali_size, 0)
    sum_match = sum(hsp.match_num for hsp in hit)
    sum_rep = sum(hsp.match_rep_num for hsp in hit)
    sum_mismatch = sum(hsp.mismatch_num for hsp in hit)
    sum_q_gap = sum(hsp.query_gap_num for hsp in hit)
    total = size_mul * (sum_match + sum_rep + sum_mismatch)
    if total == 0:
        raise ValueError('Total alignment size in percent_identity_searchio was 0; '
                         'cannot compute percent identity!')
    # "millibad" = mismatches-per-thousand, per the BLAT FAQ.
    millibad = (1000 * (sum_mismatch * size_mul + sum_q_gap +
                        round(3 * log(1 + size_dif)))) / total
    perc_ident = 100 - (millibad * 0.1)
    return perc_ident
def get_searchdb(search_type, species, db_loc, verbose=1, indent=0):
    """Finds and returns the appropriate search database for the given species and search type.

    This function automates the process of selecting the search database needed by the selected search program,
    like BLAST or BLAT, so that the user does not need to preoccupy themselves with providing said information
    for a large number of species. For BLAST* that depend on protein databases (BLASTP and BLASTX), the function
    searches for files matching the form 'Genus_species_protein.*' in the given directory; for BLAST* that depend
    on DNA databases (BLASTN, TBLASTN, and TBLASTX), it instead looks for files 'Genus_species_genome.*'.
    If '-transcript' is added to the end of any of the DNA-dependent BLAST*, then instead the function will
    search for files in the style of 'Genus_species_transcript.*'. In the case of BLAT searches, the program will
    similarly search for 'Genus_species*.2bit', or for 'Genus_species*transcript.2bit' if '-transcript' is added
    after the search type.
    In all usage cases, if the program does not find files matching the 'Genus_species' format, it will try to
    find the files using a case-insensitive search using the 6-letter abbreviated form of the species name.

    Usage::
        >>> get_searchdb('blastp', 'Homo sapiens', '/path/to/search/files')
        /path/to/search/files/Homo_sapiens_protein.*
        >>> get_searchdb('blastn-transcript', 'Homo sapiens', '/path/to/search/files')
        /path/to/search/files/Homo_sapiens_transcript.*
        >>> get_searchdb('blat', 'Homo sapiens', '/path/to/search/files')
        /path/to/search/files/Homo_sapiens.2bit

    :param str search_type: The name of the search method (blast or blat, and sub-type: blastp, blastn, blat, tblat...)
    :param str species: Name of species associated with the database. If there is a space, it will be replaced with an
        underscore.
    :param str db_loc: Path to folder containing collection of search databases.
    :param int verbose: How verbose should the output be. Zero suppresses all output, 2 is max verbosity.
    :param int indent: Indent level for printed output.
    :return Path: Path to the identified search database.
    :raises SearchError: For an unrecognized search_type.
    :raises DatabaseNotFoundError: If db_loc is missing or holds no matching database.
    """
    if verbose:
        print('Search DB set to auto, choosing search_db...', indent=indent)
    species = species.replace(' ', '_')
    if verbose > 1:
        print('Search DB location set to: ', db_loc, indent=indent)
    # Maps each search flavor to the kind of database file it needs.
    db_type_dict = {
        'blastx': "protein",
        'blastp': "protein",
        'blastn': "genome",
        'tblastn': "genome",
        'tblastx': "genome",
        'blastn-transcript': "transcript",
        'tblastn-transcript': "transcript",
        'tblastx-transcript': "transcript",
        'blat': "blat",
        'tblat': "blat",
        'blat-transcript': 'blat-transcript',
        'tblat-transcript': 'tblat-transcript'
    }
    try:
        db_type = db_type_dict[search_type]
    except KeyError:
        print('Unable to determine search db type!', indent=indent)
        raise SearchError('Improper search type given ({})!'.format(search_type))
    if verbose > 1:
        print('DB type: ', db_type, indent=indent)
    db_path = Path(db_loc).absolute()
    if not (db_path.exists() and db_path.is_dir()):
        raise DatabaseNotFoundError('DB_Path {} does not exist!'.format(str(db_path)))

    def _candidates(name):
        # One glob pattern per database flavor; returns all matching files.
        if db_type == 'blat':
            return list(db_path.glob('{0}*.2bit'.format(name)))  # Todo: generalize extension
        elif db_type in ('blat-transcript', 'tblat-transcript'):
            return list(db_path.glob('{0}*transcript.2bit'.format(name)))
        else:
            return list(db_path.glob('{0}_{1}*'.format(name, db_type)))

    glob_path = _candidates(species)
    if not glob_path:
        if verbose:
            print('No DB found! Trying again with abbreviated species name', indent=indent)
        # 6-letter abbreviation, e.g. 'Homo_sapiens' -> 'HomSap'.
        species_abbv = ''.join([i[0:3] for i in species.title().split('_')])
        # Make the glob case-insensitive by expanding each letter to a character class.
        species_abbv_insensitive = ''.join(['[{0}{1}]'.format(c.lower(),
                                                              c.upper()) for c in species_abbv if c.isalpha()])
        if verbose:
            print('Abbreviated species name: ', species_abbv, indent=indent)
            print('RegEx species abbreviation: ', species_abbv_insensitive, indent=indent)
        glob_path = _candidates(species_abbv_insensitive)
    try:
        if verbose:
            print(glob_path, indent=indent)
        # Prefer the lexicographically-last match (e.g. the highest version).
        search_db = sorted(glob_path, reverse=True)[0]
    except IndexError:
        print('WARNING: COULD NOT FIND DATABASE! ABORTING!', indent=indent)
        raise DatabaseNotFoundError('', 'No databases were found!')
    if verbose:
        print('{0} DB chosen: {1}'.format(search_type, str(search_db)), indent=indent)
    return search_db
def blat_server(twobit, order='start', host='localhost', port=20000, type='blat', log='/dev/null', species=None,
                search_db_loc='/usr/db/blat', verbose=1, indent=0, try_limit=10, **kwargs):
    """Convenience function that controls a gfServer. Still in alpha.

    This function serves as a python wrapper for the Bash gfServer command. The user can either provide a .2bit
    file, or else can provide a species and set 'twobit="auto"' to have the function use 'get_searchdb()' to find
    a .2bit file automatically. By default, the function is set to start up a new gfServer instance, but using the
    'order' parameter, the user can execute any of the standard gfServer commands such as 'stop' and 'status'.
    To start a gfServer, the function first probes the selected port (default is 20000) to ensure it is unused; if
    it is currently in use, the helper script goes port-by-port in ascending order until it finds an empty port to
    use for the server. Then, it simply calls the gfServer command with all the keyword arguments required, as well
    as with any extra arguments provided by the user.

    Usage::
        >>>blat_server(twobit='hg38.2bit', port=20000, verbose=3)
        gfServer start localhost 20001 -canStop -stepSize=5 hg38.2bit
        # Waits 30 seconds, then starts calling 'gfServer status localhost 20001' every 30 seconds for 5 minutes
        # If at any point 'gfServer status' returns something that is not an error or "Couldn't connect...", it
        # returns the port where the server was opened.
        20001
        >>>blat_server(twobit='auto', port=20000, species='Homo sapiens', verbose=3)
        # Calls get_searchdb('blat', 'Homo sapiens', db_loc=BLATDB) to locate e.g. 'Homo_sapiens.2bit'
        20001
        >>>blat_server(twobit='hg38.2bit', port=20000, order='status', verbose=3)
        # 1 if the server is active, 0 if not yet started/active; raises on server error.

    :param str twobit: A path to the .2bit file to be used for the server. Can also be set to 'auto'.
    :param str order: A command for gfServer. Can be one of the following: start, stop, status, files, query
        (requires a nucleotide sequence in fasta format), protQuery (requires a protein sequence in fasta format),
        transQuery (requires a nucleotide sequence in fasta format), pcr (requires arguments fPrimer, rPrimer,
        maxDistance), direct (requires probe.fa, file(s).nib), or pcrDirect (requires fPrimer, rPrimer, file(s).nib).
    :param str host: Address at which to host the server.
    :param int port: Port number that will be assigned to server. If in use, will test new port number in
        increments of 1 until a free port is found.
    :param str type: Type of server to be hosted. 'blat' will start a DNA server, 'tblat' will start a DNAX server
        for protein queries. (NOTE(review): shadows the builtin type(); renaming would change the keyword
        interface, so it is kept.)
    :param str log: Path and name of log file to be written. (NOTE(review): currently accepted but never forwarded
        to gfServer — pass log=... via **kwargs instead; confirm intent.)
    :param str species: Species name that get_searchdb() will use to find .2bit file when twobit='auto'.
    :param str search_db_loc: Path to the folder containing .2bit file.
    :param int verbose: Level of verbosity of function output. 0 suppresses all output, 3 is max verbosity.
    :param int indent: Indentation level of print output.
    :param int try_limit: Number of tries at 30-second intervals that function should probe the gfServer before
        timeout.
    :param kwargs: keyword arguments to be passed on to gfServer ('order', 'host', and 'port' keys override the
        named parameters; all others become '-key=value' options).
    :return: if order='start', returns the port of the new gfServer; if order='status', returns 0 if there was no
        connection, or 1 if the server is active and responding.
    :raises BLATServerError: On an invalid 2bit file or a gfServer error response.
    :raises TimeoutError: If the started server never responds within try_limit probes.
    """
    # Command shapes being wrapped:
    # Regular: gfServer start localhost portX -stepSize=5 -log=untrans.log database.2bit
    # Prot>DNAX: gfServer start localhost portY -trans -mask -log=trans.log database.2bit
    gfserver_suppl_args = list()
    if twobit == 'auto' and order != 'stop':
        if verbose:
            print('2bit set to auto: searching for 2bit file for species ', species, indent=indent)
        # get_searchdb returns a Path; only the basename is handed to gfServer.
        twobit = get_searchdb(search_type='blat', species=species, db_loc=search_db_loc,
                              verbose=verbose, indent=indent + 1)
        if twobit.exists() and twobit.is_file():
            twobit = twobit.name
        else:
            raise BLATServerError('Invalid 2bit file!')
    # Explicit kwargs may override order/host/port; everything else is passed
    # through to gfServer as a '-key=value' option.
    for key, item in kwargs.items():
        if key == 'order':
            order = item
        elif key == 'host':
            host = item
        elif key == 'port':
            port = item
        else:
            gfserver_suppl_args.append('-{0}={1}'.format(key, item))
    if order == 'status':
        # stderr is folded into stdout so error text can be matched below.
        gfcheck = subprocess.Popen('gfServer status {0} {1}'.format(str(host), str(port)), stdout=subprocess.PIPE,
                                   stderr=subprocess.STDOUT, universal_newlines=True, shell=True,
                                   executable='/bin/bash')
        out, _ = gfcheck.communicate()
        if "couldn't connect to localhost" in out.lower():
            return 0
        elif "error" in out.lower():
            raise BLATServerError(out)
        else:
            return 1
    elif order == 'stop':
        subprocess.check_call('gfServer stop {0} {1}'.format(str(host), str(port)), stdout=subprocess.PIPE,
                              stderr=subprocess.STDOUT, universal_newlines=True, shell=True,
                              executable='/bin/bash')
        return
    else:
        # NOTE(review): stray debug print of the order; presumably left over from development.
        print(order)
        # Todo: make the portsniffer its own function and make sure it works properly.
        # NOTE(review): hard-coded, machine-specific helper-script path — this will
        # fail anywhere but the original author's machine; confirm and generalize.
        portfinder = subprocess.check_output('/home/manny/Scripts/oneshot/checkifportisopen.sh {}'.format(str(port)),
                                             universal_newlines=True, shell=True, executable='/bin/bash')
        # The script prints the first free port at-or-above the requested one.
        port = portfinder.rstrip()
        gfserver_cmd = ['gfServer', str(order), str(host), str(port), '-canStop']
        if type == 'blat':
            gfserver_cmd.append('-stepSize=5')
        elif type == 'tblat':
            # DNAX server for protein queries against translated DNA.
            gfserver_cmd += ['-trans', '-mask']
        if gfserver_suppl_args:
            gfserver_cmd += gfserver_suppl_args
        gfserver_cmd_str = ' '.join(gfserver_cmd + [twobit])
        if verbose > 2:
            print(gfserver_cmd_str, indent=indent)
        # Fire-and-forget: the server keeps running after this function returns.
        subprocess.Popen(gfserver_cmd_str, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                         universal_newlines=True, shell=True, executable='/bin/bash')
        tries = 0
        # Poll the server every 30 seconds until it answers, errors, or we give up.
        while tries <= try_limit:
            sleep(30)
            gfcheck = subprocess.Popen('gfServer status {0} {1}'.format(str(host), str(port)), stdout=subprocess.PIPE,
                                       stderr=subprocess.STDOUT, universal_newlines=True, shell=True,
                                       executable='/bin/bash')
            out, _ = gfcheck.communicate()
            if verbose > 2:
                print(out)
            if "couldn't connect to localhost" in out.lower():
                # Server not up yet; keep waiting.
                tries += 1
            elif "error" in out.lower():
                raise BLATServerError(out)
            else:
                if verbose:
                    print(out)
                return port
        if tries > try_limit:
            raise TimeoutError('Timed out!')
def id_ranker(record, perc_score, perc_query_span, perc_ident, expect=None,
indent=0, verbose=1, same_strand=True, return_only=None):
"""Filters results based on score, expectation value, length, percent identity, and span; returns a sorted list.
:param query_record record: Either a SearchIO.QueryResult or a Bio.Blast.Record.
:param float perc_score: Minimum percentage of top score for a hit.
:param float expect: Maximum e-value for a hit (BLAST-only).
:param float perc_query_span: Minimum percent of the longest hit by query coverage for a hit.
:param int perc_ident: Minimum percent identity of a hit.
:param int indent: Indent level for pretty print. [Default: 0]
:param int verbose: Level of verbose output? [Default: 1]
:param bool same_strand: Should the function filter hits with HSPs on different strands? [Default:True]
:param return_only: Should all or only one id be returned?
:return list: Returns a list of tuples containing the final hit data in BED6 format.
"""
id_list = []
if verbose:
print('Beginning ID_Ranker...', indent=indent)
if record.program == 'blat':
if verbose > 2:
print('Results obtained from BLAT run.', indent=indent + 1)
elif 'blast' in record.program:
if verbose > 2:
print('Results obtained from BLAST run.', indent=indent + 1)
else:
raise NotImplementedError('Sorry, your program {} is not yet '
'implemented for RecBlast!'.format(record.program))
# Create filter functions:
def hsp_minscores(hsp):
return hsp.score >= int(perc_score * top_score)
def hsp_min_query_span(hsp):
return hsp.query_span >= perc_query_span * top_length
def hsp_perc_ident(hsp):
return hsp.ident_pct >= perc_ident
def hsp_same_strand(hsp):
if same_strand:
return all([i == hsp.hit_strand_all[0] for i in hsp.hit_strand_all])
else:
return True
def hit_sort_scores(hit):
return sum([hsp.score for hsp in hit.hsps])
def hsp_sort_scores(hsp):
return hsp.score
# Get top stats:
top_score = max([max([hsp.score for hsp in hit.hsps]) for hit in record])
if verbose > 1:
print('Top score for {}:\t'.format(record.id), top_score, indent=indent)
top_length = max([max([hsp.query_span for hsp in hit]) for hit in record])
if verbose > 1:
print('Longest hit for {}:\t'.format(record.id), top_length, indent=indent)
if verbose > 2:
print("ALL HITS STATS:")
print('|\tHit Name:\t|\t# HSPs\t|\tScore:\t|\tLength:\t|\tP.Ident\t|')
print("==========================================================")
for hit in record:
name = hit.id
n_hsp = len(hit.hsps)
print('|\t{HitName}\t|\t{HSP}\t|'.format(HitName=name, HSP=n_hsp))
print("------------------------------------------------------")
for hsp in hit:
print('|\t{id}\t|\t{hf}\t|\t{score}\t|\t{length}\t|\t{ident}\t|'.format(id=hsp.hit_id,
hf=len(hsp),
score=hsp.score,
length=hsp.hit_span,
ident=hsp.ident_pct))
# Execute filters:
# query_span
if verbose > 1:
print('Number of HSPs for {}:\t'.format(record.id), sum([len(i.hsps) for i in record]), indent=indent)
print('Filtering out all HSPs shorter than {}...'.format(perc_query_span * top_length), indent=indent)
record = record.hsp_filter(hsp_min_query_span) if perc_query_span else record
if not record:
text = ('No hits in Query Results match a stretch of the query sequence longer than '
'{0}!').format((top_length * perc_query_span))
raise NoHitsError(text)
# Score
if verbose > 1:
print('Number of HSPs for {}:\t'.format(record.id), sum([len(i.hsps) for i in record]), indent=indent)
print('Filtering out all HSPs with scores less than {}...'.format(top_score * perc_score), indent=indent)
record = record.hsp_filter(hsp_minscores) if perc_score else record
if not record:
text = 'No hits in Query Results have a score above the minimum of {0}!'.format((top_score * perc_score))
raise NoHitsError(text)
if verbose > 1:
print('Number of HSPs for {}:\t'.format(record.id), sum([len(i.hsps) for i in record]), indent=indent)
print('Filtering out all HSPs with percent identity below {}...'.format(perc_ident), indent=indent)
record = record.hsp_filter(hsp_perc_ident) if perc_ident else record
if not record:
text = 'No hits in Query Results have a percent identity above {}%!'.format(round(perc_ident * 100, 2))
raise NoHitsError(text)
if verbose > 1:
print('Number of HSPs for {}:\t'.format(record.id), sum([len(i.hsps) for i in record]), indent=indent)
if same_strand:
print('Filtering out all HSPs that have fragments on opposite strands...')
record = record.hsp_filter(hsp_same_strand) if same_strand else record
if not record:
text = 'No hits in Query Results are on the same strand!'
raise NoHitsError(text)
# Sorting them for good measure
if verbose > 1:
print('Sorting all hits by descending scores!', indent=indent)
record.sort(key=hit_sort_scores, reverse=True, in_place=True)
for hit in record:
hit.sort(key=hsp_sort_scores, reverse=True, in_place=True)
if verbose > 1:
print('Done!', indent=indent)
# Add items to id_list
# Big note: think in HSPs, not Hits
n = 1
for hit in record:
for hsp in hit:
# some quick strand math:
if hsp._has_hit_strand:
strands = set(hsp.hit_strand_all)
if len(strands) == 1:
strand = "+" if strands == {1} else "-"
else:
strand = "."
else:
strand = "."
if verbose > 2:
print("Adding hit {chr}:{s}-{e}({st}) to id list".format(chr=hsp.hit_id,
s=str(hsp.hit_range[0]),
e=str(hsp.hit_range[1]),
st=strand),
indent=indent)
# A little witchcraft before we do though
# turns out hsp.hit_start_all won't necessarily start with the starting point of the hit...
# That means we need to zip hit_start_all and hit_span_all, sort by the first one, then de-zip.
block_starts, block_spans = zip(*sorted(zip(hsp.hit_start_all, hsp.hit_span_all), key=itemgetter(0)))
# chr (start,end) id score strand thickStart thickEnd rgb blockcount blockspans blockstarts query_span
id_list.append([hsp.hit_id, hsp.hit_range, hsp.query_id, hsp.score, strand, hsp.hit_range[0],
hsp.hit_range[1], "255,0,0", len(hsp.hit_start_all),
",".join([str(i) for i in block_spans]),
",".join([str(i - hsp.hit_range[0]) for i in block_starts]), hsp.query_range])
if return_only and n == return_only:
print('Returning only the top {} hits, ending here!'.format(return_only),
indent=indent)
return id_list
n += 1
return id_list
| docmanny/RecSearch | RecBlast/Search.py | Search.py | py | 43,643 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "pathlib.Path",
"line_number": 23,
"usage_type": "argument"
},
{
"api_name": "pathlib.Path",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "RecBlast.print",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "RecBlast.print",
"li... |
40877684818 | import argparse
import sys
import datetime
from lib.data_holder import *
from lib.models import *
from lib.extremums import *
###
### GLOBALS
###
CACHE_DIR = 'cache'
data_holder = None
def lazy_data_holder():
global data_holder
if data_holder is None:
data_holder = DataHolder()
return data_holder
###
### PLOT POINT CLOUD
###
def plot_point_cloud():
"""Plot point cloud for protein-ligand pair."""
options_parser = argparse.ArgumentParser(add_help=True)
options_parser.add_argument('--protein', help='protein index', default=0, type=int)
options_parser.add_argument('--ligand', help='ligand index', default=5, type=int)
options = options_parser.parse_args()
cache_file_name = '{}/{}_{}.points'.format(CACHE_DIR, options.protein, options.ligand)
arr = create_cached(cache_file_name, lambda: lazy_data_holder().get_point_array(options.protein, options.ligand))
print('Have {} points for pair ({}, {})'.format(len(arr.spherical), options.protein, options.ligand))
subarr = arr.subarray_for_distance_range(4.5, 5.5)
print(len(subarr.spherical))
scatter_plot_point_array(subarr)
plt.savefig('test.png')
###
### PLOT DENSITY
###
from matplotlib.backends.backend_pdf import PdfPages
def plot_density_with_params(protein, ligand, r_from, r_to):
cache_file_name = '{}/{}_{}.points'.format(CACHE_DIR, protein, ligand)
arr = create_cached(cache_file_name, lambda: lazy_data_holder().get_point_array(protein, ligand))
print('Have {} points for pair ({}, {})'.format(len(arr.spherical), protein, ligand))
# 0, 5, 4.75-5.25 ok
# 0, 6, 4.75-5.25 ok
# 1, 1, 4.25-5.75 ok
pdf_output = PdfPages('plot_{}_{}.pdf'.format(protein, ligand))
#subarr = arr.subarray_for_distance_range(4.75, 5.25)
subarr = arr.subarray_for_distance_range(r_from, r_to)
print(len(subarr.spherical))
scatter_plot_point_array(subarr)
plt.xlabel(r'$\varphi$')
plt.ylabel(r'$\theta$')
plt.savefig('plots/points.png')
plt.title('Source points')
pdf_output.savefig(plt.gcf())
plt.close()
def make_model_density_plot(model, output_file, title):
dist_point_array = get_point_array_for_distance(5)
density = model.get_density_for_points(dist_point_array)
density = density.reshape(100, 100)
imshow_plot_density(density)
extremums = kernel_extremums_2d(density)
plot_extremums(extremums, density)
plt.xlabel(r'$\varphi$')
plt.ylabel(r'$\theta$')
plt.title(title)
plt.savefig(output_file)
pdf_output.savefig(plt.gcf())
plt.close()
if True:
kernel_density_model = KernelDensityModel(subarr)
make_model_density_plot(kernel_density_model, 'plots/density_kernel.png', 'Parzen')
if True:
histogram_density_model = HistogramDensityModel(subarr, r_steps=3, theta_steps=25, phi_steps=25)
make_model_density_plot(histogram_density_model, 'plots/density_histogram.png', 'Histogram')
if True:
gm_density_model = GaussianMixtureDensityModel(subarr)
make_model_density_plot(gm_density_model, 'plots/density_gmm.png', 'Gaussian mixture')
def compare_models(models):
dist_point_array = union_point_arrays([get_point_array_for_distance(5), get_point_array_for_distance(10), get_point_array_for_distance(15)])
volumes = get_point_array_approx_volumes(dist_point_array, n_r=3, n_phi=100, n_theta=100)
densities = [model.get_density_for_points(dist_point_array) for model in models]
def get_average_diff_for_pair(i, j):
# MAPE
#return (np.abs(densities[i] - densities[j]) / (np.minimum(densities[i], densities[j]) + 1e-6) * volumes).sum() / volumes.sum()
# MSE
return ((densities[i] - densities[j]) ** 2 * volumes).sum() / volumes.sum()
for i in range(len(models)):
for j in range(len(models)):
print('pair {} - {}: {}'.format(i, j, get_average_diff_for_pair(i, j)))
compare_models([kernel_density_model, histogram_density_model, gm_density_model])
pdf_output.close()
if False:
def make_model_density_plots_multiple(model, output_file):
dist_point_array = get_point_array_for_distance(5)
densities = model.get_density_for_points(dist_point_array)
for index, density in enumerate(densities):
imshow_plot_density(density.reshape(100, 100))
plt.savefig(output_file.format(index))
plt.close()
N = 10
model = NeuralNetworkModel(subarr, num_iterations=N)
for t in range(300):
make_model_density_plots_multiple(model, 'plots/nn/density_nn_{}_' + str(model.total_iterations) + '.png')
model.optimize_for(N)
def plot_density():
"""Plot point cloud for protein-ligand pair."""
options_parser = argparse.ArgumentParser(add_help=True)
options_parser.add_argument('--protein', help='protein index', default=1, type=int)
options_parser.add_argument('--ligand', help='ligand index', default=1, type=int)
options = options_parser.parse_args()
plot_density_with_params(options.protein, options.ligand, 4.25, 5.75)
def plot_density_preset():
plot_density_with_params(0, 5, 4.75, 5.25)
plot_density_with_params(0, 0, 4.75, 5.25)
plot_density_with_params(1, 1, 4.25, 5.75)
###
### MOD CHOOSER
###
def mod_chooser_main(available_modes):
available_mode_names = [
mode.__name__ for mode in available_modes
]
is_known_mode = len(sys.argv) >= 2 and sys.argv[1] in available_mode_names
need_help = len(sys.argv) >= 2 and sys.argv[1] == '--help'
if not need_help and not is_known_mode and len(sys.argv) >= 2:
print('Unknown mode: "{}"\n'.format(sys.argv[1], available_mode_names))
if not is_known_mode or need_help:
print('Usage: {} <mode> <options>'.format(sys.argv[0]))
print('')
print('Available modes:')
for mode in available_modes:
print('{} - {}'.format(mode.__name__, mode.__doc__))
sys.exit(1)
mode = sys.argv[1]
mode_func = available_modes[available_mode_names.index(mode)]
del sys.argv[1]
mode_func()
###
### MAIN
###
def main():
available_modes = [
plot_point_cloud,
plot_density,
plot_density_preset
]
mod_chooser_main(available_modes)
if __name__ == '__main__':
main()
| Intelligent-Systems-Phystech/ProbabilisticMetricSpaces | code/main.py | main.py | py | 6,488 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "matplotlib.backends.backend_pdf.PdfPages",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 145,
"usage_type": "call"
},
... |
39908905434 | # Imports from FindRoots
from FindRoots import fixPoint
from FindRoots import bisection
from FindRoots import falsPos
from FindRoots import newRap
from FindRoots import secant
from FindRoots import multipleRoot
from FindRoots import intervals
from FindRoots import derivatives
# Imports from Integrals
from Integrals import Riemann
from Integrals import trapecio
from Integrals import Simpson
from Integrals import Romberg
from Integrals import gaussSeidel
from Integrals import jacobi
from Integrals import jacobiMatriz
# Imports from Regressions
from Regressions import linear
from Regressions import polinomial
# Imports from Interpolations
from Interpolations import newtonInter
from Interpolations import lagrannge
# imports for operations
import math
import matplotlib.pyplot as plt
import numpy as np
# Menus
def mainMenu():
print("-------------------------Menú Principal-------------------------")
print(" 1. Raíces y Derivadas\n",
"2. Integrales\n",
"3. Matirces\n",
"4. Regresiones\n",
"5. Interpolaciones\n")
def derivativesAndRootsMenu():
print("----------------------------------------------------------------")
print(" 1. Encontrar derivadas\n",
"2. Buscar Raíces\n")
def rootsMenu():
print("--------------------------Menú Raíces---------------------------")
print(" 1. Método de Punto Fijo (Modificar para g(x))\n",
"2. Método de Bisección\n",
"3. Método de Falsa Posición\n",
"4. Método de Newton-Raphson\n",
"5. Método de la Secante\n",
"6. Método de Raíces Múltiples (Newton-Raphson modificado)\n")
def integralsMenu():
print("-------------------------Menú Integrales------------------------")
print(" 1. Integral por Riemann\n",
"2. Integral por Trapecios\n",
"3. Integral por Simpson\n",
"4. Integral por Romberg\n")
def matrixsMenu():
print("-------------------------Menú Matrices--------------------------")
print(" 1. Matriz de GaussSeidel\n",
"2. Matriz de Jacobi (Lambdas)\n",
"3. Matriz de Jacobi (Algebra Matricial)\n",
"4. Matriz identidad")
def regressionsMenu():
print("------------------------Menú Regresiones------------------------")
print(" 1. Regresión Lineal\n",
"2. Regresión Polinomial\n")
def interpolationsMenu():
print("----------------------Menú Interpolaciones----------------------")
print(" 1. Interpolación por Newton\n",
"2. Interpolación por Lagrannge\n")
# Grafic Function
def graphic_function(f : callable, x_range = (-20, 20), step = 0.1):
x = np.arange(x_range[0], x_range[1], step)
y = []
for val in x:
try:
y_val = f(val)
y.append(y_val)
except:
y.append(float('nan'))
plt.figure(figsize = (8,6))
plt.plot(x, y)
plt.title('Gráfico de la función')
plt.xlabel('x')
plt.ylabel('y')
plt.grid(True)
plt.show()
# Function
def f(x):
#return x**2
#return math.exp(-x**2)
#return math.cos(x)
#return math.sin(x)
return math.log(x)
mainMenu()
mainMenu = int(input("Qué acción quiere realizar?\n"))
if mainMenu == 1: # Derivatives and Find roots
derivativesAndRootsMenu()
menuRaizDeri = int(input("Qué acción quiere realizar?\n"))
if menuRaizDeri== 1:
valueX = float(input("En que valor de X quiere la aproximación?\n"))
derivatives.mainDerivates(f, valueX)
graphic_function(f)
elif menuRaizDeri == 2:
rootsMenu()
rootMetod = int(input("Qué método desea usar?\n")) # Options to select metod
# Fixed Point Metod
if rootMetod == 1:
x0 = 1
fixPoint.fix_point(x0, f)
graphic_function(f)
# Bisection Metod
elif rootMetod == 2:
intervals = intervals.Search_Interval(f, -100, 100)
if intervals:
x0, x1 = intervals[-1]
bisection(x0, x1, f)
else:
print("No se encontraron intervalos para aplicar el método de bisección.")
graphic_function(f)
# False Position Metod
elif rootMetod == 3:
intervals = intervals.Search_Interval(f, -100, 100)
if intervals:
x0, x1 = intervals[-1]
falsPos.false_position(x0, x1, f)
else:
print("No se encontraron intervalos para aplicar el método de falsa posición.")
graphic_function(f)
# Newton-Raphson Metod
elif rootMetod == 4:
x0 = 0
newRap.newton_raphson(x0, f)
graphic_function(f)
# Secant Metod
elif rootMetod == 5:
intervals = intervals.Search_Interval(f, -100, 100)
if intervals:
x0, x1 = intervals[-1]
secant.sec(x0, x1, f)
else:
print("No se encontraron intervalos para aplicar el método de la secante.")
graphic_function(f)
# Multiple Roots
elif rootMetod == 6:
x0 = 0
multipleRoot.mul_New_Rap(x0, f)
graphic_function(f)
else:
print("La opción elegida no existe")
else:
print("La opción elegida no existe")
elif mainMenu == 2: # Integrals
integralsMenu()
integralMetod = int(input("Qué método desea usar?\n")) # Options to select metod
# Riemann Metod
if integralMetod == 1:
Riemann.RiemannLeftExt(0, 3, 5, f)
Riemann.RiemannMid(0, 3, 5, f)
Riemann.RiemannRightExt(0, 3, 5, f)
# Trapeze Metod
elif integralMetod == 2:
trapecio.trapeze(0, 20000, 5, f)
trapecio.ntrapeze(0, 20000, 5, f)
# Simpson Metod
elif integralMetod == 3:
Simpson.Simpson(0, 20000, 3, f)
Simpson.nSimpson(0, 20000, 8, f)
# Romberg Metod
elif integralMetod == 4:
Romberg.Romberg(0,20000,4,f)
else:
print("La opción elegida no existe")
elif mainMenu == 3: # Matrixs
matrixsMenu()
matrixMetod = int(input("Qué método desea usar?\n")) # Options to select metod
# Gauss-Seidel Matrix
if matrixMetod == 1:
A = np.array([[6, 2, 1],
[2, 3, 1],
[2, 1, 4]])
b = np.array(
[1, 0, 0]
)
gaussSeidel.gaussSeidel(A, b, 10, 2, tol= 0.001)
# Jacobi Matrix (With Lambdas)
elif matrixMetod == 2:
f1 = lambda x, y ,z: (4-y-2*z)/3 #3x, f1--> x
f2 = lambda x, y ,z: (6-2*x-z)/1 #y, f2--> y
f3 = lambda x, y ,z: (2-x-4*y)/6 #6z, f3--> z
jacobi.jacobi(f1, f2, f3)
# Jacobi Matrix (Matrix Algebra)
elif matrixMetod == 3:
A = np.array([[3, 1, 2],
[2, 1, 1],
[1, 4, 6]])
b = np.array(
[4, 6, 2]
)
jacobiMatriz.jacobiMatriz(A, b, 10, 2)
#Matriz inversa e identidad
elif matrixMetod == 4:
A = np.array([[6, 2, 1, 0],
[2, 3, 1, 0],
[2, 1, 4, 2],
[1, 0, 0, 3]])
B = np.linalg.inv(A)
C = np.dot(A,B)
print("Matriz")
print(A)
print("\nInversa")
print(B)
print("\nIdentidad")
print (C)
else:
print("La opción elegida no existe")
elif mainMenu == 4: # Regressions
regressionsMenu()
regressionType = int(input("Qué tipo de regresión desea usar?\n")) # Options to select metod
# Linear Regression
if regressionType == 1:
dataX = np.array([1.1, 2, 3.01, 4, 4.98, 6, 7.02, 8])
dataY = np.array([2.5, 5.1, 8, 9.6, 10.8, 14, 15.1, 18])
a0, a1 = linear.minSqr(dataX, dataY)
# Graphic Linear Regression
resol = 20
xx = np.linspace(-1,12, resol)
yy = a0 +a1*xx
fig, ax = plt.subplots()
ax.plot(xx, yy, 'b')
ax.plot(dataX, dataY, 'o')
plt.grid()
plt.show()
# Polinomial Regression
elif regressionType == 2:
dataX = np.array([1.1, 2.1, 3.01, 4, 4.98, 6.1, 7.02, 8, 9, 10])
dataY = np.array([4.1, 5.2, 12.2, 19, 31, 43, 52, 71, 84.6, 104])
m = 2; n = len(dataX)
if n < m + 1:
print('Pocos datos (n < m + 1)')
else:
a0, a1, a2 = polinomial.minSqr(dataX, dataY, m, n)
# Graphic Polinomial Regression
resol = 100
xx = np.linspace(-2, 12, resol)
yy = a0 + a1*xx + a2*xx**2
fig, ax = plt.subplots()
ax.plot(xx, yy, 'r')
ax.plot(dataX, dataY, 'o')
plt.grid()
plt.show()
else:
print("La opción elegida no existe")
elif mainMenu == 5: # Interpolations
interpolationsMenu()
interpolMetod = int(input("Qué método desea usar?\n")) # Options to select metod
# Interpolation for Newton
if interpolMetod == 1:
x = 2
interpolation = newtonInter.Newton(1,5,5,f,x)
error = abs(interpolation - f(2))/(f(2))
print("Interpolación por Newton : ", interpolation,"\nError: ",error*100)
elif interpolMetod == 2:
x = 2
interpolation = lagrannge.Lagrannge(1,5,5,f,x)
error = abs(interpolation - f(2))/(f(2))
print("Interpolación por Lagrannge : ", interpolation,"\nError: ",error*100)
else:
print("La opción elegida no existe")
else:
print("La opción elegida no existe") | Ngonzalez693/MetodosNumericos | ProgramMetods/main.py | main.py | py | 9,646 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.arange",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 92,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 92,
"usage_type": "name"
},
{
"api_name": "matplotlib.py... |
22612711093 | import urllib3
from kivy.clock import Clock
from functools import partial
class Ping():
# Выводим статус подключения к сети на главной странице и странице Настроек
def getPing(self, ti):
try:
http=urllib3.PoolManager()
response = http.request('GET', 'http://google.com')
self.screens[0].ids.net_label.text = 'Online'
self.screens[3].ids.net_bottom.text = 'Online'
except:
self.screens[0].ids.net_label.text = 'Offline'
self.screens[3].ids.net_bottom.text = 'Offline'
def callPing(self):
Clock.schedule_interval(partial(Ping.getPing, self), 15) | IPIvliev/Pro_2 | Moduls/ping.py | ping.py | py | 655 | python | ru | code | 0 | github-code | 36 | [
{
"api_name": "urllib3.PoolManager",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "kivy.clock.Clock.schedule_interval",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "kivy.clock.Clock",
"line_number": 20,
"usage_type": "name"
},
{
"api_name"... |
23620498135 | import math
import json
import argparse
import cv2
import numpy as np
from shapely.geometry import Point
from PIL import Image, ImageDraw, ImageFont
SIZE = 60, 60
INNER_CIRCLE_DIAM = 28
HOLE_SPACING = 0
HOLE_SIZE = 0.800 # only used for debug output
SPHERICAL_DOME_MODE = True
SPHERE_RADIUS = 60/2
SPHERE_DIST = 25.055 # measured from work position zero
SPIRAL_SORT = False
DEBUG_IMAGE = "debug.png"
DEBUG_SCALE = 20
IMAGE_SIZE = (SIZE[0] * DEBUG_SCALE, SIZE[1] * DEBUG_SCALE) # mm
SVG_SCALE = 3.78
SVG_OFFSET = [-SIZE[0]/2*SVG_SCALE, -SIZE[1]/2*SVG_SCALE]
GCODE_FILE = "output.gcode"
SVG_FILE = "output.svg"
TRAVEL_SPEED = 600
RAISE_SPEED = 400
DRILL_SPEED = 80
DRILL_START = -2
DRILL_DEPTH = -3 -6 -1
SAFE_HEIGHT = 6 # at the start
SAFE_HEIGHT_LOW = 1 # used within the area
# ---
# TORUS
TORUS_FILE = "debruijn_torus_16_32_3_3.npy"
HOLE_SPACING = 1.60
# TORUS_FILTER_INVERT = False # RED
TORUS_FILTER_INVERT = True # GREEN
# HEXMAP
HEXMAP_FILE = "2678_data_3_i-1725881.json"
# HEXMAP_FILE = "2633_data_0_i-1303443.json"
HOLE_SPACING = 2.00
# HEXMAP_FILTER_VALUE = 0
HEXMAP_FILTER_VALUE = 1
# ---
MODE_HEXMAP = "hexmap"
MODE_TORUS = "torus"
MODE_GRID = "grid"
# ---
START_CMD = """
G90 (abs coords)
G21 (units: mm)
G1 F{travel_speed}
G1 Z{z:.4f} (move to safe height)
G1 X0 Y0 Z{z:.4f} (move to zero)
G92 X0 Y0 Z{z:.4f} A0 (reset extruder axis)
"""
# MOVE_CMD = """
# M03 S{power}
# G4 P0
# G1 F{move_speed}
# G1 X{x} Y{y}
# """
MOVE_CMD = """
G1 F{travel_speed}
G1 X{x:.4f} Y{y:.4f} Z{z:.4f}
"""
END_CMD = """
G4 P0
G1 F{travel_speed}
G1 Z{z:.4f}
G1 X0 Y0 Z{z:.4f}
"""
def cut_poly(f, poly):
coords = poly.exterior.coords
# move to start of polygon
f.write(MOVE_CMD.format(
x=coords[0][0], y=coords[0][1],
travel_speed=TRAVEL_SPEED))
f.write(CUT_CMD.format(
x=coords[0][0], y=coords[0][1],
power=LASER_CUT,
cut_speed=CUT_SPEED))
# cut
for i in range(1, len(coords)):
p = coords[i]
f.write(CUT_CMD.format(
x=p[0], y=p[1],
power=LASER_CUT,
cut_speed=CUT_SPEED))
# close last segment
f.write(CUT_CMD.format(
x=coords[0][0], y=coords[0][1],
power=LASER_CUT,
cut_speed=CUT_SPEED))
def calculate_dome_offset(x, y, distance, radius, center=(SIZE[0]/2, SIZE[1]/2)):
xy_dist = math.sqrt((center[0]-x)**2 + (center[0]-y)**2)
return math.sqrt(radius**2 - xy_dist**2) - distance
def pointy_hex_to_pixel(q, r, s, center=[0, 0], hex_size=HOLE_SPACING/2):
x = hex_size * (math.sqrt(3) * q + math.sqrt(3)/2 * r)
y = hex_size * (3./2 * r)
return (x + center[0], y + center[1])
# ------------------------------------------------------------------------------------------
font = ImageFont.load_default()
font_large = ImageFont.truetype("FiraMono-Regular.ttf", 16)
font_large_bold = ImageFont.truetype("FiraMono-Bold.ttf", 16)
circle = Point(SIZE[0]/2, SIZE[1]/2).buffer(INNER_CIRCLE_DIAM/2)
points = []
ap = argparse.ArgumentParser()
ap.add_argument(
"mode",
default=MODE_HEXMAP,
choices=[MODE_HEXMAP, MODE_TORUS, MODE_GRID],
help=""
)
args = vars(ap.parse_args())
if SPHERICAL_DOME_MODE:
print("SPHERICAL DOME MODE")
if args["mode"] == MODE_HEXMAP:
data = None
with open(HEXMAP_FILE, "r") as f:
data = json.load(f)
for key in data["data"]:
if not data["data"][key] == HEXMAP_FILTER_VALUE:
continue
q, r, s = [int(c) for c in key.split("|")]
x, y = pointy_hex_to_pixel(q, r, s, center=[SIZE[0]/2, SIZE[1]/2])
# mirror Y axis so it can be observed through the lens on the back
y = SIZE[1] - y
p = Point(x, y)
if not circle.intersection(p):
continue
points.append(p)
print("generated {}/{} hexmap points".format(len(points), len(data["data"].keys())))
elif args["mode"] == MODE_TORUS:
print("TORUS MODE")
torus = np.load(TORUS_FILE)
# mirror torus vertically so it can be observed through the lens on the back
torus = np.flip(torus, axis=1)
offset = [torus.shape[1]*HOLE_SPACING/2-HOLE_SPACING/2, torus.shape[0]*HOLE_SPACING/2-HOLE_SPACING/2]
for i in range(torus.shape[0]):
line = []
for j in range(torus.shape[1]):
if TORUS_FILTER_INVERT:
if not torus[i, j]:
continue
else:
if torus[i, j]:
continue
p = Point(SIZE[0]/2 + j*HOLE_SPACING - offset[0], SIZE[1]/2 + i*HOLE_SPACING - offset[1])
if not circle.intersection(p):
continue
line.append(p)
if i % 2 == 0:
points += line
else:
points += reversed(line)
print("generated {}/{} torus points".format(len(points), torus.shape[0]*torus.shape[1]))
elif args["mode"] == MODE_GRID:
# regular grid
# for x in np.linspace(0, SIZE[0], math.floor(SIZE[0]/HOLE_SPACING)):
# for y in np.linspace(0, SIZE[1], math.floor(SIZE[1]/HOLE_SPACING)):
# p = Point(x, y)
# if circle.intersection(p):
# points.append(Point(x, y))
# hex grid
print("GRID HEXAGON MODE")
size = HOLE_SPACING
w = math.sqrt(3) * size
h = 2 * size
num_x = int(SIZE[0]/w)
num_y = int(SIZE[1]/(0.75*h))
offset = [0, 0]
if num_y % 2 == 0:
offset = [SIZE[0]-num_x*w, SIZE[1]-num_y*(0.75*h)]
else:
offset = [SIZE[0]-num_x*w, SIZE[1]-num_y*(0.75*h)+0.75*h]
print("hexagons - horizontal: {} | vertical: {}".format(num_x, num_y))
print("hex offsets: {:6.3f} {:6.3f}".format(*offset))
for y in range(0, num_y):
line = []
for x in range(0, num_x):
p = None
if y % 2 == 0:
p = Point(offset[0]/2 + x*w, offset[1]/2 + 0.75*h*y) # + 0.5*h)
else:
p = Point(offset[0]/2 + 0.5*w + x*w, offset[1]/2 + 0.75*h*y) # + 0.5*h)
if not circle.intersection(p):
continue
line.append(p)
# if x == 0:
# print("{} {}".format(y, p))
if y % 2 == 0:
points += line
else:
points += reversed(line)
else:
print("mode missing")
sys.exit(-1)
if SPIRAL_SORT:
# spiral traversal order, naive implementation
points_sorted = []
center = [SIZE[0]/2, SIZE[1]/2]
points = sorted(points, key=lambda p: math.sqrt(math.pow(center[0] - p.x, 2) + math.pow(center[1] - p.y, 2)), reverse=False)
points_sorted.append(points[0])
points = points[1:]
# weighting
while len(points) > 0:
points = sorted(
points,
key=lambda p:
0.45 * math.sqrt(math.pow(center[0] - p.x, 2) + math.pow(center[1] - p.y, 2)) + # distance to center
0.55 * math.sqrt(math.pow(points_sorted[-1].x - p.x, 2) + math.pow(points_sorted[-1].y - p.y, 2)), # distance to last point
reverse=False
)
points_sorted.append(points[0])
points = points[1:]
points = list(reversed(points_sorted))
# ---
print("num points: {}".format(len(points)))
with Image.new(mode="RGB", size=IMAGE_SIZE) as im:
draw = ImageDraw.Draw(im, "RGBA")
for x in range(1, IMAGE_SIZE[0]//100):
draw.line([x*100, 0, x*100, IMAGE_SIZE[1]], width=1, fill=(40, 40, 40))
for y in range(1, IMAGE_SIZE[1]//100):
draw.line([0, y*100, IMAGE_SIZE[0], y*100], width=1, fill=(40, 40, 40))
draw.text((25, 5+20), "HOLE SPACING:", (255, 255, 255), font=font_large)
draw.text((25+170, 5+20), " {:2.3f} mm".format(HOLE_SPACING), (255, 255, 255), font=font_large_bold)
draw.text((25, 5+20*2), "HOLE SIZE:", (255, 255, 255), font=font_large)
draw.text((25+170, 5+20*2), " {:2.3f} mm".format(HOLE_SIZE), (255, 255, 255), font=font_large_bold)
draw.text((25, 5+20*3), "INNER CIRCLE:", (255, 255, 255), font=font_large)
draw.text((25+170, 5+20*3), " {:2.2f} mm".format(INNER_CIRCLE_DIAM), (255, 255, 255), font=font_large_bold)
draw.line([25, 10+20*4, 270, 10+20*4], width=1, fill=(80, 80, 80))
draw.text((25, 20+20*4), "total points:", (255, 255, 255), font=font_large)
draw.text((25+170, 20+20*4), " {}".format(len(points)), (255, 255, 255), font=font_large_bold)
val = " - "
if args["mode"] == MODE_HEXMAP:
val = str(HEXMAP_FILTER_VALUE)
elif args["mode"] == MODE_TORUS:
val = str(TORUS_FILTER_INVERT)
draw.text((25, IMAGE_SIZE[1]-30), "FILTER VALUE:", (255, 255, 255), font=font_large)
draw.text((25+170, IMAGE_SIZE[1]-30), val, (255, 255, 255), font=font_large_bold)
draw.line([0, IMAGE_SIZE[1]/2, IMAGE_SIZE[0], IMAGE_SIZE[1]/2], width=1, fill=(40, 0, 0))
draw.line([IMAGE_SIZE[0]/2, 0, IMAGE_SIZE[0]/2, IMAGE_SIZE[1]], width=1, fill=(40, 0, 0))
for i in range(0, len(points)-1):
cur = points[i]
nxt = points[i+1]
draw.line([
int(cur.x * DEBUG_SCALE), int(cur.y * DEBUG_SCALE),
int(nxt.x * DEBUG_SCALE), int(nxt.y * DEBUG_SCALE)],
width=4, fill=(0, 0, 150))
for point in points:
coords = [(int(x * DEBUG_SCALE), int(y * DEBUG_SCALE)) for x, y in point.buffer(HOLE_SIZE/2).exterior.coords]
draw.polygon(coords, fill="white")
coords = [(int(x * DEBUG_SCALE), int(y * DEBUG_SCALE)) for x, y in circle.exterior.coords]
draw.polygon(coords, outline="red")
im.save(DEBUG_IMAGE)
with open(GCODE_FILE, "w") as f:
f.write(START_CMD.format(
travel_speed=TRAVEL_SPEED,
z=SAFE_HEIGHT
))
# move to initial position
f.write(MOVE_CMD.format(
x=0, y=0, z=SAFE_HEIGHT,
travel_speed=TRAVEL_SPEED))
# move to first point
coords = list(points[0].coords)[0]
f.write(MOVE_CMD.format(
x=coords[0], y=coords[1], z=SAFE_HEIGHT,
travel_speed=TRAVEL_SPEED))
spherical_offset = 0
for i in range(0, len(points)):
p = list(points[i].coords)[0]
if SPHERICAL_DOME_MODE:
spherical_offset = calculate_dome_offset(p[0], p[1], SPHERE_DIST, SPHERE_RADIUS)
p = [p[0], p[1]*-1+SIZE[1]] # flip Y coordinate to convert top-left coordinate system (numpy matrix, PIL image) to bottom-left system (gcode)
# move
f.write(MOVE_CMD.format(
x=p[0], y=p[1], z=SAFE_HEIGHT_LOW,
travel_speed=TRAVEL_SPEED))
# lower
f.write(MOVE_CMD.format(
x=p[0], y=p[1], z=DRILL_START,
travel_speed=TRAVEL_SPEED))
# drill
f.write(MOVE_CMD.format(
x=p[0], y=p[1], z=DRILL_DEPTH + spherical_offset,
travel_speed=DRILL_SPEED))
# print(DRILL_DEPTH + spherical_offset)
# raise
f.write(MOVE_CMD.format(
x=p[0], y=p[1], z=SAFE_HEIGHT_LOW,
travel_speed=RAISE_SPEED))
f.write(
END_CMD.format(
travel_speed=TRAVEL_SPEED,
z=SAFE_HEIGHT
))
print("written to file: {}".format(GCODE_FILE))
with open(SVG_FILE, "w") as f:
f.write("<?xml version=\"1.0\" encoding=\"utf-8\" ?>\n")
f.write("<?xml-stylesheet href=\"style.css\" type=\"text/css\" title=\"main_stylesheet\" alternate=\"no\" media=\"screen\" ?>\n")
f.write("<svg baseProfile=\"tiny\" version=\"1.2\" width=\"{}{}\" height=\"{}{}\"\n".format(SIZE[0]*SVG_SCALE, "mm", SIZE[1]*SVG_SCALE, "mm"))
f.write("xmlns=\"http://www.w3.org/2000/svg\" xmlns:ev=\"http://www.w3.org/2001/xml-events\" xmlns:xlink=\"http://www.w3.org/1999/xlink\">\n")
f.write("<defs />\n")
f.write("<circle cx=\"{}\" cy=\"{}\" r=\"{}\" fill=\"none\" stroke=\"black\" />\n".format(
SIZE[0]/2*SVG_SCALE+SVG_OFFSET[0],
SIZE[1]/2*SVG_SCALE+SVG_OFFSET[1],
INNER_CIRCLE_DIAM/2*SVG_SCALE))
for i in range(0, len(points)):
p = list(points[i].coords)[0]
f.write("<circle cx=\"{}\" cy=\"{}\" r=\"{}\" />\n".format(
p[0]*SVG_SCALE+SVG_OFFSET[0],
p[1]*SVG_SCALE+SVG_OFFSET[1],
HOLE_SIZE/2*SVG_SCALE))
f.write("</svg>\n")
print("written to file: {}".format(SVG_FILE))
| volzotan/LensLeech | fabrication/stencil/cncwrite.py | cncwrite.py | py | 13,041 | python | en | code | 5 | github-code | 36 | [
{
"api_name": "math.sqrt",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "math.sqrt",
"line_number": 127,
"usage_type": "call"
},
{
"api_name": "math.sqrt",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "PIL.ImageFont.load_default",
"li... |
36369809558 | from ast import literal_eval
import os, sys
import customtkinter as ctk
from PIL import Image
ctk.set_appearance_mode("dark")
ctk.set_default_color_theme("blue")
from numpy import number
sys.path.insert(0, os.path.dirname("algorithms"))
import random
import copy
import tkinter as tk
from tkinter import ttk
from tkinter import filedialog
from tkinter import messagebox
import csv
from pathlib import Path
import pandas as pd
from tkinter import Menu
from tkinter import font
from tkinter import filedialog
import numpy as np
from csv_cleaner import Csv_Cleaner
from csv_editor import Application
from algorithms.naive_classification import NaiveClassification
from algorithms.metrics import Metrics
from algorithms.knn import KNN
from algorithms.naive_bayes import NaiveBayes
####
# useful dictionnary
classes_labels = {
4 : 'positive',
2 : 'neutral',
0 : 'negative'
}
###
"""
Initializes the splash window to display the XSA logo
"""
splash_root = tk.Tk()
splash_root.title('XSA - GUI')
splash_root.geometry("500x500")
path = Path(__file__).parent / "."
logo_path = (path / "./assets/black_logo.png").resolve()
photo = tk.PhotoImage(file=logo_path)
image_label = ttk.Label(
splash_root,
image=photo,
text='XSA',
compound='top'
)
image_label.pack()
####################
# GLOBAL VARIABLES #
####################
single_input_classification = None
number_of_k_value = None
distance_value = None
active_dataset = None
variante1 = None
variante2 = None
variante3 = None
def main():
"""
Defining all the necessary functions
"""
def set_active_dataset(df):
"""Sets the active dataset global variable to the dataset given in parameter.
Tha active dataset is the one opened in the CSV Viewer
Args:
df (pd.dataframe): the dataset opened in the csv viewer
"""
global active_dataset
active_dataset = df
def get_active_dataset():
"""Retrieves the value stored in the active dataset global variables
Returns:
pd.dataframe: the active dataset
"""
global active_dataset
return active_dataset
def open_csv_file():
"""Opens a filedialog asking for the user to choose a csv file, then call the display_csv_data function to display it on the csv viewer in the app.
"""
file_path = filedialog.askopenfilename(title="Open CSV File", filetypes=[("CSV files", "*.csv")])
if file_path:
display_csv_data(file_path)
def ask(question):
"""Asks a yes/no question to the user. Useful for basic input
Args:
question (str): the question asked to the user
Returns:
bool: litteraly yes or no
"""
response = messagebox.askyesno("XSA - User input",
question,
icon ='question')
if response: # If the answer was "Yes" response is True
return True
else: # If the answer was "No" response is False
return False
def display_csv_data(file_path):
"""This function displays a csv in the csv viewer, the central panel in the application.
Upon opening, the function asks the user of he wants his data to be cleaned, then is the file has a header, if not, default header will be applied.
Args:
file_path (str): path to the file that is to be displayed
Returns:
None: if the user decides to cancel the operation
"""
clean_data = ask("Would you like to clean your data ?")
file_name = ""
df = pd.read_csv(file_path, on_bad_lines='skip')
set_active_dataset(df)
if clean_data:
df = pd.read_csv(file_path, on_bad_lines='skip', header=None)
cleaner = Csv_Cleaner(file_path)
df = cleaner.clean()
set_active_dataset(df)
f = filedialog.asksaveasfile(mode='w', defaultextension=".csv")
if f is None: # asksaveasfile return `None` if dialog closed with "cancel".
return
f.write(active_dataset.to_csv(index=False, sep=",", header=True, quotechar='"', lineterminator="\r"))
file_name = f.name
f.close()
else:
file_name = file_path
header_prsent = ask("Does the file have a header ?")
with open(file_name, 'r', newline='') as file:
csv_reader = csv.reader(file, quoting=csv.QUOTE_ALL)
if header_prsent:
header = next(csv_reader)
else:
header=[f"col_{x}" for x in range(count(df.columns))]
tree.delete(*tree.get_children()) # Clear the current data
tree["columns"] = header
for col in header:
tree.heading(col, text=col)
tree.column(col, width=300, stretch=True)
for row in csv_reader:
tree.insert('', "end", values=row)
    def user_selection_model_parameter(model):
        """Open a modal dialog collecting the hyperparameters of *model*.

        The chosen values are published through module-level tk.StringVar
        globals (number_of_k_value, distance_value, vote_value,
        variante1..3) so the train/test callbacks can read them after the
        dialog closes.

        Args:
            model (str): which parameter form to build; recognised values
                are 'knn' and 'naive_bayes'.
        """
        global number_of_k_value
        global distance_value
        global vote_value
        global variante1
        global variante2
        global variante3
        # Fresh StringVars each call so stale selections never leak in.
        number_of_k_value = tk.StringVar()
        distance_value = tk.StringVar()
        vote_value = tk.StringVar()
        variante1 = tk.StringVar()
        variante2 = tk.StringVar()
        variante3 = tk.StringVar()
        if model == 'knn':
            # KNN form: K, string-distance metric and vote scheme.
            new= ctk.CTkToplevel(master=root)
            new.geometry("400x600")
            new.title("User Input KNN")
            my_font = ctk.CTkFont(family="Helvetica", size=20, weight="bold")
            ctk.CTkLabel(new, text="Number of K", font=my_font).pack(pady=20)
            ctk.CTkEntry(new, textvariable=number_of_k_value).pack(pady=20, padx=10)
            ctk.CTkLabel(new,text="Distance", font=my_font).pack(pady=20)
            choices = ["naive", "levenshtein", "lcs", "damerau_levenshtein", "hamming", "jaro", "cosine", "jaccard", "sorensen_dice", "qgram_dice"]
            ctk.CTkComboBox(new, variable=distance_value, values=choices).pack(pady=40, padx=10)
            ctk.CTkLabel(new, text="Vote", font=my_font).pack(pady=20)
            votes = ["majoritaire", "pondéré"]
            ctk.CTkComboBox(new, variable=vote_value, values=votes).pack(pady=20, padx=10)
        if model == "naive_bayes":
            # Naive Bayes form: the three model variants.
            new= ctk.CTkToplevel(master=root)
            new.geometry("400x600")
            new.title("User Input NB")
            my_font = ctk.CTkFont(family="Helvetica", size=20, weight="bold")
            ctk.CTkLabel(new,text="Variante 1", font=my_font).pack(pady=20)
            choices = ["fréquence", "présence"]
            ctk.CTkComboBox(new, variable=variante1, values=choices).pack(pady=40, padx=10)
            ctk.CTkLabel(new, text="Variante 2 (stopwords)", font=my_font).pack(pady=20)
            votes = ["avec", "sans"]
            ctk.CTkComboBox(new, variable=variante2, values=votes).pack(pady=20, padx=10)
            ctk.CTkLabel(new, text="Variante 3", font=my_font).pack(pady=20)
            votes = ["uni-gramme", "bi-gramme", "both"]
            ctk.CTkComboBox(new, variable=variante3, values=votes).pack(pady=20, padx=10)
        # NOTE(review): if model is neither 'knn' nor 'naive_bayes', `new`
        # is undefined here and the next line raises NameError — confirm
        # callers only ever pass those two values.
        ctk.CTkButton(new, text="Validate Parameters and exit", command=new.destroy).pack(pady=30)
        # Block until the dialog is destroyed so the vars hold final values.
        root.wait_window(new)
def test_model_dataset():
"""tests the model based on the parameters that were input by the user.
"""
selection = algo_var.get()
if selection == 'naive_bayes':
if isinstance(get_active_dataset(), type(None)):
messagebox.showwarning(title="Warning", message="Please load a training dataset in the CSV viewer")
else:
df_train = copy.deepcopy(active_dataset)
messagebox.showinfo(title="Info Naive Bayes", message="Load your testing data")
open_csv_file()
df_test = copy.deepcopy(active_dataset)
user_selection_model_parameter("naive_bayes")
nb_model = NaiveBayes(df_train, variante1.get(), variante2.get(), variante3.get())
classifications = []
for tweet_token in df_test["Tweet_Tokenized"]:
tweet_a_categoriser = " ".join(literal_eval(tweet_token))
classifications.append(nb_model.classification(tweet_a_categoriser))
df_test["model_class"] = classifications
set_active_dataset(df_test)
f = filedialog.asksaveasfile(mode='w', defaultextension=".csv")
if f is None: # asksaveasfile return `None` if dialog closed with "cancel".
return
f.write(active_dataset.to_csv(index=False, sep=",", header=True, quotechar='"', lineterminator="\r"))
file_name = f.name
f.close()
display_csv_data(file_name)
elif selection == 'knn':
if isinstance(get_active_dataset(), type(None)):
messagebox.showwarning(title="Warning", message="Please load a training dataset in the CSV viewer")
else:
df_train = copy.deepcopy(active_dataset)
messagebox.showinfo(title="Info KNN", message="Load your testing data")
open_csv_file()
df_test = copy.deepcopy(active_dataset)
user_selection_model_parameter("knn")
knn_model = KNN(df_train ,number_of_k_value.get(), distance_value.get(), vote_value.get())
classifications = []
for tweet_token in df_test["Tweet_Tokenized"]:
tweet_a_categoriser = " ".join(literal_eval(tweet_token))
classifications.append(knn_model.classification(tweet_a_categoriser))
df_test["model_class"] = classifications
set_active_dataset(df_test)
f = filedialog.asksaveasfile(mode='w', defaultextension=".csv")
if f is None: # asksaveasfile return `None` if dialog closed with "cancel".
return
f.write(active_dataset.to_csv(index=False, sep=",", header=True, quotechar='"', lineterminator="\r"))
file_name = f.name
f.close()
display_csv_data(file_name)
elif selection == 'naive_classification':
if isinstance(get_active_dataset(), type(None)):
messagebox.showwarning(title="Warning", message="Please load a dataset in the CSV viewer")
else:
nc = NaiveClassification(active_dataset)
classified_df = nc.get_classified()
set_active_dataset(classified_df)
f = filedialog.asksaveasfile(mode='w', defaultextension=".csv")
if f is None: # asksaveasfile return `None` if dialog closed with "cancel".
return
f.write(active_dataset.to_csv(index=False, sep=",", header=True, quotechar='"', lineterminator="\r"))
file_name = f.name
f.close()
display_csv_data(file_name)
    def get_user_input_for_single_classification():
        """Open a modal dialog asking for a single tweet to classify.

        The text is published through the module-level StringVar
        ``single_input_classification``; mostly used as a demo shortcut
        to avoid loading full datasets.
        """
        global single_input_classification
        single_input_classification = tk.StringVar()
        new= ctk.CTkToplevel(master=root)
        new.geometry("300x300")
        new.title("User Input")
        my_font = ctk.CTkFont(family="Helvetica", size=20, weight="bold")
        ctk.CTkLabel(new, text="Tweet Input", font=my_font).pack(pady=20)
        ctk.CTkEntry(new, width=200,textvariable=single_input_classification).pack(pady=20, padx=10)
        ctk.CTkButton(new, text="Validate input and exit", command=new.destroy).pack(pady=30)
        # Block until the dialog is destroyed so the var holds the final text.
        root.wait_window(new)
def test_model_single_input():
"""Tests the model if the mode is single_input_classification
"""
selection = algo_var.get()
if selection == 'naive_bayes':
if isinstance(get_active_dataset(), type(None)):
messagebox.showwarning(title="Warning", message="Please load a training dataset in the CSV viewer")
else:
user_selection_model_parameter("naive_bayes")
get_user_input_for_single_classification()
tweet_a_categoriser = single_input_classification.get()
cleaner = Csv_Cleaner(is_single_input=True, single_input=tweet_a_categoriser)
tweet_a_categoriser_clean = cleaner.clean()
nb_model = NaiveBayes(active_dataset, variante1.get(), variante2.get(), variante3.get())
classification = nb_model.classification(" ".join((tweet_a_categoriser_clean)), single_input_classification=True)
messagebox.showinfo(title="Info", message=f"Your input '{tweet_a_categoriser}' has been classsified as : {classes_labels[int(classification)]}")
if selection == 'knn':
if isinstance(get_active_dataset(), type(None)):
messagebox.showwarning(title="Warning", message="Please load a training dataset in the CSV viewer")
else:
user_selection_model_parameter("knn")
get_user_input_for_single_classification()
tweet_a_categoriser = single_input_classification.get()
cleaner = Csv_Cleaner(is_single_input=True, single_input=tweet_a_categoriser)
tweet_a_categoriser_clean = cleaner.clean()
knn_model = KNN(active_dataset ,number_of_k_value.get(), distance_value.get(), vote_value.get())
classification = knn_model.classification(" ".join((tweet_a_categoriser_clean)))
messagebox.showinfo(title="Info", message=f"Your input '{tweet_a_categoriser}' has been classsified as : {classes_labels[int(classification)]}")
if selection == 'naive_classification':
get_user_input_for_single_classification()
tweet_a_categoriser = single_input_classification.get()
cleaner = Csv_Cleaner(is_single_input=True, single_input=tweet_a_categoriser)
tweet_a_categoriser_clean = cleaner.clean()
nc = NaiveClassification(tweet_a_categoriser_clean, True)
classification = nc.classify()
print(classification)
messagebox.showinfo(title="Info", message=f"Your input '{tweet_a_categoriser}' has been classsified as : {classes_labels[int(classification)]}")
def kfold_indices(data, k=5):
data = data.sample(frac=1)
fold_size = len(data) // k
indices = np.arange(len(data))
folds = []
for i in range(k):
test_indices = indices[i * fold_size: (i + 1) * fold_size]
train_indices = np.concatenate([indices[:i * fold_size], indices[(i + 1) * fold_size:]])
validation_set = data.loc[test_indices].reset_index(drop=True)
training_set = data.loc[train_indices].reset_index(drop=True)
folds.append((training_set, validation_set))
return folds
def train_test_split(data, test_size=0.2, random_seed=None):
if random_seed is not None:
random.seed(random_seed)
test_size = int(len(data) * test_size)
shuffled_data = random.sample(data, len(data))
test_set = shuffled_data[:test_size]
train_set = shuffled_data[test_size:]
return train_set, test_set
def train_model():
"""tests the model based on the parameters that were input by the user.
"""
selection = algo_var.get()
if selection == 'naive_bayes':
if isinstance(get_active_dataset(), type(None)):
messagebox.showwarning(title="Warning", message="Please load a training dataset in the CSV viewer")
else:
classifications_validation = []
df_train = copy.deepcopy(active_dataset)
## selection du modele
#user_selection_model_parameter(algo_var.get())
## cross validation
fold_indices = kfold_indices(df_train, 3)
variante1 = ["présence", "fréquence"]
variante2 = ["avec", "sans"]
variante3 = ["uni-gramme", "bi-gramme", "both"]
cross_val_scores = {}
for j in range(len(variante1)):
for k in range(len(variante2)):
for l in range(len(variante3)):
for i, (train_set, val_set) in enumerate(fold_indices):
score_folds = []
classifications_validation = []
nb_model = NaiveBayes(train_set, variante1[j], variante2[k], variante3[l]) # model fitted on the training set
for tweet_token in val_set["Tweet_Tokenized"]:
tweet_a_categoriser = " ".join(literal_eval(tweet_token)) # model evaluated on the validation set
classifications_validation.append(nb_model.classification(tweet_a_categoriser))
val_set["model_class"] = classifications_validation
metric = Metrics(val_set, root, algo_var.get())
score_folds.append(metric.get_accuracy())
cross_val_scores[f"[{variante1[j]}, {variante2[k]}, {variante3[l]}]"]=(np.mean(score_folds))
print(cross_val_scores)
max_key = max(cross_val_scores, key=cross_val_scores.get)
max_value = cross_val_scores[max_key]
metric.display(f"Model : {max_key} -> {max_value}")
elif selection == 'knn':
if isinstance(get_active_dataset(), type(None)):
messagebox.showwarning(title="Warning", message="Please load a training dataset in the CSV viewer")
else:
classifications_validation = []
df_train = copy.deepcopy(active_dataset)
## cross validation
cross_val_scores = {}
fold_indices = kfold_indices(df_train, k=2)
variante1 = [1,2,3]
variante2 = ["jaro"]#, "lcs", "damerau_levenshtein", "jaro", "cosine", "jaccard", "sorensen_dice", "qgram_dice"]
variante3 = ["pondéré", "majoritaire"]
cross_val_scores = {}
for j in range(len(variante1)):
for k in range(len(variante2)):
for l in range(len(variante3)):
for i, (train_set, val_set) in enumerate(fold_indices):
score_folds = []
classifications_validation = []
knn = KNN(train_set, variante1[j], variante2[k], variante3[l]) # model fitted on the training set
for tweet_token in val_set["Tweet_Tokenized"]:
tweet_a_categoriser = " ".join(literal_eval(tweet_token)) # model evaluated on the validation set
classifications_validation.append(knn.classification(tweet_a_categoriser))
val_set["model_class"] = classifications_validation
metric = Metrics(val_set, root, algo_var.get())
score_folds.append(metric.get_accuracy())
cross_val_scores[f"[{variante1[j]}, {variante2[k]}, {variante3[l]}]"]=(np.mean(score_folds))
print(cross_val_scores)
max_key = max(cross_val_scores, key=cross_val_scores.get)
max_value = cross_val_scores[max_key]
metric.display(f"Model : {max_key} -> {max_value}")
elif selection == 'naive_classification':
if isinstance(get_active_dataset(), type(None)):
messagebox.showwarning(title="Warning", message="Please load a dataset in the CSV viewer")
else:
nc = NaiveClassification(active_dataset)
classified_df = nc.get_classified()
set_active_dataset(classified_df)
metric = Metrics(classified_df, root, algo_var.get())
metric.display(train=False)
def show_model_stats():
"""Displays metrics computed on the active dataset
"""
selection = algo_var.get()
if selection=="knn":
metrics = Metrics(active_dataset, parent=root, model=selection)
metrics.display(train=False)
if selection=="naive_bayes":
metrics = Metrics(active_dataset, parent=root, model=selection)
metrics.display(train=False)
if selection=="naive_classification":
metrics = Metrics(active_dataset, parent=root, model=selection)
metrics.display(train=False)
# ENTRY POINT OF THE GUI IMPLEMENTATION
splash_root.destroy()
root = ctk.CTk()
root.geometry("1000x900")
root.title("X(Twitter) Sentiment Analysis - GUI")
paned_window = tk.PanedWindow(root, orient="vertical", borderwidth=0)
paned_window.pack(fill='both', expand=True)
upper_frame = ctk.CTkFrame(paned_window, width=600, height=600, border_width=0)
middle_frame = ctk.CTkFrame(paned_window, width=600, height=600, border_width=0)
###########
# STYLES #
###########
# Create style Object
style = ttk.Style(upper_frame)
# set ttk theme to "clam" which support the fieldbackground option
style.theme_use("clam")
style.configure("Treeview", background="white",
fieldbackground="#1e1b24", foreground="black")
style.configure("TPanedwindow", background="black")
############
# FRAMES ###
############
#upper frame
tree = ttk.Treeview(upper_frame, show="headings")
tree.pack(padx=20, pady=20, fill="both", expand=True)
status_label = ctk.CTkLabel(upper_frame, text="", padx=20, pady=10)
status_label.pack()
open_button = ctk.CTkButton(upper_frame, text="Open CSV file",command=open_csv_file)
open_button.pack(padx=20, pady=10)
#middle frame
csv_editor = Application(middle_frame)
csv_editor.pack()
open_button = ctk.CTkButton(middle_frame, text="Edit CSV file",command=csv_editor.loadCells)
open_button.pack(padx=50, pady=10)
menubar = Menu(root, background='#1E1B24', fg='white')
filemenu = Menu(menubar, tearoff=0, background='#1E1B24', fg='white')
filemenu.add_command(label="New", command=csv_editor.newCells) # add save dialog
# add save dialog
filemenu.add_command(label="Open", command=csv_editor.loadCells)
filemenu.add_command(label="Save as", command=csv_editor.saveCells)
filemenu.add_command(label="Exit", command=csv_editor.quit)
menubar.add_cascade(label="File", menu=filemenu)
menubar.add_command(label="Exit", command=csv_editor.quit)
root.config(menu=menubar, background="#1E1B24")
default_font = font.nametofont("TkTextFont")
default_font.configure(family="Helvetica")
root.option_add("*Font", default_font)
# down frame
algo_var = tk.StringVar()
algoFrame = ctk.CTkFrame(paned_window, width=600, height=600, border_width=0)
algoFrame.grid(column=0, row=0, padx=20, pady=20)
# create a radio button
naive_classif = ctk.CTkRadioButton(algoFrame, text='Dictionnary', value='naive_classification', variable=algo_var)
naive_classif.grid(column=0, row=0, ipadx=10, ipady=10)
knn = ctk.CTkRadioButton(algoFrame, text='KNN', value='knn', variable=algo_var)
knn.grid(column=1, row=0, ipadx=10, ipady=10, sticky=tk.E)
naive_bayes = ctk.CTkRadioButton(algoFrame, text='Naive Bayes', value='naive_bayes', variable=algo_var)
naive_bayes.grid(column=2, row=0, ipadx=10, ipady=10, sticky=tk.NS)
###########
buttonFrame = ctk.CTkFrame(paned_window, width=600, height=600, border_width=0)
buttonFrame.grid(column=0, row=0, padx=20, pady=20)
train = ctk.CTkButton(buttonFrame, text="Train model", command=train_model)
train.grid(column=0, row=0, ipadx=10, ipady=10)
test = ctk.CTkButton(buttonFrame, text="Test model on dataset", command=test_model_dataset)
test.grid(column=1, row=0, ipadx=10, ipady=10)
test_single_sentence = ctk.CTkButton(buttonFrame, text="Test on single input", command=test_model_single_input)
test_single_sentence.grid(column=2, row=0, ipadx=10, ipady=10)
stats = ctk.CTkButton(buttonFrame, text="Stats", command=show_model_stats)
stats.grid(column=3, row=0, ipadx=10, ipady=10)
paned_window.paneconfig(middle_frame, width=1000, height=450)
paned_window.paneconfig(upper_frame, width=1000, height=400)
paned_window.paneconfig(algoFrame, width=1000, height=50)
paned_window.paneconfig(buttonFrame, width=1000, height=50)
paned_window.add(middle_frame)
paned_window.add(upper_frame)
paned_window.add(algoFrame)
paned_window.add(buttonFrame)
root.mainloop()
# Set Interval
splash_root.after(1000, main)
# Execute tkinter
splash_root.mainloop() | Jakcrimson/pjeb_twitter_sentiment_analysis | gui/xsa.py | xsa.py | py | 26,096 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "customtkinter.set_appearance_mode",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "customtkinter.set_default_color_theme",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "sys.path.insert",
"line_number": 11,
"usage_type": "call"
},
{
... |
42194323986 | import datetime
import math
from sqlalchemy import desc, asc
from app.main import db
from app.main.model.product_price_history import ProductPriceHistory
from app.main.model.product import Product
def save_product_price_history(data):
    """Validate *data* and create a new product price history record.

    Expected keys (all strings): effective_date (YYYY-MM-DD),
    original_price, sale_price and product_id.

    Args:
        data (dict): raw request payload.

    Returns:
        tuple[dict, int]: response payload plus HTTP status; 201 on
        success, otherwise 200 with an ``errors`` block.
    """
    errors = {}
    # Presence checks.
    if data['effective_date'] == "":
        errors['effective_date'] = ['Price updated date must not be null!']
    if data['original_price'] == "":
        errors['original_price'] = ['Product original price must not be null!']
    if data['sale_price'] == "":
        errors['sale_price'] = ['Product sale price must not be null!']
    if data['product_id'] == "":
        errors['product_id'] = ['Product ID must not be null!']
    # Type checks. NOTE(review): str.isnumeric() rejects decimals such
    # as "12.50" — confirm prices are meant to be whole numbers.
    if data['original_price'].isnumeric() is False:
        errors['original_price'] = ['Product original price is not valid!']
    if data['sale_price'].isnumeric() is False:
        errors['sale_price'] = ['Product sale price is not valid!']
    if data['product_id'].isnumeric() is False:
        errors['product_id'] = ['Product ID is not valid!']
    # Foreign-key check.
    product = Product.query.filter_by(id=data['product_id']).first()
    if not product:
        errors['product_id'] = ['Product ID does not exist']
    if len(errors) > 0:
        response_object = {
            'status': 'FAILED',
            'message': 'Failed to create a new product price history!',
            'errors': errors
        }
        return response_object, 200
    # (product_id, effective_date) must be unique across records.
    product_price = ProductPriceHistory.query.filter_by(
        product_id=data['product_id'],
        effective_date=data['effective_date']).first()
    if product_price:
        errors['product price'] = 'Product price is already existed!'
        # BUG FIX: status used to be 'FAILED ' (trailing space), which
        # breaks clients matching on 'FAILED'.
        response_object = {
            'status': 'FAILED',
            'message': 'Failed to create a new product price history!',
            'errors': errors
        }
        return response_object, 200
    new_price_history = ProductPriceHistory(
        original_price=data['original_price'],
        sale_price=data['sale_price'],
        effective_date=datetime.datetime.strptime(
            data['effective_date'], '%Y-%m-%d'),
        product_id=data['product_id'],
        created_on=datetime.datetime.utcnow(),
        updated_on=datetime.datetime.utcnow()
    )
    db.session.add(new_price_history)
    db.session.commit()
    output = {
        'id': str(new_price_history.id),
        'original_price': str(new_price_history.original_price),
        'sale_price': str(new_price_history.sale_price),
        'effective_date': str(new_price_history.effective_date),
        'product_id': str(new_price_history.product_id),
        'created_on': str(new_price_history.created_on),
        'updated_on': str(new_price_history.updated_on),
    }
    response_object = {
        'status': 'SUCCESS',
        'message': 'A new product price history is created successfully!',
        'data': output
    }
    return response_object, 201
def get_all_product_price_history():
    """Return every product price history record.

    Returns:
        tuple[dict, int]: response payload and HTTP status 200.
    """
    records = []
    for history in ProductPriceHistory.query.all():
        records.append({
            'id': str(history.id),
            'original_price': str(history.original_price),
            'sale_price': str(history.sale_price),
            'effective_date': str(history.effective_date),
            'product_id': str(history.product_id),
            'created_on': str(history.created_on),
            'updated_on': str(history.updated_on),
        })
    data = {'product_price_history': records}
    respone_object = {
        'status': 'SUCCESS',
        'message': 'Sucessfully getting information of all product price!',
        'data': data
    }
    return respone_object, 200
def get_product_price_history(id):
    """Look up one product price history record by primary key.

    Args:
        id: primary key of the ProductPriceHistory row.

    Returns:
        tuple[dict, int]: response payload and HTTP status 200 — the
        record data on success, an ``errors`` block when the id is
        unknown.
    """
    record = ProductPriceHistory.query.filter_by(id=id).first()
    if not record:
        return {
            'status': 'FAILED',
            'message': 'Fail to get product price history',
            'errors': {'ID': ['Product Price History does not exist']}
        }, 200
    payload = {
        'id': str(record.id),
        'original_price': str(record.original_price),
        'sale_price': str(record.sale_price),
        'effective_date': str(record.effective_date),
        'product_id': str(record.product_id),
        'created_on': str(record.created_on),
        'updated_on': str(record.updated_on),
    }
    return {
        'status': 'SUCCESS',
        'message': 'Sucessfully getting information of product price history!',
        'data': payload
    }, 200
def delete_product_price_history(id):
    """Delete one product price history record by primary key.

    Args:
        id: primary key of the ProductPriceHistory row to remove.

    Returns:
        tuple[dict, int]: response payload and HTTP status 200.
    """
    product_price_history = ProductPriceHistory.query.filter_by(
        id=id).first()
    if not product_price_history:
        respone_object = {
            'status': 'ERROR',
            'message': 'Product price history does not exist!'
        }
        return respone_object, 200
    db.session.delete(product_price_history)
    db.session.commit()
    # Copy-paste fix: the old success message referred to "product log"
    # although this endpoint deletes a price history record.
    response_object = {
        'status': 'SUCCESS',
        'message': 'Successfully deleted the product price history!'
    }
    return response_object, 200
def update_product_price_history(id, data):
    """Update an existing product price history record.

    Args:
        id: primary key of the record to update.
        data (dict): new values — product_id, original_price, sale_price
            and effective_date (YYYY-MM-DD), all as strings.

    Returns:
        tuple[dict, int]: response payload and HTTP status 200.
    """
    product_price_history = ProductPriceHistory.query.filter_by(id=id).first()
    is_updated = False
    errors = {}
    if not product_price_history:
        errors['id'] = ["Product price history ID does not exist!"]
        return {
            'status': 'FAILED',
            'message': 'Cannot get Product price history!',
            'errors': errors
        }, 200
    # Presence checks.
    if data['product_id'] == "":
        errors['product_id'] = ['Product ID must not be null!']
    if data['original_price'] == "":
        errors['original_price'] = [
            'Product original price must not be null!']
    if data['sale_price'] == "":
        errors['sale_price'] = ['Product sale price must not be null!']
    if data['effective_date'] == "":
        errors['effective_date'] = [
            'Price effective date must not be null!']
    # Foreign-key check.
    product = Product.query.filter_by(id=data['product_id']).first()
    if not product:
        errors['product_id'] = ["Product ID does not exist!"]
    if len(errors) > 0:
        return {
            'status': 'FAILED',
            'message': "Can not update product price history!",
            'errors': errors
        }, 200
    # Uniqueness check on (product_id, effective_date).
    # BUG FIX: the original dereferenced the duplicate-lookup result even
    # when it was None (AttributeError crash) and then applied the update
    # to the duplicate row instead of the record fetched by *id*. A
    # conflict is now defined as a DIFFERENT existing row already holding
    # the requested pair, and the update targets product_price_history.
    duplicate = ProductPriceHistory.query.filter_by(
        product_id=data['product_id'],
        effective_date=data['effective_date']).first()
    if duplicate and duplicate.id != product_price_history.id:
        errors['effective_date'] = [
            "Effective date and product_id is already existed"]
        return {
            'status': 'FAILED',
            'message': "Can not update product price history!",
            'errors': errors
        }, 200
    if data['product_id'] != str(product_price_history.product_id):
        is_updated = True
        product_price_history.product_id = data['product_id']
    if data['original_price'] != product_price_history.original_price:
        is_updated = True
        product_price_history.original_price = data['original_price']
    if data['sale_price'] != product_price_history.sale_price:
        is_updated = True
        product_price_history.sale_price = data['sale_price']
    # Compare parsed datetimes: the original compared the raw string
    # against a datetime column, which never matched and always flagged
    # the record as updated.
    new_effective_date = datetime.datetime.strptime(
        data['effective_date'], '%Y-%m-%d')
    if new_effective_date != product_price_history.effective_date:
        is_updated = True
        product_price_history.effective_date = new_effective_date
    if is_updated is True:
        product_price_history.updated_on = datetime.datetime.utcnow()
    db.session.commit()
    product_price_data = {
        'id': str(product_price_history.id),
        'original_price': str(product_price_history.original_price),
        'sale_price': str(product_price_history.sale_price),
        'effective_date': str(product_price_history.effective_date),
        'product_id': str(product_price_history.product_id),
        'created_on': str(product_price_history.created_on),
        'updated_on': str(product_price_history.updated_on),
    }
    return {
        'status': 'SUCCESS',
        'message': "Successfully updated product price history!",
        'data': product_price_data
    }, 200
def get_product_price_history_with_pagination(args):
    """Return one page of product price history records.

    Supported query params in *args*: page_size, current_page, key_word
    (matched against both prices and the product name), sort_field and
    sort_order (-1 for descending, anything else ascending).

    Args:
        args (dict): request query parameters.

    Returns:
        tuple[dict, int]: paged payload and HTTP status 200.
    """
    # Defaults.
    page_size = 10
    current_page = 1
    key_word = None
    sort_field = None
    sort_order = -1
    if "page_size" in args:
        page_size = int(args['page_size'])
    if "current_page" in args:
        current_page = int(args['current_page'])
    if "key_word" in args:
        key_word = args['key_word'].lower()
    if "sort_field" in args:
        sort_field = args['sort_field']
    if "sort_order" in args:
        sort_order = int(args['sort_order'])
    # Ordering: default to newest first when no explicit sort is given.
    if sort_field is None or sort_order is None:
        query = ProductPriceHistory.query.order_by(
            ProductPriceHistory.created_on.desc())
    elif sort_order == -1:
        query = ProductPriceHistory.query.order_by(desc(sort_field))
    else:
        query = ProductPriceHistory.query.order_by(asc(sort_field))
    page_rows = query.limit(page_size).offset((current_page - 1) * page_size)
    total = query.count()
    total_pages = math.ceil(total / page_size)
    # BUG FIX: the original wrapped this comparison in math.ceil(),
    # turning a boolean into 0/1; the plain comparison states whether
    # any rows remain after this page.
    next_page = total - page_size * current_page > 0
    # The two near-identical row-building branches of the original are
    # merged: a keyword only filters rows out, the payload is the same.
    output = []
    for price in page_rows:
        product = Product.query.filter_by(id=price.product_id).first()
        if key_word is not None and not (
                key_word in str(price.original_price)
                or key_word in str(price.sale_price)
                or key_word in product.name.lower()):
            continue  # keyword given but no field matches: skip the row
        output.append({
            'id': str(price.id),
            'original_price': str(price.original_price),
            'sale_price': str(price.sale_price),
            'effective_date': str(price.effective_date),
            'product': {
                'id': str(product.id),
                'name': product.name,
            },
        })
    data = {
        'products_price_history': output,
        'total_pages': total_pages,
        'current_page': current_page,
        'has_next_page': next_page,
    }
    response_object = {
        'status': 'SUCCESS',
        'message': 'Sucessfully getting information of all products price history',
        'data': data
    }
    return response_object, 200
| viettiennguyen029/recommendation-system-api | app/main/service/product_price_history_service.py | product_price_history_service.py | py | 14,012 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "app.main.model.product.Product.query.filter_by",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "app.main.model.product.Product.query",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "app.main.model.product.Product",
"line_number": 37,
... |
73323091304 | from django.shortcuts import render
from Process.models import *
from django.contrib.auth.decorators import login_required
from django.contrib.auth import get_user_model,login,logout,authenticate
from django.http import HttpResponse
from django.shortcuts import redirect
from datetime import datetime
from django.contrib.auth.models import Group
from pathlib import Path
from .forms import UploadFileForm
from mail import send_email
import csv
import uuid
import os
BASE_DIR = Path(__file__).resolve().parent.parent
# Create your views here.
def login_view(request):
    """Log a user in with email/password credentials.

    On first use this also bootstraps the special "commission" account
    and group. GET renders the login form; POST authenticates and either
    follows the ``?next=`` redirect or confirms the login.
    """
    try:
        Group.objects.get(name="commission")
    except Group.DoesNotExist:
        # First run: create the commission user and its group.
        # (The original used a bare `except:`, which also swallowed
        # unrelated failures such as database errors.)
        usermodel = get_user_model()
        com_user = usermodel()
        com_user.email = "commission@mail.com"
        com_user.licence = "commission"
        com_user.set_password(raw_password="1234")
        com_user.save()
        com_group = Group.objects.create(name="commission")
        com_user.groups.add(com_group)
    if request.method != 'POST':
        return render(request, "login.html")
    email = request.POST['email']
    password = request.POST['password']
    user = authenticate(email=email, password=password)
    next_url = request.GET.get('next')
    if user is None:
        return HttpResponse("Bad credentials.")
    login(request, user)
    if next_url:
        # NOTE(review): next_url comes from the query string unchecked;
        # consider validating it (url_has_allowed_host_and_scheme) to
        # avoid an open redirect.
        return redirect(next_url)
    return HttpResponse("You are now logged in.")
### DONE rework filters and data packing to include filtering by groups. will need to test.
## DONE add logout option
## DONE finish work on the VOTING phase.
## DONE make a check BEFORE entering the voting page to see if the user is on the same phase as the election.
@login_required
def voting_view(request,key):
if request.method == 'POST':
currentuser = request.user
user = key_of.objects.get(key = key).user
if user == currentuser:
if request.POST.get("logout"):
logout(request)
return redirect(request.get_full_path())
elif user.votestatus == election.objects.latest('id').Phase and election.objects.latest('id').Phase != 2 :
votees = [value for key, value in request.POST.items() if key.startswith('checkbox')]
if len(votees) > 5:
request = HttpResponse("no.")
return request
elif len(votees) < 1:
request = HttpResponse("Nuh-uh.")
return request
else:
for item in votees:
usermodel = get_user_model()
newballot = ballot()
newballot.votes = usermodel.objects.get(id = item)
newballot.votetype = election.objects.latest('id').Phase
newballot.election = election.objects.latest('id')
newballot.save()
user.votestatus += 1
user.save()
send_email(email = user.email,advance = "personal")
# DONE put email system here that confirms that someone successfully voted.
response = HttpResponse("Successfully voted.")
return response
else:
response = HttpResponse("You have already voted for the current step of the election.")
return response
else:
response = HttpResponse("You are trying to vote on someone elses ballot.")
return response
try:
if request.method == 'GET':
currentuser = request.user
if currentuser == key_of.objects.get(key = key).user:
if election.objects.latest('id').Phase == 0:
if currentuser.votestatus == 0:
#pulling info from database here v
group = Group.objects.get(name = election.objects.latest('id').name_of_election)
user = key_of.objects.get(key = key).user
local_cent = user.is_within_set.latest('id').local_center
votees = [is_within.user for is_within in is_within.objects.filter(local_center = local_cent) if is_within.user.groups.filter(name = group.name).exists()]
voteesdict = {}
for votee in votees:
member = {}
name = f"{votee.first_name} {votee.last_name}"
member['name'] = name
member['id'] = votee.id
member['licence'] = votee.licence
voteesdict[f"votee_{len(voteesdict)}"] = member
currentuser = { 'local_center':local_cent.name, 'name':f"{user.first_name} {user.last_name}", 'licence' : user.licence}
context = { 'currentuser': currentuser , 'key':key, 'votees' : voteesdict }
return render(request, 'ballot.html', context = context)
else:
return HttpResponse("You have already voted.")
if election.objects.latest('id').Phase == 1:
if currentuser.votestatus == 1:
group = Group.objects.get(name = election.objects.latest('id').name_of_election)
user = key_of.objects.get(key = key).user
local_cent = user.is_within_set.latest('id').local_center
#check this line if no users show up in phase 1
votees = sorted([ iswithin.user for iswithin in is_within.objects.filter(local_center = local_cent) if iswithin.user.groups.filter(name = group.name).exists() and ballot.objects.filter(votes = iswithin.user,election = election.objects.latest('id') , votetype = 0).exists() ],key = lambda member: ballot.objects.filter(votes = member,election = election.objects.latest('id'),votetype = 0).count(), reverse=True )
# [is_within.user for is_within in is_within.objects.filter(local_center = local_cent) if is_within.user.groups.filter(name = group.name).exists() and ballot.objects.filter(votes = is_within.user, votetype = 1, election = election.objects.latest('id')).exists()]
# sorted([ iswithin.user for iswithin in iswithinset if iswithin.user.groups.filter(name = group.name).exists() ],key = lambda member: ballot.objects.filter(votes = member,election = election.objects.latest('id'),votetype = votetype).count(), reverse=True )
voteesdict = {}
for votee in votees:
member = {}
name = f"{votee.first_name} {votee.last_name}"
member['name'] = name
member['id'] = votee.id
member['licence'] = votee.licence
member['votes'] = ballot.objects.filter(votes = votee, votetype = 1, election = election.objects.latest('id')).count()
voteesdict[f"votee_{len(voteesdict)}"] = member
currentuser = { 'local_center':local_cent.name, 'name':f"{user.first_name} {user.last_name}", 'licence' : user.licence}
context = { 'currentuser': currentuser , 'key':key, 'votees' : voteesdict }
# # to be later ordered in html with {% regroup %
#TODO ...make ballot2.
return render(request, 'ballot.html', context = context)
else:
return HttpResponse("You have already voted.")
if election.objects.latest('id').phase == 2:
response = HttpResponse("There is no election going on right now.")
return response
else:
response = HttpResponse("You are trying to access a ballot which does not belong to you.")
return response
except:
return HttpResponse("Incorrect Key")
## DONE setup commision user.
#DONE add a check to see if the logged in user is part of the commision group
@login_required
def Commission_view(request):
#DONE make the displayed current election results based on the current phase of the election. ex: if vote phase is 1, filter all votes by vote phase 1 and only display those results.
# the commision check V
if request.user.groups.filter(name = "commission").exists():
if request.method == "POST":
#AKA if you selected an election, display that election's votes.
form = UploadFileForm(request.POST,request.FILES)
#DONE add a filter for the voting phase of the election instead of pulling all of the ballots. maybe? add both types of votes into the display.
#DONE add a query to the following code that looks up groups tied to each election and only pulls voters from those.
# must check if the query works later.
if request.POST.get("election"):
electionInstance = election.objects.get(id = request.POST["election"])
if electionInstance == election.objects.latest('id') and electionInstance.Phase != 2:
return redirect(Commission_view)
if electionInstance.Phase == 2:
votetype = 1
else:
votetype = electionInstance.Phase
date = electionInstance.date
votes = {}
regions = [item for item in regional_center.objects.all()]
history_records = is_within.history.filter(history_date__lte=date)
iswithins = [iswithin.instance for iswithin in history_records]
for region in regions:
votes[region] = {}
locals = [item for item in region.local_center_set.all()]
for local in locals:
votes[region][local] = {}
group = Group.objects.get(name = electionInstance.name_of_election)
##iswithin object
## the following just filters the members by information on if they were in the election selected based on their groups.
members = sorted([ iswithin.user for iswithin in iswithins if iswithin.user.groups.filter(name = group.name).exists() and iswithin.group == group and iswithin.local_center == local ],key = lambda member: ballot.objects.filter(votes = member,election = electionInstance,votetype = votetype).count(), reverse=True )
for member in members:
# Ballot model got changed so it keeps track of which election it is tied to.
count = ballot.objects.filter(votes = member,election = electionInstance,votetype = votetype).count()
votes[region][local][member] = count
elections = [item for item in election.objects.all()]
usermodel = get_user_model()
votercount = usermodel.objects.filter(groups__in = [group]).count()
context = {'votes' : votes, 'elections' : elections, 'election':electionInstance, 'history': "yep.",'votercount':votercount, 'form':UploadFileForm()}
return render(request, 'commission.html', context = context)
elif request.POST.get("advance"):
#DONE make a function that will generate a new key for each user after each voting step, and then email that information again.
try:
LatestElectionPhase = election.objects.latest('id').Phase
except Exception as e:
LatestElectionPhase = 2
print(e)
if request.POST.get("name_of_election") and LatestElectionPhase == 2:
# important step I (deletes all keys from previous election.)
# V
key_of.objects.all().delete()
file = BASE_DIR/"Resources/candidatelist.csv" ### DONE this needs to be changed to the file that the csv upload points to as well.
try:
data = csv.reader(open(file),delimiter =",")
except:
return HttpResponse("No CSV file uploaded.")
finlist = {}
for row in data:
if row[0] != "first_name":
if row[3] not in finlist:
finlist[row[3]] = []
if row[4] not in finlist[row[3]]:
finlist[row[3]].append(row[4])
print(finlist)
# populates database with local and regional centers and links them together
list = [key for key in dict.keys(finlist)]
for region in list:
try:
reg = regional_center.objects.get(name = region)
except Exception as e:
print(e)
reg = regional_center()
reg.name = region
reg.save()
for center in finlist[region]:
try:
loc = reg.local_center_set.get(name = center)
except Exception as e:
print(e)
loc = local_center()
loc.name = center
loc.regional_center = regional_center.objects.get(name = region)
loc.save()
#creating new election and group tied to election.
name = request.POST["name_of_election"]
group = Group.objects.create(name = name)
new_election = election()
new_election.name_of_election = name
new_election.Phase = 0
new_election.date = datetime.now()
new_election.save()
new_election.groups.add(group)
#reading provided CSV file and generating/editing users.
file = BASE_DIR/"Resources/candidatelist.csv" ### DONE this is going to be the file that is defined by the function that uploads the csv file in the first place
data = csv.reader(open(file),delimiter =",")
usermodel = get_user_model()
licences = [item.licence for item in usermodel.objects.all()]
for row in data:
if row[0] != "first_name":
if row[5] not in licences:
model = usermodel()
model.first_name = row[0]
model.last_name = row[1]
password = usermodel.objects.make_random_password(length = 10)
### here is where you pass the password to the email script.
model.set_password(raw_password=password)
email = row[2]
model.email = email
### here you pull the email address for the email script.
model.licence = row[5]
model.votestatus = 0
model.save()
model.groups.add(group) #adds the new model to the group of the new election.
send_email(email = email,password = password)
### this generates keys for all users for the current election.
keyof = key_of()
keyof.user = model
key = uuid.uuid4()
keyof.key = key
keyof.save()
regionalcent = regional_center.objects.get(name = row[3])
localcent = regionalcent.local_center_set.get(name = row[4])
isw = is_within()
isw.user = model
isw.local_center = localcent
isw.group = group
isw.save()
send_email(email = email, key = key, advance = "start")
else:
user = usermodel.objects.get(licence = row[5])
regionalcent = regional_center.objects.get(name = row[3])
localcent = regionalcent.local_center_set.get(name = row[4])
isw = is_within.objects.get(user = user)
isw.local_center = localcent
isw.group = group
isw.save()
user.votestatus = 0
user.save()
user.groups.add(group)#adds the existing user model to the group of the new election.
#generating new keys for users that exist in database.
keyof = key_of()
keyof.user = user
key = uuid.uuid4()
keyof.key = key
keyof.save()
send_email(email = user.email, key = key, advance = "start")
new_election.date = datetime.now()
new_election.save()
return redirect(Commission_view)
elif election.objects.latest('id').Phase == 0:
#wiping all keys used in the candidacy part of the election, to generate brand new ones.
key_of.objects.all().delete()
elec = election.objects.latest('id')
elec.Phase = 1
elec.save()
usermodel = get_user_model()
group = Group.objects.get(name = elec.name_of_election)
voters = usermodel.objects.filter(groups__in = [group])
for voter in voters:
voter.votestatus = 1
voter.save()
keyof = key_of()
keyof.user = voter
key = uuid.uuid4()
keyof.key = key
keyof.save()
send_email(email = voter.email, key = key, advance = "advance")
## Send emails here to access the Voting phase.
return redirect(Commission_view)
elif election.objects.latest('id').Phase == 1:
#wiping key database again just in case. can be removed later if it causes errors.
key_of.objects.all().delete()
elec = election.objects.latest('id')
elec.Phase = 2
elec.save()
## query all users with the group that has the name of the last election then iset their votestatus to 2
usermodel = get_user_model()
group = Group.objects.get(name = elec.name_of_election)
voters = usermodel.objects.filter(groups__in = [group])
for voter in voters:
voter.votestatus = 2
voter.save()
return redirect(Commission_view)
else:
return HttpResponse("Does not return name of election, but current phase is 2.")
elif form.is_valid():
csv_file = request.FILES['file']
file_path = BASE_DIR/"Resources/candidatelist.csv"
if os.path.exists(file_path):
os.remove(file_path)
with open(file_path, 'wb+') as destination:
for chunk in csv_file.chunks():
destination.write(chunk)
return redirect(Commission_view)
elif request.POST.get("logout"):
logout(request)
return redirect(Commission_view)
else:
return redirect(Commission_view)
if request.method == "GET":
usermodel = get_user_model()
try:
votes = {}
regions = [region for region in regional_center.objects.all()]
electionInstance = election.objects.latest('id')
try:
group = Group.objects.get(name = electionInstance.name_of_election)
votercount = usermodel.objects.filter(groups__in = [group]).count()
except Exception as e:
print(e)
votercount = 0
if election.objects.latest('id').Phase != 2:
votetype = election.objects.latest('id').Phase
else:
votetype = 1
for region in regions:
votes[region] = {}
locals = [local for local in region.local_center_set.all()]
for local in locals:
votes[region][local] = {}
group = Group.objects.get(name = election.objects.latest('id').name_of_election)
iswithinset = [item for item in local.is_within_set.all()]
members = sorted([ iswithin.user for iswithin in iswithinset if iswithin.user.groups.filter(name = group.name).exists() ],key = lambda member: ballot.objects.filter(votes = member,election = election.objects.latest('id'),votetype = votetype).count(), reverse=True )
for member in members:
count = ballot.objects.filter(votes = member, votetype = votetype , election = election.objects.latest('id')).count()
votes[region][local][member] = count
except Exception as e:
print(e)
electionInstance = "None"
votercount = 0
elections = [item for item in election.objects.all()]
context = {'votes' : votes,'elections' : elections, 'election' : electionInstance, 'votercount':votercount, 'form': UploadFileForm()}
return render(request, 'commission.html', context = context)
#DONE make commision page.
else:
return HttpResponse("You are not a member of the commission.")
### when creating a new election, link it to a group (requires adding a new field to election model) DONE
### make a script (that runs when new election is created) that will check if all users provided in a given csv
## file are in the database (if not will generate new ones.), and put them in the current elections group. DONE
### when showing user votes in commision view filter the users by group they belong to DONE | ZidbeRR/Projekat-Grupa-2 | projekat/eIzbori/Process/views.py | views.py | py | 24,794 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pathlib.Path",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.models.Group.objects.get",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.models.Group.objects",
"line_number": 23,
"usage_type": "at... |
8860145335 | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
seeds = [6969, 4242, 6942, 123, 420, 5318008, 23, 22, 99, 10]
delta_vals = [0.1, 0.2, 0.3, 0.4, 0.6, 0.7, 0.8, 0.9, 0.001, 0.005, 0.01, 0.05, 0.5, 1, 2, 3, 5]
delta_vals = [1, 2, 3, 5]
root_folder = 'sumo/test_results'
test_results = 'newoptim-linear-False-test_seed_5318008_robust_factor_-1/0.01-train_data.csv'
full_path = os.path.join(root_folder, test_results)
das_data = pd.read_csv(full_path)
delta_vals = [0.01,0.5]
seeds = [99, 22, 23, 10]
for delta in delta_vals[:2]:
for seed in seeds:
test_results = f'newoptim-linear-False-test_seed_{seed}_robust_factor_-1/{delta}-train_score_data.csv'
full_path = os.path.join(root_folder, test_results)
das_data = pd.read_csv(full_path)
plt.plot(das_data['scores'])
plt.xlabel("Episode")
plt.ylabel('Return')
plt.title(f'Episode Return during training, delta = {delta}, seed = {seed}')
plt.savefig(f'lossplots/Score-{delta}-{seed}.png')
plt.clf() | TheGoldenChicken/robust-rl | examineloss.py | examineloss.py | py | 1,060 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.join",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "pandas.read_csv",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_nu... |
10492814030 | import matplotlib.pyplot as plt
import pyk4a
from pyk4a import Config, PyK4A
import open3d as o3d
import time
import numpy as np
import copy
import cv2
from modern_robotics import *
if __name__ == "__main__":
k4a = PyK4A(
Config(
color_resolution=pyk4a.ColorResolution.RES_720P,
depth_mode=pyk4a.DepthMode.NFOV_UNBINNED,
synchronized_images_only=True
)
)
k4a.start()
# getters and setters directly get and set on device
k4a.whitebalance = 4500
assert k4a.whitebalance == 4500
k4a.whitebalance = 4510
assert k4a.whitebalance == 4510
idx = 0
pcd_list = []
capture_idx = 4
# Capture
capture = k4a.get_capture()
rgb = capture.color
# ROI 선택
x, y, w, h = cv2.selectROI(rgb)
# 포인트 클라우드 ROI 마스크 생성
mask = np.zeros(rgb.shape[:2], dtype=np.uint8)
mask[y:y+h, x:x+w] = 1
mask = mask.astype(np.int16)
indicies = np.where(mask==1)
mask = np.expand_dims(mask, axis=-1)
cv2.destroyAllWindows()
time.sleep(1)
coord_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=0.5, origin=[0, 0, -1])
while True:
if idx == capture_idx:
break
print('capture idx {0}'.format(idx))
capture = k4a.get_capture()
rgb = capture.color
raw_pcd = capture.transformed_depth_point_cloud
cv2.imshow('Pointcloud capture', rgb)
if cv2.waitKey(0) == ord('s'):
idx += 1
rgb = cv2.cvtColor(rgb, cv2.COLOR_BGR2RGB)
rgb = rgb[:, :, :3].astype(np.float32) / 255
raw_pcd = raw_pcd * mask
rgb = rgb * mask
raw_pcd = np.reshape(raw_pcd, [-1, 3])
rgb = np.reshape(rgb, [-1, 3])
max_range_mask = np.where(np.logical_and(raw_pcd[:, 2]<550, raw_pcd[:, 2]>400))
raw_pcd = raw_pcd[max_range_mask]
rgb = rgb[max_range_mask]
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(raw_pcd)
pcd.colors = o3d.utility.Vector3dVector(rgb)
# Compute mean distance of points from origin
# distances = np.sqrt(np.sum(np.square(np.asarray(pcd.points)), axis=1))
# mean_distance = np.mean(distances)
# pcd.scale(1 / mean_distance, center=pcd.get_center())
# center = pcd.get_center()
# new_origin = [0, 0, 0]
# translation_vector = np.subtract(new_origin, center)
# pcd.translate(translation_vector)
pcd_list.append(pcd)
for i in range(len(pcd_list)):
print('save_pointcloud {0}'.format(i))
# Save point cloud
o3d.io.write_point_cloud('./4way_pointclouds/test_pointcloud_{0}.pcd'.format(i), pcd_list[i])
| chansoopark98/3D-Scanning | test_code/capture_by_button.py | capture_by_button.py | py | 2,887 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pyk4a.PyK4A",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pyk4a.Config",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pyk4a.ColorResolution",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "pyk4a.DepthMode",... |
74004605223 | import os
from openai import OpenAI
from sentence_transformers import SentenceTransformer
from typing import Protocol
import numpy.typing as npt
import numpy as np
import config
class EmbeddingMaker(Protocol):
def encode(self, text: str) -> npt.NDArray[np.float32]:
...
class AI:
def __init__(self) -> None:
self.client: OpenAI = OpenAI(
api_key=os.environ["OPENAI_API_KEY"],
)
self.embedder: EmbeddingMaker = SentenceTransformer(config.model_name)
def encode(self, text: str) -> npt.NDArray[np.float32]:
return self.embedder.encode(text)
def ask1(self, prompt: str) -> str:
messages = [
{"role": "system", "content": "You are a document scanner."},
{"role": "user", "content": prompt},
]
chatgpt = self.client.chat.completions.create(
model="gpt-3.5-turbo",
messages=messages,
temperature=0.3,
n=1,
)
results = chatgpt.choices[0].message.content
return results or ""
| capgoai/doc_search | api/ai.py | ai.py | py | 1,070 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "typing.Protocol",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "numpy.typing.NDArray",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "numpy.typing",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "numpy.float32"... |
29867512033 | import os
import json
import shelve
from jsonobject import JsonObject
from constants import *
class TranslationDB():
def __init__(self):
if CLEAN:
self.db = shelve.open(TRANSLATION_CACHE_NAME, flag='n')
self.db = shelve.open(TRANSLATION_CACHE_NAME)
if len(self.db) == 0:
for root, dirs, files in os.walk(TRANSLATION_DATA):
for json_file in files:
with open(os.path.join(root, json_file), 'r', encoding="utf-8") as f:
translations = json.load(f)
for o in translations:
self.db[o["key"]] = o["value"]
def data(self, name):
return self.db[name]
if __name__ == "__main__":
t = TranslationDB() | Jugbot/FEH-Automation | database/translationdb.py | translationdb.py | py | 782 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "shelve.open",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "shelve.open",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.walk",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 17,... |
12366302032 | """Module containing a framework for unittesting of AMPLE modules"""
__author__ = "Felix Simkovic"
__date__ = "22 Mar 2016"
__version__ = "1.0"
import glob
import logging
import os
import sys
from ample.constants import AMPLE_DIR
from unittest import TestLoader, TextTestRunner, TestSuite
# Available packages. Hard-coded for now to show visually what we have in
# argparse module. Not needed otherwise
PACKAGES = ["ensembler", "modelling", "parsers", "util"]
logger = logging.getLogger(__name__)
def add_cmd_options(parser):
parser.add_argument(
'-b', dest='buffer', action="store_false", default=True, help="debugging by printing print messages"
)
parser.add_argument('test_cases', nargs='*', help="[ {0} ]".format(" | ".join(PACKAGES)))
parser.add_argument('-v', dest="verbosity", default=2, type=int, help="level of verbosity [default: 2]")
class AMPLEUnittestFramework(object):
"""Framework to run Ample unittesting"""
def run(self, buffer=False, cases=None, pattern="test*.py", verbosity=2):
"""Main routine for running the test cases"""
suite = SuiteLoader().load_suite(AMPLE_DIR, cases=cases, pattern=pattern)
if int(suite.countTestCases()) <= 0:
msg = 'Could not find any tests to run in directory: {0}'.format(AMPLE_DIR) + os.linesep
sys.stderr.write(msg)
sys.exit(1)
logging.disable(logging.CRITICAL)
TextTestRunner(verbosity=verbosity, buffer=buffer).run(suite)
logging.disable(logging.NOTSET)
class SuiteLoader(object):
"""Loader designed to obtain all test cases in a package"""
def load_suite(self, directory, pattern="test*.py", cases=None):
"""Load a unittest test suite"""
# If we do not have any test cases then we can search for some in
# the specified directory.
if not cases:
search_pattern = os.path.join(directory, "*")
cases = [os.path.basename(folder) for folder in glob.glob(search_pattern) if os.path.isdir(folder)]
return self._load_suite(cases, pattern, directory)
def _load_suite(self, cases, pattern, directory):
suite = TestSuite()
for case in cases:
path = os.path.join(directory, case)
try:
_suite = TestLoader().discover(path, pattern=pattern, top_level_dir=directory)
suite.addTests(_suite)
del _suite
except ImportError:
logger.debug("*** not a package: {0} ***".format(path))
return suite
| rigdenlab/ample | ample/testing/unittest_util.py | unittest_util.py | py | 2,557 | python | en | code | 6 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "ample.constants.AMPLE_DIR",
"line_number": 34,
"usage_type": "argument"
},
{
"api_name": "ample.constants.AMPLE_DIR",
"line_number": 36,
"usage_type": "argument"
},
{
"api... |
21439179828 | import scrapy
class QuotesSpider(scrapy.Spider):
name = "reviews"
start_urls = [
'https://domicilios.com/restaurantes/bogota/ppc-suba-menu?t=comentarios',
]
def parse(self, response):
for review in response.css("#reviewList > li"):
yield {
'text': review.css("[itemprop='description']::text").get(),
'author': review.css("div.small").css('[itemprop="author"]::text').get(),
'date': review.css("[itemprop='datePublished']::text").get(),
'previous_orders': review.css("div.small::text")[1].get(),
'previous_reviews': review.css("div.small::text")[2].get(),
'worstRating': review.css("[itemprop='worstRating']::attr(content)").get(),
'ratingValue': review.css("[itemprop='ratingValue']::attr(content)").get(),
'bestRating': review.css("[itemprop='bestRating']::attr(content)").get()
}
# No es posible buscar un link para la siguiente página porque domicilios
# está escrito en JavaScript, por lo que hay que interactuar con un navegador automatizado | VolodyaCO/e-Marketing_Konrad2019 | spider.py | spider.py | py | 1,152 | python | es | code | 1 | github-code | 36 | [
{
"api_name": "scrapy.Spider",
"line_number": 4,
"usage_type": "attribute"
}
] |
10524711951 | #!/usr/bin/python
import os
from os.path import expanduser
from dotenv import load_dotenv
import mysql.connector
def se_db_run_cmd(query,values):
home = expanduser("~")
config_path = home + "/.app_config/.env_db"
load_dotenv(dotenv_path=config_path)
try:
mydb = mysql.connector.connect(
host=os.getenv("MYSQL_HOST"),
user=os.getenv("MYSQL_USER"),
password=os.getenv("MYSQL_PASSWORD"),
database=os.getenv("MYSQL_DATABASE")
)
mycursor = mydb.cursor()
if isinstance(values, tuple):
mycursor.execute(query, values)
elif isinstance(values, list):
mycursor.executemany(query, values)
else:
print('Unsupported MySQL Datastructure!')
mydb.commit()
print(mycursor.rowcount, "record inserted.")
except mysql.connector.Error as err:
print("MySQL Error: {}".format(err))
if __name__ == "__main__":
se_db_run_cmd()
| jsanchezdevelopment/se_project | se_test/se_db/se_db_run_cmd.py | se_db_run_cmd.py | py | 926 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.expanduser",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "dotenv.load_dotenv",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "mysql.connector.connector.connect",
"line_number": 14,
"usage_type": "call"
},
{
"api_name":... |
27410425777 | from io import UnsupportedOperation
import numpy as np
import unittest
class IntegerEncoder:
"""Encoder of integer values into a set of other encoding formats. (Binary and grey-decoded) are only supported now.
"""
def __init__(self) -> None:
return
def __binary_encoder(self, value: int, number_of_bits: int = 32) -> list:
""" a function to encode a given integer into its binary representation or 2's complement if a -ve integer was given.
:param value: The integer value to be encoded.
:type value: int
:param number_of_bits: the width of the binary representation
:type number_of_bits: int
:returns: list of integers as the binary representation of the given value
:rtype: list[int]
"""
return [int(bit) for bit in np.binary_repr(value , number_of_bits)]
def __grey_encoder(self, value: int , number_of_bits: int = 32) -> list:
""" a function to encode a given integer into its grey encoded representation.
:param value: The integer value to be encoded.
:type value: int
:param number_of_bits: the width of the representation
:type number_of_bits: int
:returns: list of integers as the grey encoded representation of the given value
:rtype: list[int]
"""
#gets the binary representation and reverse it to start with the Least bit
binary_repr = self.__binary_encoder(value , number_of_bits)
binary_repr.reverse()
grey_encoded = []
for i in range(len(binary_repr) - 1):
#the grey encoder formula is (gn = 1 - bn if bn == 1 else gn = bn)
if binary_repr[i + 1] == 1:
grey_encoded.append(1 - binary_repr[i])
else:
grey_encoded.append(binary_repr[i])
#Leave the last bit as it's
grey_encoded.append(binary_repr.pop())
grey_encoded.reverse()
return grey_encoded
def encode(self, value: int, number_of_bits: int = 32, type: str = "binary") -> list:
""" a function to encode a given integer into the slected encodings type
:param value: The integer value to be encoded.
:type value: int
:param number_of_bits: the width of the representation
:type number_of_bits: int
:param type: Type of encoding to be applied to the value.
:type type: str
:returns: list of integers as the encoded representation of the given value
:rtype: list[int]
"""
if type == "binary":
return self.__binary_encoder(value , number_of_bits)
elif type == "grey":
return self.__grey_encoder(value , number_of_bits)
else:
raise UnsupportedOperation("This type of encoding is not supported.")
class IntegerEncoderTest(unittest.TestCase):
def test_binary_encoding(self):
encoder = IntegerEncoder()
self.assertEqual(encoder.encode(0 , 4 , 'binary') , [0 , 0 , 0 , 0])
self.assertEqual(encoder.encode(15 , 8 , 'binary') , [0 , 0 , 0 , 0 , 1 , 1 , 1 , 1])
self.assertEqual(encoder.encode(128 , 4 , 'binary') , [1 , 0 , 0 , 0 , 0 , 0 , 0 , 0])
self.assertEqual(encoder.encode(0 , 64 , 'binary') , [0] * 64)
def test_grey_encoding(self):
encoder = IntegerEncoder()
self.assertEqual(encoder.encode(0 , 4 , 'grey') , [0 , 0 , 0 , 0])
self.assertEqual(encoder.encode(59 , 8 , 'grey') , [0 , 0 , 1 , 0 , 0 , 1 , 1 , 0])
self.assertEqual(encoder.encode(15 , 8 , 'grey') , [0 , 0 , 0 , 0 , 1 , 0 , 0 , 0])
self.assertEqual(encoder.encode(0 , 64 , 'grey') , [0] * 64)
if __name__ == '__main__':
unittest.main()
| skycoin/kcg-tiletool | int-encoder/IntegerEncoder.py | IntegerEncoder.py | py | 3,890 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.binary_repr",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "io.UnsupportedOperation",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "unittest.TestCase",
"line_number": 74,
"usage_type": "attribute"
},
{
"api_name": "unit... |
33930837671 | from aiogram import Bot, types
from aiogram.dispatcher import Dispatcher
from aiogram.utils import executor
from bot_keyboards import (first_start_keyboard, menu_keyboard)
TOKEN = '5669721003:AAHm1uNSZyJXRw43GZH84cvwTXsmjD833zY'
bot = Bot(token=TOKEN)
dp = Dispatcher(bot)
@dp.message_handler(commands=['start','help'])
async def send_welcome(msg: types.Message):
await msg.answer(f'Я ОЛХ бот, Привет,{msg.from_user.first_name}',
reply_markup=first_start_keyboard())
@dp.message_handler(lambda message: message.text == 'Отримати підписку')
async def start_info(message: types.Message):
await bot.send_message(
message.from_user.id, ''''В якості ознайомлення з ботом,
вам <b>Безкоштовний доступ</b> пошук за 1 посиланням
Для того, щоб скористатися сервісом необхідно:
1)Сформувати пошук на сайті <b>OLX</b>
2)Натиснути кнопку "Додати посилання"
3)Відправити посилання в повідомленні нижче''',
reply_markup=menu_keyboard(),
parse_mode='HTML'
)
@dp.message_handler(lambda message: message.text == 'Отримати підписку')
async def add_new_olx_link(message: types.Message):
await bot.send_message(
message.from_user.id,
'Введіть посилання'
)
@dp.message_handler(content_types=['text'])
async def add_link_step_2(message: types.Message):
print('t')
link = message.text.lower()
#
# @dp.message_handler(content_types=['text'])
# async def get_text_messages(msg: types.Message):
# if msg.text.lower() == 'привет':
# await msg.answer('Привет!')
# else:
# await msg.answer('Моя твоя не понимать!!!(')
if __name__ == '__main__':
executor.start_polling(dp) | MarietaKsenia/olxBot | repeat.py | repeat.py | py | 1,997 | python | uk | code | 0 | github-code | 36 | [
{
"api_name": "aiogram.Bot",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "aiogram.dispatcher.Dispatcher",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "aiogram.types.Message",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "... |
32512355098 | """this module contains methods for working with a web server
"""
from http.server import BaseHTTPRequestHandler, HTTPServer
import cgi
from search_engine import SearchEngine, Context_Window
import time
class RequestHandler(BaseHTTPRequestHandler):
    """HTTP handler implementing a paged full-text search UI.

    GET renders an empty search form; POST runs the query against
    ``self.server.search_engine`` and renders paginated results with a
    per-document pagination of quote windows.
    """

    def do_GET(self):
        """create an html page with a search button,
        query, limit field and buttons for going forward
        and backward through the files
        """
        self.send_response(200)
        self.send_header("Content-type", "text/html; charset=utf-8")
        self.end_headers()
        html = """
                <html>
                    <body>
                        <form method="post">
                            <input type="text" name="query">
                            <input type="submit" value="Search">
                            <br>
                            <br>
                            <label for="limit">
                                show
                                <input type="number" name="limit">
                                documents on a page
                            </label>
                        </form>
                    </body>
                </html>
                """
        self.wfile.write(bytes(html, encoding="utf-8"))

    def do_POST(self):
        """show search results as an ordered list of filenames
        with an unordered list of quotes for each file
        and the searched word(s) highlighted
        """
        start_time = time.time()
        form = cgi.FieldStorage(fp=self.rfile, headers=self.headers,
                                environ={'REQUEST_METHOD': 'POST'})
        # remember previous query to start next search with new parameters
        pr_query = form.getvalue("pr_query")
        query = str(form.getvalue("query"))
        # "None" (string) marks a changed query: all offsets reset below.
        if pr_query == None or pr_query == "None":
            pr_query = query
        else:
            if pr_query != query:
                pr_query = "None"
        doc_action = form.getvalue("action")
        limit = form.getvalue("limit")
        if not limit:
            limit = 10
        else:
            limit = int(limit)
        offset = form.getvalue("offset")
        if not offset or int(offset) < 0 or pr_query == "None":
            offset = 0
        else:
            offset = int(offset)
        # specify actions when a button is pressed
        if doc_action == "Previous" and offset != 0:
            offset = offset - limit
        elif doc_action == "Next":
            offset = limit + offset
        elif doc_action == "First Page":
            offset = 0
        self.send_response(200)
        self.send_header("Content-type", "text/html; charset=utf-8")
        self.end_headers()
        # Re-render the search form, carrying query/limit/offset state in
        # hidden fields so the next POST can continue the pagination.
        self.wfile.write(bytes("""
                <html>
                    <body>
                        <form method="post">
                            <input type="text" name="query" value="%s"/>
                            <input type="submit" value="Search"/>
                            <input type="hidden" name="pr_query" value="%s"/>
                            <br>
                            <br>
                            <label for="limit">show
                                <input type="number" name="limit" value="%d"/>
                                documents on a page
                            </label>
                            <input type="hidden" name="offset" value="%d"/>
                """ % (query, pr_query, limit, offset), encoding="utf-8"))
        # create a list of pairs (limit, offset) for quotes
        # in each document to pass it to the search function
        n = 0
        quotes_per_doc = []
        check_quotes = []
        while n < limit+1:
            quote_action = form.getvalue("action%d" % n)
            doc_limit = form.getvalue("doc%dlimit" % n)
            doc_offset = form.getvalue("doc%doffset" % n)
            if not doc_limit:
                doc_limit = 3
            else:
                doc_limit = int(doc_limit)
            # Quote offsets reset on a new query or any document-level action.
            if not doc_offset or pr_query == "None":
                doc_offset = 0
            elif doc_action == "Previous" or doc_action == "Next" or doc_action == "First Page":
                doc_offset = 0
            else:
                doc_offset = int(doc_offset)
            if doc_offset < 0:
                doc_offset = 0
            # specify actions when a button is pressed
            if quote_action == "Previous" and doc_offset != 0:
                doc_offset = doc_offset - doc_limit
            elif quote_action == "Next":
                doc_offset = doc_limit + doc_offset
            elif quote_action == "To the beginning":
                doc_offset = 0
            # doc_limit+1 requests one extra quote to detect a next page.
            quotes_per_doc.append((doc_limit+1, doc_offset))
            n+=1
        #search = self.server.search_engine.limit_quote_search(query, limit+1, offset, quotes_per_doc)
        search = self.server.search_engine.limit_quote_context_search(query, limit+1, offset, quotes_per_doc)
        sorted_key_list = sorted(search)
        # ordered file list
        self.wfile.write(bytes('<ol>', encoding="utf-8"))
        if not search:
            self.wfile.write(bytes('Not Found', encoding="utf-8"))
        for i, filename in enumerate(sorted_key_list[:limit]):
            self.wfile.write(bytes('<li><p>%s</p>' % filename, encoding="utf-8"))
            # create limit and offset for each document
            quote_limit = quotes_per_doc[i][0]
            quote_offset = quotes_per_doc[i][1]
            # field names that take into account
            # the number of the document in the output
            self.wfile.write(bytes("""
                <label for="doc%dlimit">show
                    <input type="number" name="doc%dlimit" value="%d"/>
                    quotes
                </label>
                <input type="hidden" name="doc%doffset" value="%d"/>
                """ % (i, i, quote_limit-1, i, quote_offset), encoding="utf-8"))
            # unordered quote list
            self.wfile.write(bytes('<ul>', encoding="utf-8"))
            # show quote_limit quotes starting from quote_offset
            if not (search[filename] or quote_limit < 0):
                self.wfile.write(bytes('<br>Not Found<br><br>', encoding="utf-8"))
            else:
                x = 0
                for window in search[filename]:
                    if x < quote_limit - 1:
                        self.wfile.write(bytes('<li><p>%s</p></li>' % window, encoding="utf-8"))
                        x += 1
            self.wfile.write(bytes('</ul></li>', encoding="utf-8"))
            # disable buttons (quotes) in particular cases
            prq_disabled = ""
            nq_disabled = ""
            if quote_offset == 0:
                prq_disabled = "disabled"
            if len(search[filename]) < quotes_per_doc[i][0]:
                nq_disabled = "disabled"
            self.wfile.write(bytes("""
                <input type="submit" name="action%d" value="Previous" %s/>
                <input type="submit" name="action%d" value="To the beginning" %s/>
                <input type="submit" name="action%d" value="Next" %s/>
                """ % (i, prq_disabled, i, prq_disabled, i, nq_disabled), encoding="utf-8"))
        self.wfile.write(bytes('</ol>', encoding="utf-8"))
        # disable buttons (docs) in particular cases
        pr_disabled = ""
        n_disabled = ""
        if offset == 0:
            pr_disabled = "disabled"
        if len(search.keys()) < limit+1:
            n_disabled = "disabled"
        self.wfile.write(bytes("""
                <input type="submit" name="action" value="Previous" %s/>
                <input type="submit" name="action" value="First Page" %s/>
                <input type="submit" name="action" value="Next" %s/>
                """ % (pr_disabled, pr_disabled, n_disabled), encoding="utf-8"))
        self.wfile.write(bytes("""</form></body></html>""", encoding="utf-8"))
        print('time:', time.time() - start_time)
def main():
    """Serve the search UI forever on port 8090, backed by the 'database' index."""
    httpd = HTTPServer(('', 8090), RequestHandler)
    httpd.search_engine = SearchEngine('database')
    httpd.serve_forever()
if __name__ == "__main__":
main()
| mathlingv-siv/tokenizer | web_server.py | web_server.py | py | 8,672 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "http.server.BaseHTTPRequestHandler",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "time.time",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "cgi.FieldStorage",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "time.tim... |
38495991100 | import imp
import os.path
from collections import namedtuple
ConfigValue = namedtuple('ConfigValue', ('name', 'description'))
class NotPresentType(object):
    """Sentinel type whose single instance marks a missing config value."""

    def __repr__(self):
        return "NotPresent"
NotPresent = NotPresentType()
BASE_CONFIG = (
ConfigValue('token', 'github auth token'),
ConfigValue('base_url', 'base uri to make api calls to.'),
ConfigValue('config', 'config file to use.')
)
def get_from_file(name, path=None):
    """Get a config value from the config file.

    Loads the file once (cached on the function object) via ``imp`` and
    returns the upper-cased attribute *name*, or ``NotPresent`` when the
    file or attribute is missing.
    """
    # NotPresent (from CLI default) and None (our own default) both mean
    # "use the standard location" — the old code crashed on None.
    if path is NotPresent or path is None:
        path = '~/.pygh'
    if not hasattr(get_from_file, 'module'):
        path = os.path.expanduser(path)
        if not os.path.exists(path):
            return NotPresent
        # Bug fix: filter() returns an iterator on Python 3, so the old
        # `filter(...)[0]` raised TypeError; take the first match instead.
        py_suffix = next(s for s in imp.get_suffixes() if s[0] == '.py')
        with open(path) as f:
            get_from_file.module = imp.load_module(
                'config',
                f,
                path,
                py_suffix
            )
    return getattr(get_from_file.module, name.upper(), NotPresent)
def get_from_env(name):
    """get config from env.

    looks for upper cased version of ConfigValue.name and prepends GH_ to it.
    """
    key = "GH_%s" % name.upper()
    return os.environ.get(key, NotPresent)
def get_config(args):
    """returns a dict of config parameters.

    gathers sources, in this order, from
    1. ~/.pygh
    2. environment
    3. flags
    """
    config = {}
    for i in BASE_CONFIG:
        # Candidate values in ascending priority: file, then env, then CLI.
        val = (
            get_from_file(i.name, args.config),
            get_from_env(i.name),
            getattr(args, i.name, NotPresent)
        )
        for v in val:
            if v == NotPresent:
                continue
            # Later sources overwrite earlier ones, so CLI flags win.
            config[i.name] = v
    return config
def add_config_to_parser(parser):
    """Register one optional ``--<name>`` flag per base config value."""
    for entry in BASE_CONFIG:
        flag = '--%s' % entry.name
        parser.add_argument(flag, default=NotPresent, help=entry.description)
| mwhooker/gh | gh/ghconfig.py | ghconfig.py | py | 1,871 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "collections.namedtuple",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "os.path.path.expanduser",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "os.path.path",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "os.pa... |
71914015143 | from keras import layers, models, optimizers
MAXLEN = 128
CHARS = 1000
HIDDEN_DIM = 128
def make_encoder() -> models.Sequential:
    """Build the encoder: a per-timestep dense projection followed by two
    stacked LSTMs, the last of which emits a single hidden vector."""
    encoder = models.Sequential(name='encoder')
    encoder.add(layers.TimeDistributed(layers.Dense(HIDDEN_DIM), input_shape=(MAXLEN, CHARS)))
    encoder.add(layers.LSTM(HIDDEN_DIM, return_sequences=True))
    encoder.add(layers.LSTM(HIDDEN_DIM, return_sequences=False))
    encoder.summary()
    return encoder
def make_decoder() -> models.Sequential:
    """Build the decoder: repeat the hidden vector MAXLEN times, run an LSTM
    over it, and emit a per-timestep softmax over the character vocabulary."""
    decoder = models.Sequential(name='decoder')
    decoder.add(layers.RepeatVector(MAXLEN, input_shape=(HIDDEN_DIM,)))
    decoder.add(layers.LSTM(128, return_sequences=True))
    decoder.add(layers.TimeDistributed(layers.Dense(CHARS)))
    decoder.add(layers.TimeDistributed(layers.Activation('softmax')))
    decoder.summary()
    return decoder
def make_encoder_decoder() -> models.Sequential:
    """Chain encoder and decoder into one model compiled for training."""
    combined = models.Sequential(name='encoder_decoder')
    for stage in (make_encoder(), make_decoder()):
        combined.add(stage)
    adam = optimizers.Adam(clipvalue=1.0)
    combined.compile(loss='sparse_categorical_crossentropy', optimizer=adam, metrics=['accuracy'])
    return combined
| cympfh/twitter-dialogue-bot | bot/model.py | model.py | py | 1,131 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "keras.models.Sequential",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "keras.models",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "keras.layers.TimeDistributed",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "keras... |
33763269561 | """The CLI entry of steamutils."""
from argparse import ArgumentParser
from ._version import __version__ as version
def main(argv=None):
    """Tools for performing analytics on steam users, steam groups and servers using the SteamAPI and various popular SourceMod plugins"""
    # The docstring doubles as the CLI description shown by --help.
    arg_parser = ArgumentParser()
    arg_parser.description = main.__doc__
    arg_parser.add_argument("-p", "--print", action="store_true", help="print hello world")
    arg_parser.add_argument("-V", "--version", action="version", version=version)
    parsed = arg_parser.parse_args(argv)
    print(f"testing: Hello world! {parsed}")
def init():
    """For unit test only."""
    return None
if __name__ == "__main__":
main()
init()
| CrimsonTautology/steamutils | src/steamutils/__main__.py | __main__.py | py | 691 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "_version.__version__",
"line_number": 12,
"usage_type": "name"
}
] |
20529220011 | import numpy as np
import matplotlib.pyplot as plt
greyhounds = 500
lads = 500
grey_height = 28 + 4 * np.random.randn(greyhounds)
lad_height = 24 + 4 * np.random.randn(lads)
plt.hist([grey_height,lad_height],stacked=True,color=['r','b'])
plt.show() | slm960323/ML_Diary | toyex_dog.py | toyex_dog.py | py | 251 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.random.randn",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.randn",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "numpy.random",
... |
36936928879 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Works with python2.6
import os
import sys
import math
import json
from subprocess import Popen, PIPE
from operator import itemgetter
class Test:
    """Lightweight record pairing a test file's path with its display name."""

    def __init__(self, path, name):
        # Path to the .js file and its bare filename.
        self.path = path
        self.name = name

    @classmethod
    def from_file(cls, path, name, options):
        """Alternate constructor; *options* is accepted for interface
        compatibility but currently unused."""
        return cls(path, name)
def find_tests(dir, substring=None):
    """Collect [path, filename] pairs for every .js file under *dir*,
    optionally keeping only paths whose dir-relative form contains
    *substring*."""
    matches = []
    for dirpath, _dirnames, filenames in os.walk(dir):
        # Skip the walk root when scanning the current directory.
        if dirpath == ".":
            continue
        for fname in filenames:
            if not fname.endswith(".js"):
                continue
            full_path = os.path.join(dirpath, fname)
            if substring is None or substring in os.path.relpath(full_path, dir):
                matches.append([full_path, fname])
    return matches
def get_test_cmd(path):
    """Return the argv list that runs *path* in the configured JS shell."""
    cmd = [JS, "-f"]
    cmd.append(path)
    return cmd
def avg(seq):
    """Arithmetic mean of the non-empty sized sequence *seq*."""
    count = len(seq)
    return sum(seq) / count
def stddev(seq, mean):
    """Population standard deviation of *seq* about a precomputed *mean*."""
    squared_diffs = [(float(item) - mean) ** 2 for item in seq]
    return math.sqrt(sum(squared_diffs) / len(seq))
def run_test(test):
    """Run one test under the JS shell and return GC timing statistics.

    MOZ_GCTIMER=stderr makes the shell emit whitespace-separated float
    triples (total, mark, sweep) on stderr; returns the tuple
    (TMax, TAvg, MMax, MAvg, SMax, SAvg).
    """
    env = os.environ.copy()
    env["MOZ_GCTIMER"] = "stderr"

    cmd = get_test_cmd(test.path)
    total = []
    mark = []
    sweep = []
    # Windows cannot inherit/close fds across CreateProcess the POSIX way.
    close_fds = sys.platform != "win32"
    p = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=close_fds, env=env)
    out, err = p.communicate()
    out, err = out.decode(), err.decode()
    float_array = [float(_) for _ in err.split()]

    if len(float_array) == 0:
        print("Error: No data from application. Configured with --enable-gctimer?")
        sys.exit(1)

    # Timings arrive as repeating (total, mark, sweep) triples.
    for i, currItem in enumerate(float_array):
        if i % 3 == 0:
            total.append(currItem)
        else:
            if i % 3 == 1:
                mark.append(currItem)
            else:
                sweep.append(currItem)

    return max(total), avg(total), max(mark), avg(mark), max(sweep), avg(sweep)
def run_tests(tests, test_dir):
    """Run every test in *tests*, printing one JSON-style result line per
    test, and return {name: {TMax, TAvg, MMax, MAvg, SMax, SAvg}}.

    A KeyboardInterrupt stops the run early and returns what was gathered.
    """
    bench_map = {}
    try:
        for i, test in enumerate(tests):
            filename_str = '"%s"' % test.name
            TMax, TAvg, MMax, MAvg, SMax, SAvg = run_test(test)
            bench_map[test.name] = [TMax, TAvg, MMax, MAvg, SMax, SAvg]
            fmt = '%20s: {"TMax": %4.1f, "TAvg": %4.1f, "MMax": %4.1f, "MAvg": %4.1f, "SMax": %4.1f, "SAvg": %4.1f}'  # NOQA: E501
            if i != len(tests) - 1:
                fmt += ","
            # Bug fix: the last column previously printed MAvg in the SAvg
            # slot, so the reported sweep average was wrong.
            print(fmt % (filename_str, TMax, TAvg, MMax, MAvg, SMax, SAvg))
    except KeyboardInterrupt:
        print("fail")

    # Bug fix: .iteritems() is Python 2 only; the rest of this file already
    # uses the Python 3 print() function, so use .items().
    return dict(
        (
            filename,
            dict(TMax=TMax, TAvg=TAvg, MMax=MMax, MAvg=MAvg, SMax=SMax, SAvg=SAvg),
        )
        for filename, (TMax, TAvg, MMax, MAvg, SMax, SAvg) in bench_map.items()
    )
def compare(current, baseline):
    """Print a per-test comparison of *current* against *baseline*.

    Both map test names to dicts keyed TMax/TAvg/MMax/MAvg/SMax/SAvg; the
    comparison (and the closing average) is based on total-time averages
    (TAvg). Returns None; output goes to stdout.
    """
    percent_speedups = []
    # Bug fix: .iteritems() is Python 2 only; use .items() to run on Python 3.
    for key, current_result in current.items():
        try:
            baseline_result = baseline[key]
        except KeyError:
            print(key, "missing from baseline")
            continue

        val_getter = itemgetter("TMax", "TAvg", "MMax", "MAvg", "SMax", "SAvg")
        BTMax, BTAvg, BMMax, BMAvg, BSMax, BSAvg = val_getter(baseline_result)
        CTMax, CTAvg, CMMax, CMAvg, CSMax, CSAvg = val_getter(current_result)

        if CTAvg <= BTAvg:
            speedup = (CTAvg / BTAvg - 1) * 100
            result = "faster: %6.2f < baseline %6.2f (%+6.2f%%)" % (
                CTAvg,
                BTAvg,
                speedup,
            )
            percent_speedups.append(speedup)
        else:
            slowdown = (CTAvg / BTAvg - 1) * 100
            result = "SLOWER: %6.2f > baseline %6.2f (%+6.2f%%) " % (
                CTAvg,
                BTAvg,
                slowdown,
            )
            percent_speedups.append(slowdown)
        print("%30s: %s" % (key, result))

    if percent_speedups:
        print("Average speedup: %.2f%%" % avg(percent_speedups))
if __name__ == "__main__":
    # Locate the bundled tests/ directory next to this script.
    script_path = os.path.abspath(__file__)
    script_dir = os.path.dirname(script_path)
    test_dir = os.path.join(script_dir, "tests")

    from optparse import OptionParser

    op = OptionParser(usage="%prog [options] JS_SHELL [TESTS]")
    op.add_option(
        "-b",
        "--baseline",
        metavar="JSON_PATH",
        dest="baseline_path",
        help="json file with baseline values to " "compare against",
    )

    (OPTIONS, args) = op.parse_args()
    if len(args) < 1:
        op.error("missing JS_SHELL argument")
    # We need to make sure we are using backslashes on Windows.
    JS, test_args = os.path.normpath(args[0]), args[1:]

    test_list = find_tests(test_dir)

    if not test_list:
        # Bug fix: this used the Python 2 `print >>sys.stderr, ...` chevron
        # syntax, which on Python 3 parses as a tuple expression and prints
        # nothing; use the print function's file= keyword instead.
        print("No tests found matching command line arguments.", file=sys.stderr)
        sys.exit(0)

    test_list = [Test.from_file(tst, name, OPTIONS) for tst, name in test_list]

    try:
        print("{")
        bench_map = run_tests(test_list, test_dir)
        print("}")
    except OSError:
        if not os.path.exists(JS):
            # Bug fix: same Python 2 print-chevron problem as above.
            print("JS shell argument: file does not exist: '%s'" % JS, file=sys.stderr)
            sys.exit(1)
        else:
            raise

    if OPTIONS.baseline_path:
        # Context manager guarantees the baseline file is closed.
        with open(OPTIONS.baseline_path, "r") as fh:
            baseline_map = json.load(fh)
        compare(current=bench_map, baseline=baseline_map)
| mongodb/mongo | src/third_party/mozjs/extract/js/src/devtools/gc/gc-test.py | gc-test.py | py | 5,569 | python | en | code | 24,670 | github-code | 36 | [
{
"api_name": "os.walk",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "os.path.relpath",
"line_number"... |
8413026987 | from flask import Flask, render_template
from flask_restful import Api
from resources.hotel import Hotels, Hotel
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///db.db'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
api = Api(app)
@app.before_first_request
def create_db():
    # Create all SQLAlchemy tables before the first request is served.
    # NOTE(review): `db` is only imported inside the __main__ guard at the
    # bottom of this module, so this handler works only when the app is
    # started via `python app.py` — confirm before serving through a WSGI
    # runner that imports this module instead.
    db.create_all()
@app.route("/")
def index():
    """Serve the landing page."""
    template_name = "index.html"
    return render_template(template_name)
api.add_resource(Hotels, '/hotels')
api.add_resource(Hotel, '/hotels/<string:hotel_id>')
if __name__ == '__main__':
from sql_alchemy import db
db.init_app(app)
app.run(debug=True) | mariorodeghiero/flask-python-rest-api-course | app.py | app.py | py | 590 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "flask_restful.Api",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "resources.hotel.Hot... |
12840958073 | '''
Yangbot #2 - elo ~ 1800 - 2000
FEATURE LIST:
- UCI interface [INCOMPLETE]
- opening book [OK]
- negamax [OK]
- quiscence search [OK]
- killer moves [TESTING]
- move ordering [TESTING]
> MVV LVA [OK]
- PST [OK]
- PV [OK]
- evaluation [TESTING]
> PeSTO evaluation [OK]
> Incremental material counting [INCOMPLETE]
- iterative deepening [OK]
- null moves [TESTING]
- late move reduction [TESTING]
- transposition tables [GOOD]
- mate distance pruning [TESTING]
- razoring [INCOMPLETE]
- check extensions [INCOMPLETE]
CHANGE LOGS:
- added mate distance pruning - however it seems to mess up move ordering so i made alpha and beta temporary variables
- moved outcomes from evaluation to search
- ~piece has more priority than psqt~ Tests found that equal play is superior
weaknesses:
- prioritises trading when it is better to attack the king
- increase speed and depth ~ try to reach depth, avg depth is 5 atm
MAYBE USE CYTHON?
MAYBE PRUNE MORE BRANCHES (killer huristics not confirmed to work)
TEST NEW HASH TABLES?
'''
import chess
import chess.polyglot
from chessUtil import Piece_Square_Tables
import random
import cProfile
import pstats
import settings
import threading
import requests
import time
class HashEntry:
    """Transposition-table entry: a position key plus the move/score/depth
    recorded for it and a flag telling how the score bounds the true value."""

    # Score-bound flags for alpha-beta search results.
    NONE = 0
    EXACT = 1
    BETA = 2
    ALPHA = 3

    def __init__(self):
        # key holds the position's FEN string; move is the best move found.
        self.key, self.move = "", None
        self.score = self.depth = 0
        self.flag = self.NONE
# bot class
class BOT():
INFINITY = 137000000
hashes = {}
hash_table = {}
pv_table = {}
contempt = [5, 2.5, 0]
def __init__(self):
self.saved_moved = ""
self.results = []
self.clear_search_params()
self.Mvv_Lva_Victim_scores = {
"p" : 100,
"n" : 200,
"b" : 300,
"r" : 400,
"q" : 500,
"k" : 600
}
# init mvv lva
#self.Mvv_Lva_Scores = {}
#for attacker in self.Mvv_Lva_Victim_scores.keys():
# self.Mvv_Lva_Scores[attacker] = {}
# for victim in self.Mvv_Lva_Victim_scores.keys():
# self.Mvv_Lva_Scores[attacker][victim] = int(self.Mvv_Lva_Victim_scores[victim] + 6 - (self.Mvv_Lva_Victim_scores[attacker] / 100)) + 1000000
############################## UTILITY FUNCTIONS ##############################
def clear_search_params(self):
self.nodes = 0
#self.pv_table = {}
self.killers = {0: {}, 1: {}}
for i in range(0, 400):
self.killers[0][i] = None
self.killers[1][i] = None
self.search_history = {}
for i in range(0, 64):
self.search_history[i] = {}
for ii in range(0, 64):
self.search_history[i][ii] = 0
    def get_opening_move(self, board):
        """Pick a book move for *board* from the bundled Polyglot opening book.

        All moves sharing the highest book weight are collected and one of
        them is chosen at random; returns None when the position is out of
        book.
        """
        max_weight = 0
        max_move = None
        found_moves = {}
        with chess.polyglot.open_reader("chessAI/data/polyglot/performance.bin") as reader:
            for entry in reader.find_all(board):
                # Bucket candidate moves by their book weight.
                if not entry.weight in found_moves:
                    found_moves[entry.weight] = []
                found_moves[entry.weight].append(entry.move)
                if entry.weight > max_weight:
                    max_weight = entry.weight
                    max_move = entry.move
        # shuffle out of best moves
        if max_move is None:
            return None
        best_moves = found_moves[max_weight]
        random.shuffle(best_moves)
        return best_moves[0]
def getPhase(self, board):
l = len(board.piece_map())
if 20 <= l <= 32:
return 0
elif 10 <= l < 20:
return 1
else:
return 0
def store_hash(self, pos, move, score, flag, depth):
he = HashEntry()
he.key = pos
he.move = move
he.score = score
he.flag = flag
he.depth = depth
BOT.hash_table[pos] = he
def get_hash(self, pos):
if pos in BOT.hash_table:
return BOT.hash_table[pos]
return None
def store_pvline(self, position_hash, move):
BOT.pv_table[position_hash] = move
def get_pvline(self, position_hash, turn):
if position_hash not in BOT.pv_table:
return None
return BOT.pv_table[position_hash]
def retrieve_pvline(self, board):
pv_line = list()
_b = board.copy()
for _ in range(10000):
#h = self.cu.get_board_hash_pychess(_b)
h = _b.fen()
hash_entry = self.get_hash(h)
if hash_entry is not None and hash_entry.flag == HashEntry.EXACT:
pv_line.append(hash_entry.move)
_b.push(hash_entry.move)
else:
break
return pv_line
def get_pv_line_san(self, board, line):
san_list = list()
b = board.copy()
for move in line:
san_list.append(b.san(move))
b.push(move)
return san_list
############################ EVALUATION AND SEARCH ################################
    @staticmethod
    def evaluate_EVAL(board):
        """PeSTO-style tapered static evaluation, from the side-to-move's view.

        Blends middlegame and endgame material/piece-square scores by game
        phase, then adjusts for mobility and pawn-structure defects
        (doubled / isolated / blocked pawns).
        """
        #if board.is_checkmate():
        #    return -1 * (BOT.INFINITY - board.ply())
        #elif board.is_stalemate() or board.is_repetition() or board.is_insufficient_material():
        #    return 0
        mg, eg, phase = 0, 0, 0
        #bq, wq = (len(board.pieces(chess.QUEEN, chess.BLACK)) > 0), (len(board.pieces(chess.QUEEN, chess.WHITE)) > 0)
        pieces = board.piece_map()
        for pos, _piece in board.piece_map().items():
            piece = str(_piece)
            # piece values have more priority
            # NOTE(review): both mg and eg add mg_values here — confirm
            # whether eg should use an endgame material table instead.
            mg += ((Piece_Square_Tables.mg_values[piece.lower()] * (-1 if piece.islower() else 1))) / 100
            eg += ((Piece_Square_Tables.mg_values[piece.lower()] * (-1 if piece.islower() else 1))) / 100
            if piece.islower(): # black
                #if wq == False and piece.lower() == "k": # no restrictions if white queen of the table
                #    continue
                mg -= Piece_Square_Tables.PSQT_MG[piece][pos] / 100
                eg -= Piece_Square_Tables.PSQT_EG[piece][pos] / 100
            else:
                #if bq == False and piece.lower() == "k": # no restrictions if black queen of the table
                #    continue
                # White uses the mirrored square index of the black tables.
                mg += Piece_Square_Tables.PSQT_MG[piece.lower()][Piece_Square_Tables.mirror_table[pos]] / 100
                eg += Piece_Square_Tables.PSQT_EG[piece.lower()][Piece_Square_Tables.mirror_table[pos]] / 100
            phase += Piece_Square_Tables.phase[piece.lower()]
        mobility = 0
        if not board.is_check():
            # Mobility = our legal-move count minus the opponent's,
            # obtained by pushing a null move.
            num_1 = board.legal_moves.count()
            board.push(chess.Move.null())
            num_2 = board.legal_moves.count()
            board.pop()
            if board.turn == chess.WHITE:
                mobility = num_1 - num_2
            else:
                mobility = num_2 - num_1
        mgp = min(phase, 24)
        egp = 24-mgp
        #evadj = 0
        '''
        if (len(board.pieces(chess.BISHOP, chess.BLACK)) >= 2):
            evadj -= 500
        if (len(board.pieces(chess.BISHOP, chess.WHITE)) >= 2):
            evadj += 500
        '''
        #sq = (mg * (256 - phase)) + ((eg * phase) / 256)
        sq = (mg * mgp + eg * egp) / 24 # idk about the legal moves
        # Collect pawn squares per side for the structure penalties below.
        white_pawns, black_pawns = [], []
        for k, v in pieces.items():
            if str(v) == 'P':
                white_pawns.append(k)
            elif str(v) == 'p':
                black_pawns.append(k)
        doubled_white = 0
        doubled_black = 0
        isolated_white = 0
        isolated_black = 0
        blocked_white = 0
        blocked_black = 0
        for pawn in white_pawns:
            # check for each pawn
            file_number = pawn % 8
            # is pawn blocked ( for white +8 for black -8)
            if pawn + 8 < 64 and board.piece_at(pawn + 8) is not None:
                blocked_white += 1
            has_left_neighbor = False
            has_right_neighbor = False
            # Edge files cannot have a neighbor on that side.
            if pawn % 8 == 0:
                has_left_neighbor = True
            if pawn % 8 == 7:
                has_right_neighbor = True
            # is it doubled, is another pawn on the file
            for other_pawn in white_pawns:
                if other_pawn != pawn and abs(pawn - other_pawn) % 8 == 0:
                    doubled_white += 1
                # isolation check left file ( if exists)
                other_file = other_pawn % 8
                if file_number - other_file == 1:
                    has_left_neighbor = True
                if file_number - other_file == -1:
                    has_right_neighbor = True
            if not has_left_neighbor and not has_right_neighbor:
                isolated_white += 1
        # check for black pawns
        for pawn in black_pawns:
            # check for each pawn
            file_number = pawn % 8
            # is pawn blocked ( for white +8 for black -8)
            if pawn - 8 >= 0 and board.piece_at(pawn - 8) is not None:
                blocked_black += 1
            has_left_neighbor = False
            has_right_neighbor = False
            if pawn % 8 == 0:
                has_left_neighbor = True
            if pawn % 8 == 7:
                has_right_neighbor = True
            # is it doubled, is another pawn on the file
            for other_pawn in black_pawns:
                if other_pawn != pawn and abs(pawn - other_pawn) % 8 == 0:
                    doubled_black += 1
                # isolation check left file ( if exists)
                other_file = other_pawn % 8
                if file_number - other_file == 1:
                    has_left_neighbor = True
                if file_number - other_file == -1:
                    has_right_neighbor = True
            if not has_left_neighbor and not has_right_neighbor:
                isolated_black += 1
        pawn_influence = ((doubled_white - doubled_black) + (isolated_white - isolated_black) + (blocked_white - blocked_black)) * 0.1
        #print(pawn_influence)
        sq -= pawn_influence
        # Negate for black so the score is always from the mover's view.
        return (sq + 0.1*mobility) * (-1 if board.turn == chess.BLACK else 1)
    # static exchange evaluation
    def static_exchange_evaluation(self, board, moveSquare):
        """Recursively value the capture sequence on *moveSquare*.

        Each side is assumed to recapture with its least valuable attacker;
        the value never drops below 0 (a side may decline to recapture).
        """
        value = 0
        attackers = list(board.attackers(board.turn, moveSquare)) # get attackers to the square
        if attackers: # if we have attackers
            piece_types = [board.piece_type_at(x) for x in attackers] # get the piece type of these attackers
            lowest = min(piece_types, key=lambda x: Piece_Square_Tables.values[x]) # get lowest piece
            attacker_index = piece_types.index(lowest) # get index of lowest piece
            # NOTE(review): bare excepts below also mask unrelated errors;
            # the fallbacks treat the target as a pawn / an illegal capture.
            try:
                capture = Piece_Square_Tables.values[board.piece_type_at(moveSquare)]
            except:
                capture = 100 # pawn
            try:
                next_move = board.find_move(attackers[attacker_index], moveSquare) # make capture move
                board.push(next_move) # push
                value = max(0, capture - self.static_exchange_evaluation(board, moveSquare))
                board.pop()
            except:
                value = 0
        return value
    # see capture
    def start_SEE(self, board, move):
        """Score a capture *move* by static exchange evaluation: push it,
        run the recursive SEE on its target square, then restore the board.
        """
        value = 0
        m = move.to_square
        # Fallback: an empty target square (en passant) is valued as a pawn.
        try:
            capture = Piece_Square_Tables.values[board.piece_type_at(m)]
        except:
            capture = 100 # pawn
        board.push(move)
        value = max(0, capture - self.static_exchange_evaluation(board, m))
        board.pop()
        return value
    # MOVE ORDERING
    def orderM(self, board, unscored_moves, pv_move, Quiscence):
        """Score and sort *unscored_moves*, best first.

        In quiescence mode only the PV move and captures receive scores, so
        quiet moves are dropped from the result entirely; otherwise quiet
        moves are ranked by killer-move slots and the history table, and
        captures by static exchange evaluation.
        """
        scored_moves = {}
        if Quiscence:
            for move in unscored_moves:
                # PV move is searched first regardless of capture value.
                if pv_move is not None and move == pv_move:
                    scored_moves[move] = 20000000
                    continue
                ## all non captures are at the end of the list
                if board.is_capture(move):
                    ## all captures have to be scored and thus sorted
                    #attacker = board.piece_at(move.from_square).symbol().lower()
                    #try:
                    #    victim = board.piece_at(move.to_square).symbol().lower()
                    #except:
                    #    victim = 'p'
                    #scored_moves[move] = self.Mvv_Lva_Scores[attacker][victim]
                    scored_moves[move] = self.start_SEE(board, move)
        else:
            for move in unscored_moves:
                ## all non captures are at the end of the list
                if not board.is_capture(move):
                    ### check if non capture is killer move 1st order
                    if self.killers[0][board.ply()] == move:
                        scored_moves[move] = 900000
                    elif self.killers[1][board.ply()] == move: # second killer slot
                        scored_moves[move] = 800000
                    else:
                        # History heuristic: from/to squares of past cutoffs.
                        scored_moves[move] = self.search_history[move.from_square][move.to_square]
                    # pawn promotions
                    if move.promotion:
                        scored_moves[move] += Piece_Square_Tables.values[move.promotion]
                else:
                    ## all captures have to be scored and thus sorted
                    #attacker = board.piece_at(move.from_square).symbol().lower()
                    #try:
                    #    victim = board.piece_at(move.to_square).symbol().lower()
                    #except:
                    #    victim = 'p'
                    #scored_moves[move] = self.Mvv_Lva_Scores[attacker][victim]
                    scored_moves[move] = self.start_SEE(board, move)
        # Highest score first.
        ordered_move_list = sorted(scored_moves, key=scored_moves.get)
        ordered_move_list.reverse()
        return ordered_move_list
@staticmethod
def evaluate(board):
return BOT.evaluate_EVAL(board)
    # QUISCENCE SEARCH
    def SearchAllCaptures(self, board, alpha, beta):
        """Quiescence search: extend through capture moves only until the
        position is quiet, returning a stand-pat-bounded negamax score."""
        old_a = alpha
        best_move = None
        h = board.fen()
        pv_move = self.get_pvline(h, board.turn)
        self.nodes += 1
        eval = self.evaluate(board) # stand-pat score
        if (eval >= beta):
            return beta
        # delta pruning
        if eval < alpha-9:
            return alpha
        alpha = max(alpha, eval)

        # orderM in quiescence mode returns captures (and the PV move) only.
        l = board.legal_moves
        captures = self.orderM(board, l, pv_move, True)
        #print(captures)
        for m in captures:
            board.push(m)
            eval = -self.SearchAllCaptures(board, -beta, -alpha) # recursive negamax on the capture
            board.pop()

            if eval > alpha:
                if eval >= beta:
                    return beta
                alpha = eval
                best_move = m
        # Record the PV move only when alpha actually improved.
        if alpha != old_a:
            self.store_pvline(h, best_move)
        return alpha
    def startSearch(self, board):
        """Iterative-deepening driver: search depths 1..settings.DEPTH-1
        within the settings.MAXT time budget and return the best move (the
        head of the principal variation), or early on a found mate."""
        best_move_FOUND = None
        self.saved_moved = None
        self.clear_search_params()
        entry_time = time.time()
        for depth in range(1, settings.DEPTH):
            if time.time() - entry_time > settings.MAXT/(depth/4): # dont bother starting another depth if we ate through most our time
                print("TIMEOUT")
                break
            self.nodes = 0
            # fh/fhf track fail-high statistics for move-ordering quality.
            self.fh = 0
            self.fhf = 0
            currentItSearchDepth = depth + 1
            _start = time.time()
            best_score = self.search(board, depth, -BOT.INFINITY, BOT.INFINITY, depth, True)
            pvM = self.retrieve_pvline(board)
            if len(pvM) > 0:
                best_move_FOUND = pvM[0]
            else:
                # No PV recovered: retry one deeper on the final iteration.
                if depth == settings.DEPTH-1:
                    best_score = self.search(board, depth+1, -BOT.INFINITY, BOT.INFINITY, depth+1, True)
                    print(f"XTRA: Depth {depth+1} Nodes: {self.nodes} Move: {board.san(best_move_FOUND)} Time: {time.time() - _start} Score: {best_score}")
            print(f"Depth {currentItSearchDepth} | Nodes: {self.nodes} | Move: {board.san(best_move_FOUND)} | Time: {time.time() - _start} | Score: {best_score}")
            print(f"Move Ordering {self.fhf /max(self.fh,1)}")
            print("PV LINE: ",self.get_pv_line_san(board, pvM))
            if best_score >= BOT.INFINITY - 100:
                # found checkmate
                return best_move_FOUND
        return best_move_FOUND
    def search(self, board, depth, alpha, beta, maxd, null_move):
        """Negamax alpha-beta search with transposition table, mate-distance
        pruning, null-move pruning, razoring, LMR and a PVS-style re-search;
        returns a score for the side to move and records hash/PV entries.
        """
        move_score = -BOT.INFINITY
        if board.is_checkmate():
            return board.ply() - BOT.INFINITY #* (-1 if board.turn else 1)
        elif board.is_stalemate() or board.is_repetition() or board.is_insufficient_material():
            # Draws are scored with a phase-dependent contempt factor.
            return -(0 + BOT.contempt[self.getPhase(board)]) #* (-1 if board.turn else 1)
        # mate distance pruning ?
        if board.ply() > 0:
            alphaM = max(alpha, board.ply() - BOT.INFINITY)
            betaM = max (beta, BOT.INFINITY - board.ply())
            if alphaM >= betaM:
                return alphaM
        # transposition tables
        h = board.fen()
        hash_entry = self.get_hash(h)
        if hash_entry is not None:
            if hash_entry.depth >= depth:
                if hash_entry.flag == HashEntry.EXACT:
                    return hash_entry.score
                elif hash_entry.flag == HashEntry.ALPHA and hash_entry.score <= alpha:
                    return alpha
                elif hash_entry.flag == HashEntry.BETA and hash_entry.score >= beta:
                    return beta
        self.nodes += 1
        l = board.legal_moves
        # no legal moves
        if l.count() == 0:
            return self.evaluate(board)
        razoringMargin = 5 # value of a queen (maybe change this)
        if depth <= 0:
            # Horizon reached: drop into quiescence search.
            return self.SearchAllCaptures(board, alpha, beta)
        elif depth <= 2: # razoring
            if alpha == beta - 1:
                if (self.evaluate(board) + razoringMargin * depth) < beta:
                    value = self.SearchAllCaptures(board, alpha, beta)
                    #move_score = -self.search(board, depth-2, -window[1], -window[0], maxd, null_move)
                    if value < beta:
                        return value
        #elif depth <= 4:
        #    if alpha == beta-1:
        #        if self.evaluate(board) < beta - (200 + 2 * depth):
        #            # razoring
        #            result = self.SearchAllCaptures(board, alpha, beta)
        #            if result < beta:
        #                return result
        if null_move and not board.is_check() and board.ply() > 0 and depth >= 3 and len(board.piece_map()) >= 10: #NULL MOVE - also wanna prevent zugzwang
            board.push(chess.Move.null())
            move_score = -1 * self.search(board, depth - 3, -beta, -beta + 1, maxd, False)
            board.pop()
            if move_score >= beta:
                return beta
        move_score = -BOT.INFINITY
        best_score = -BOT.INFINITY
        old_a = alpha
        best_move = None
        moves = self.orderM(board, l, None, False)
        #print(moves)
        legal = 0
        pvM = self.retrieve_pvline(board)
        # implement PVS in the future
        # test first move
        for i, m in enumerate(moves):
            #m = Move.from_uci(m)
            if i == 0:
                # First move: full-window search.
                board.push(m)
                move_score = -self.search(board, depth-1, -beta, -alpha, maxd, null_move)
            else:
                legal += 1
                wascheck = board.is_check()
                board.push(m)
                """
                elif depth <= 3:
                    ev = self.evaluate(board)
                    if depth < 2:
                        value = ev + 2.5
                    elif depth < 3:
                        value = ev + 3.2
                    else:
                        value = ev + 4.7
                    if value < beta:
                        board.pop()
                        best_move = m
                        self.store_hash(board.fen(), best_move, beta, HashEntry.BETA, depth)
                        continue
                """
                # Aspiration window
                window = (alpha, alpha+1) if alpha+1 < beta else (alpha, beta)
                if i > 3 and depth > 3 and not board.is_check() and not board.is_capture(m) and m not in pvM and not wascheck: # LMR
                    move_score = -self.search(board, depth-2, -window[1], -window[0], maxd, null_move)
                else: move_score = -self.search(board, depth-1, -window[1], -window[0], maxd, null_move) # PVS
                # REDO PVS
                if (move_score > alpha and move_score > beta): # PVS
                    move_score = -self.search(board, depth-1, -beta, -alpha, maxd, null_move) # do full search
                    if move_score > alpha:
                        alpha = move_score
            board.pop()
            if move_score > best_score:
                best_score = move_score
                best_move = m
            if move_score > alpha:
                if move_score >= beta:
                    # Fail-high bookkeeping for move-ordering statistics.
                    if legal == 1:
                        self.fhf += 1
                    self.fh += 1
                    # killer moves
                    if not board.is_capture(m):
                        self.killers[1][board.ply()] = self.killers[0][board.ply()]
                        self.killers[0][board.ply()] = m
                    self.store_hash(board.fen(), best_move, beta, HashEntry.BETA, depth)
                    return beta
                alpha = move_score
                best_move = m
                if not board.is_capture(m):
                    # History heuristic reward for a quiet alpha-raising move.
                    self.search_history[best_move.from_square][best_move.to_square] += depth
        if alpha != old_a:
            self.store_hash(board.fen(), best_move, best_score, HashEntry.EXACT, depth)
        else:
            # STORE HASH pos, bestmove, alpha, ALPHA, depth
            self.store_hash(board.fen(), best_move, alpha, HashEntry.ALPHA, depth)
        return alpha
############################ BOT/MODULE INTERFACE #################################
def tablebase(self, board) :
if len(board.piece_map()) <= 7 :
try :
fen = board.fen().replace(' ', '_')
info = requests.get(f'http://tablebase.lichess.ovh/standard?fen={fen}')
return info.json()['moves'][0]['uci']
except Exception :
pass
return None
    def play(self, board): # main move-selection entry point
        """Pick a move for the current position.

        Order of preference: opening book, then endgame tablebase, then the
        engine's own search. NOTE(review): helpers may return different move
        representations (UCI string vs Move object) — confirm callers cope.
        """
        r = self.get_opening_move(board)
        if r is not None:
            return r
        r = self.tablebase(board)
        if r == None:
            r = self.startSearch(board)
        return r
# testing
if __name__ == "__main__":
    # Alternative test positions (FEN strings) kept for manual experiments:
    #board = chess.Board("1k1r4/pp1b1R2/3q2pp/4p3/2B5/4Q3/PPP2B2/2K5 b - - 0 1")
    board = chess.Board("r1b2r1k/2qnb1pp/p2pNn2/1p6/4P3/P1N1B1Q1/1PP2PPP/R4R1K b - - 0 16")
    #board = chess.Board("8/8/8/1K6/1p1kPb2/1Pp5/P1B5/8 b - - 6 64")
    #board = chess.Board("8/7K/8/8/8/8/R7/7k w - - 0 1")
    #print(list(board.pieces))
    board.turn = False  # False == chess.BLACK: force Black to move
    # Profile one full move selection and dump the hot spots by total time.
    pr = cProfile.Profile()
    pr.enable()
    print(BOT().play(board))
    pr.disable()
    ps = pstats.Stats(pr).sort_stats('tottime')
    ps.print_stats()
| yangman946/yanchess-ai | chessAI/test.py | test.py | py | 24,405 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "chess.polyglot.open_reader",
"line_number": 131,
"usage_type": "call"
},
{
"api_name": "chess.polyglot",
"line_number": 131,
"usage_type": "attribute"
},
{
"api_name": "random.shuffle",
"line_number": 150,
"usage_type": "call"
},
{
"api_name": "ches... |
20136544568 | import torch
import torch.nn as nn
import torch.nn.functional as F
from src.utils import config
from .Vis_Module import Vis_Module
class DeepFakeTSModel(nn.Module):
    """Multi-modal deepfake detector over time-series features.

    One ``Vis_Module`` is built per entry of *module_networks*; each produces a
    per-modality embedding. The embeddings are batch-normed, fused with
    multi-head attention, and classified by a small fully-connected head ending
    in log-softmax.
    """

    def __init__(self, mm_module_properties,
                 modalities,
                 window_size, window_stride,
                 modality_embedding_size,
                 module_networks,
                 batch_first=True,
                 multi_modal_nhead=4,
                 mm_embedding_attn_merge_type='sum',
                 dropout=0.1,
                 activation="relu",
                 is_guiding=False,
                 num_activity_types=2,
                 is_pretrained_feature=False):
        super(DeepFakeTSModel, self).__init__()

        self.mm_module_properties = mm_module_properties
        self.modalities = modalities
        self.num_modality = len(modalities)
        self.batch_first = batch_first
        self.multi_modal_nhead = multi_modal_nhead
        self.mm_embedding_attn_merge_type = mm_embedding_attn_merge_type
        self.dropout = dropout
        self.activation = activation
        self.window_size = window_size
        self.window_stride = window_stride
        self.modality_embedding_size = modality_embedding_size
        self.module_networks = module_networks
        self.num_module_networks = len(self.module_networks)
        self.is_guiding = is_guiding
        # BUG FIX: this was hard-coded to 2, silently ignoring the
        # num_activity_types constructor argument.
        self.num_activity_types = num_activity_types
        self.is_pretrained_feature = is_pretrained_feature

        print('module_networks', self.module_networks)

        # Build one Vis_Module per module network; remember whether any of them
        # uses a bidirectional LSTM, since that doubles the embedding width.
        self.lstm_bidirectional = False
        self.mm_module = nn.ModuleDict()
        for modality in self.module_networks:
            props = self.mm_module_properties[modality]
            self.mm_module[modality] = Vis_Module(
                cnn_in_channel=props['cnn_in_channel'],
                feature_embed_size=props['feature_embed_size'],
                kernel_size=props['kernel_size'],
                lstm_hidden_size=props['lstm_hidden_size'],
                fine_tune=props['fine_tune'],
                batch_first=self.batch_first,
                window_size=self.window_size,
                window_stride=self.window_stride,
                n_head=props['module_embedding_nhead'],
                dropout=props['dropout'],
                activation=props['activation'],
                encoder_num_layers=props['lstm_encoder_num_layers'],
                lstm_bidirectional=props['lstm_bidirectional'],
                lstm_dropout=props['lstm_dropout'],
                pool_fe_kernel=props['feature_pooling_kernel'],
                pool_fe_stride=props['feature_pooling_stride'],
                pool_fe_type=props['feature_pooling_type'],
                is_guiding=self.is_guiding,
                is_pretrained_feature=self.is_pretrained_feature)
            if props['lstm_bidirectional']:
                self.lstm_bidirectional = True

        if self.lstm_bidirectional:
            self.modality_embedding_size = 2 * self.modality_embedding_size

        # Normalisation applied over the module axis of the stacked embeddings.
        self.mm_embeddings_bn = nn.BatchNorm1d(self.num_module_networks)
        self.mm_embeddings_relu = nn.ReLU()
        self.mm_embeddings_dropout = nn.Dropout(p=self.dropout)

        # Cross-modality fusion.
        self.mm_mhattn = nn.MultiheadAttention(embed_dim=self.modality_embedding_size,
                                               num_heads=self.multi_modal_nhead,
                                               dropout=self.dropout)
        self.mm_mhattn_bn = nn.BatchNorm1d(self.num_module_networks)
        self.mm_mhattn_relu = nn.ReLU()
        self.mm_mhattn_dropout = nn.Dropout(p=self.dropout)

        # Classification head: depth depends on bidirectionality, final width
        # on the merge strategy (same wiring as before, de-duplicated).
        in_dim = self.num_module_networks * self.modality_embedding_size
        if self.mm_embedding_attn_merge_type == 'sum':
            out_dim = self.num_module_networks * self.num_activity_types
        else:
            out_dim = self.num_activity_types
        if self.lstm_bidirectional:
            self.fc_output1 = nn.Linear(in_dim, in_dim // 2)
            self.fc_output2 = nn.Linear(in_dim // 2, in_dim // 4)
            self.fc_output3 = nn.Linear(in_dim // 4, out_dim)
        else:
            self.fc_output1 = nn.Linear(in_dim, in_dim // 2)
            self.fc_output2 = nn.Linear(in_dim // 2, out_dim)

        # Filled during forward() for later inspection.
        self.module_attn_weights = None
        self.mm_attn_weight = None

        self.reset_parameters()

    def reset_parameters(self):
        """Xavier-initialise the classifier weights and zero the biases."""
        nn.init.xavier_uniform_(self.fc_output1.weight)
        nn.init.constant_(self.fc_output1.bias, 0.)
        nn.init.xavier_uniform_(self.fc_output2.weight)
        nn.init.constant_(self.fc_output2.bias, 0.)
        if self.lstm_bidirectional:
            nn.init.xavier_uniform_(self.fc_output3.weight)
            nn.init.constant_(self.fc_output3.bias, 0.)

    def forward(self, input):
        """Run every per-modality module, fuse with attention, classify.

        *input* is a dict keyed by the config modality tags (fake/real data
        plus their mask variants). Returns log-probabilities (log_softmax).
        """
        attn_output = {}
        self.module_attn_weights = {}
        for module_network in self.module_networks:
            if self.is_guiding:
                # Guided mode: the real modality (and its mask) steers the fake one.
                tm_attn_output, self.module_attn_weights[module_network] = \
                    self.mm_module[module_network](input[config.fake_modality_tag],
                                                   input[config.real_modality_tag],
                                                   input[config.fake_modality_tag + config.modality_mask_suffix_tag],
                                                   input[config.real_modality_tag + config.modality_mask_suffix_tag])
            else:
                tm_attn_output, self.module_attn_weights[module_network] = \
                    self.mm_module[module_network](input[config.fake_modality_tag],
                                                   None,
                                                   input[config.fake_modality_tag + config.modality_mask_suffix_tag],
                                                   None)
            attn_output[module_network] = tm_attn_output

        # Stack per-module embeddings: (batch, num_modules, embed).
        mm_embeddings = torch.stack([attn_output[modality] for modality in self.module_networks],
                                    dim=1).contiguous()
        mm_embeddings = self.mm_embeddings_relu(self.mm_embeddings_bn(mm_embeddings))

        nbatches = mm_embeddings.shape[0]
        # nn.MultiheadAttention (batch_first=False) expects (seq, batch, embed).
        mm_embeddings = mm_embeddings.transpose(0, 1).contiguous()
        mattn_output, self.mm_attn_weight = self.mm_mhattn(mm_embeddings, mm_embeddings, mm_embeddings)
        mattn_output = mattn_output.transpose(0, 1).contiguous()
        mattn_output = self.mm_mhattn_dropout(self.mm_mhattn_relu(self.mm_mhattn_bn(mattn_output)))

        if self.mm_embedding_attn_merge_type == 'sum':
            # NOTE(review): summing over the module axis yields (batch, embed),
            # but fc_output1 expects num_module_networks * embed features —
            # confirm the 'sum' path is actually shape-compatible upstream.
            mattn_output = torch.sum(mattn_output, dim=1).squeeze(dim=1)
        mattn_output = mattn_output.contiguous().view(nbatches, -1)

        if self.lstm_bidirectional:
            output = self.fc_output1(mattn_output)
            output = self.fc_output2(output)
            output = self.fc_output3(output)
        else:
            output = self.fc_output1(mattn_output)
            output = self.fc_output2(output)

        return F.log_softmax(output, dim=1)
| mmiakashs/Multimodal-Deepfakes-Detection | src/network/DeepFakeTSModel.py | DeepFakeTSModel.py | py | 10,428 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.nn.Module",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "torch.nn.ModuleDict",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_... |
556015803 | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
from pymongo import MongoClient
class JobparserPipeline(object):
    """Scrapy pipeline: normalises the scraped salary tokens into
    (min_salary, max_salary, valuta) and stores each item in MongoDB,
    one collection per spider."""

    def __init__(self):
        # NOTE: connection parameters are hard-coded (local dev setup).
        client = MongoClient('localhost', 27017)
        self.mongo_base = client.vacancy

    def process_item(self, item, spider):
        """Replace the raw 'salary' field with parsed bounds and insert the item."""
        collection = self.mongo_base[spider.name]
        item['min_salary'], item['max_salary'], item['valuta'] = \
            JobparserPipeline.salary_editor(item['salary'], spider.name)
        del(item['salary'])
        if item['name']:
            collection.insert_one(item)
        return item

    @staticmethod
    def salary_editor(salary_lst, site):
        """Parse the raw salary token list scraped from *site* ('hh' or 'sj').

        Returns (min_salary, max_salary, valuta); bounds that are not present
        in the listing stay None. Returns None for unknown sites.
        """
        min_salary = None
        max_salary = None
        valuta = None

        def val_symbol(a):
            # Map the currency token used on the site to a currency code.
            val_dct = {'₽': 'RUB',
                       'USD': 'USD',
                       'руб.': 'RUB',
                       'бел.\xa0руб.': 'BYN',
                       'KZT': 'KZT',
                       'RUB': 'RUB',
                       'EUR': 'EUR',
                       'грн.': 'UAH'}
            return val_dct[a]

        def salary_hh(sal_lst):
            # hh.ru formats: ['до', X, ...] / ['от', X, ...] / ['от', X, 'до', Y, ...]
            nonlocal min_salary, max_salary, valuta
            if sal_lst[0].replace(' ', '') == 'до':
                max_salary = int(sal_lst[1].replace('\xa0', ''))
                valuta = val_symbol(sal_lst[3])
            elif sal_lst[0].replace(' ', '') == 'от':
                min_salary = int(sal_lst[1].replace('\xa0', ''))
                if sal_lst[2].replace(' ', '') == 'до':
                    max_salary = int(sal_lst[3].replace('\xa0', ''))
                    valuta = val_symbol(sal_lst[5])
                else:
                    valuta = val_symbol(sal_lst[3])
            return min_salary, max_salary, valuta

        def salary_sj(sal_lst):
            # superjob formats: a range, a single figure, or 'от'/'до' + figure.
            nonlocal min_salary, max_salary, valuta
            if sal_lst[0].replace('\xa0', '').isdigit():
                if len(sal_lst) > 3:
                    min_salary = int(sal_lst[0].replace('\xa0', ''))
                    max_salary = int(sal_lst[4].replace('\xa0', ''))
                    valuta = val_symbol(sal_lst[6])
                else:
                    # BUG FIX: a single figure means min == max; the previous
                    # `min_salary, max_salary = int(...)` tried to tuple-unpack
                    # one int and raised TypeError.
                    min_salary = max_salary = int(sal_lst[0].replace('\xa0', ''))
                    valuta = val_symbol(sal_lst[2])
            elif sal_lst[0].replace(' ', '') == 'от':
                min_salary = int(sal_lst[2].replace('\xa0', ''))
                valuta = val_symbol(sal_lst[4])
            elif sal_lst[0].replace(' ', '') == 'до':
                max_salary = int(sal_lst[2].replace('\xa0', ''))
                valuta = val_symbol(sal_lst[4])
            return min_salary, max_salary, valuta

        if site == 'hh':
            return salary_hh(salary_lst)
        elif site == 'sj':
            return salary_sj(salary_lst)
| GruXsqK/Methods_scraping | Lesson_5/jobparser/pipelines.py | pipelines.py | py | 2,967 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pymongo.MongoClient",
"line_number": 12,
"usage_type": "call"
}
] |
38221108073 | from itertools import product
def solution(word):
    """Return the 1-based rank of *word* in the sorted dictionary of all
    1- to 5-letter words over the vowels A, E, I, O, U."""
    vowels = 'AEIOU'
    dictionary = sorted(
        ''.join(combo)
        for length in range(1, 6)
        for combo in product(vowels, repeat=length)
    )
    return dictionary.index(word) + 1
{
"api_name": "itertools.product",
"line_number": 7,
"usage_type": "call"
}
] |
28864712409 | import requests
import lxml
from bs4 import BeautifulSoup
import re
def dadjokes():
    """Scrape dad jokes from boredpanda and write them to joke.txt, one per line.

    Jokes live in <span class="bordered-description"> elements on the page.
    """
    source = requests.get(
        "https://www.boredpanda.com/funny-dad-jokes-puns/?utm_source=google&utm_medium=organic&utm_campaign=organic").text
    soup = BeautifulSoup(source, 'lxml')
    # (removed an unused soup.find_all('article') call)
    contents = soup.find_all("span", class_="bordered-description")
    # utf-8 explicitly: jokes contain curly quotes etc. that would crash on
    # platforms whose default encoding cannot represent them.
    with open('joke.txt', "w", encoding="utf-8") as f:
        for content in contents:
            f.write(content.text + '\n')
# Runs the scraper on import as well as when executed directly.
dadjokes()
''' content = content.findAll('li')
jokes = []
for x, con in enumerate(content):
con = str(con)
con = con[4:]
con = con[:-5]
if '<em>' or '</em>' or '\n' or '<i>' or '</i>' or '<br/>' or '\xa0' in con:
con = con.replace("<em>", "")
con = con.replace("</em>", "")
con = con.replace("\n", "")
con = con.replace('<i>', "")
con = con.replace('</i>', "")
con = con.replace('<br/>', "")
con = con.replace("\xa0", "")
jokes.append(con)
with open('jokes.txt', "w") as f:
for joke in jokes:
f.writelines(joke + '\n')
''' | rushihadole/discord_bot | jokescrapper.py | jokescrapper.py | py | 1,264 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 11,
"usage_type": "call"
}
] |
74224409063 |
import datetime
import logging
import re
from math import ceil
import numpy as np
import matplotlib.pyplot as plt
import os
import pandas as pd
import seaborn as sns
import skimage.io as io
from numpy.linalg import LinAlgError
from skimage import img_as_float
from skimage.color import rgb2lab
from skimage.exposure import equalize_adapthist
from skimage.filters import gaussian
from skimage.transform import resize
from sklearn.cluster import KMeans
from src.data_loader import sample_names, images, root_dir, FOLDER_EXPERIMENTS, get_expert_Ki67
from src.utils import apply_on_normalized_luminance, colormap, outline_regions, average_color, \
visualize_contrasted_grayscale
from src.v5_splitting_cells.cellmask_processing import fill_holes, remove_thin_structures, manhattan_distance_to_mask, \
local_maxima_location
MAX_PATIENTS = None  # limit on patients processed; None = all
MAX_IMAGES_PER_PATIENT = None  # limit on images per patient; None = all
SCALE = None  # image rescale factor; None to deactivate
GAUSSIAN_FILTER_SD = 2  # sigma of the Gaussian blur applied before CLAHE
CLUSTERING_NUM_CENTROIDS = 4  # K for the K-means colour clustering
RADIUS_FILL_HOLES = 3  # max hole radius filled during mask post-processing
WIDTH_REMOVE_THIN_STRUCTURES = 12  # min width kept when removing thin structures
if __name__ == "__main__":

    # One results directory per run, timestamped.
    execution_id = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
    results_dir = root_dir(FOLDER_EXPERIMENTS(version=7), execution_id)
    os.makedirs(results_dir, exist_ok=True)

    # Log both to a per-run file and to the console.
    logging.basicConfig(
        level=logging.INFO,
        handlers=[
            logging.FileHandler(os.path.join(results_dir, 'log.txt')),
            logging.StreamHandler()
        ]
    )

    s_names = sample_names()
    df = None
    for idx_s, s_name in enumerate(s_names[0:MAX_PATIENTS]):
        Ki67_gt = get_expert_Ki67(s_name)
        for path_image, original_image in images(patient_name=s_name, max_images=MAX_IMAGES_PER_PATIENT):
            m = re.search(f'/{s_name}-(?P<n_img>[0-9]+).(?P<ext>[a-zA-Z]+)$', path_image)
            img_ext = m.group('ext')
            img_number = int(m.group('n_img'))
            img_name = f'{s_name}-{img_number}'
            img_filename = f'{img_name}.{img_ext}'

            results_p_dir = os.path.join(results_dir, s_name, img_name)
            os.makedirs(results_p_dir, exist_ok=True)

            result = {
                'execution_id': execution_id,
                'sample_name': s_name,
                'img_number': img_number,
                'img_file': img_filename,
                'Ki67_gt': Ki67_gt,
            }

            logging.info(f'Processing: {s_name}-{img_number}')

            if SCALE:
                logging.info('Resizing image')
                sz = [ceil(d*SCALE) for d in original_image.shape[:2]] + [3]
                original_image = resize(img_as_float(original_image), sz, mode='reflect', anti_aliasing=True)

            io.imsave(fname=os.path.join(results_p_dir, '01 01 Original.jpg'),
                      arr=original_image)
            image = original_image

            logging.info('Gaussian filter')
            image = apply_on_normalized_luminance(
                operation=lambda img: gaussian(img, sigma=GAUSSIAN_FILTER_SD),
                image_rgb=image)
            io.imsave(fname=os.path.join(results_p_dir, f'01 02 - Gaussian filter.jpg'),
                      arr=image)

            logging.info('CLAHE')
            image = apply_on_normalized_luminance(
                lambda img: equalize_adapthist(img, clip_limit=0.02),
                image_rgb=image)
            io.imsave(fname=os.path.join(results_p_dir, f'01 03 - CLAHE.jpg'),
                      arr=image)

            logging.info('K-means clustering')
            image_flat = rgb2lab(image).reshape((-1, 3))
            clustering = KMeans(n_clusters=CLUSTERING_NUM_CENTROIDS, random_state=0).fit(image_flat)
            io.imsave(fname=os.path.join(results_p_dir, f'02 K-means - labels.jpg'),
                      arr=colormap(clustering.labels_.reshape(image.shape[0:2])))
            io.imsave(fname=os.path.join(results_p_dir, f'02 K-means - regions.jpg'),
                      arr=outline_regions(image=original_image, region_labels=clustering.labels_.reshape(image.shape[0:2])))
            io.imsave(fname=os.path.join(results_p_dir, f'02 K-means - average_color.jpg'),
                      arr=average_color(image=original_image, region_labels=clustering.labels_.reshape(image.shape[0:2])))
            result['clustering_inertia'] = clustering.inertia_

            logging.info('Class separation')
            # Positive mask: cluster with maximum on channel a
            idx_positive_cluster = np.argmax(clustering.cluster_centers_[:, 1])
            positive_mask = np.equal(clustering.labels_, idx_positive_cluster).reshape(image.shape[0:2])
            # Negative mask: cluster with minimum on channel b
            idx_negative_cluster = np.argmin(clustering.cluster_centers_[:, 2])
            negative_mask = np.equal(clustering.labels_, idx_negative_cluster).reshape(image.shape[0:2])
            io.imsave(fname=os.path.join(results_p_dir, f'03 Positives.jpg'),
                      arr=outline_regions(image=original_image, region_labels=positive_mask))
            io.imsave(fname=os.path.join(results_p_dir, f'03 Negatives.jpg'),
                      arr=outline_regions(image=original_image, region_labels=negative_mask))
            result['raw_positive_area_ratio'] = np.sum(positive_mask)/positive_mask.size
            result['raw_negative_area_ratio'] = np.sum(negative_mask)/positive_mask.size
            result['Ki67_from_raw_area_ratios'] = \
                result['raw_positive_area_ratio'] / (result['raw_positive_area_ratio']+result['raw_negative_area_ratio'])

            logging.info('Mask postprocessing')
            positive_mask = fill_holes(positive_mask, max_radius=RADIUS_FILL_HOLES)
            positive_mask = remove_thin_structures(positive_mask, min_width=WIDTH_REMOVE_THIN_STRUCTURES)
            negative_mask = fill_holes(negative_mask, max_radius=RADIUS_FILL_HOLES)
            negative_mask = remove_thin_structures(negative_mask, min_width=WIDTH_REMOVE_THIN_STRUCTURES)
            # BUG FIX: the positive outline used to be saved twice and the
            # negative outline never saved; the third save now targets negatives.
            io.imsave(fname=os.path.join(results_p_dir, f'04 Positives postprocessed.jpg'),
                      arr=outline_regions(image=original_image, region_labels=positive_mask))
            io.imsave(fname=os.path.join(results_p_dir, f'04 Positives postprocessed.png'), arr=colormap(positive_mask))
            io.imsave(fname=os.path.join(results_p_dir, f'04 Negatives postprocessed.jpg'),
                      arr=outline_regions(image=original_image, region_labels=negative_mask))
            io.imsave(fname=os.path.join(results_p_dir, f'04 Negatives postprocessed.png'), arr=colormap(negative_mask))
            result['corrected_positive_area_ratio'] = np.sum(positive_mask)/positive_mask.size
            result['corrected_negative_area_ratio'] = np.sum(negative_mask)/positive_mask.size
            result['Ki67_from_corrected_area_ratios'] = \
                result['corrected_positive_area_ratio'] / (result['corrected_positive_area_ratio']+result['corrected_negative_area_ratio'])

            logging.info('Split cells')
            positive_pixelwise_radius = manhattan_distance_to_mask(mask=~positive_mask)
            positive_cell_location = local_maxima_location(positive_pixelwise_radius)
            negative_pixelwise_radius = manhattan_distance_to_mask(mask=~negative_mask)
            negative_cell_location = local_maxima_location(negative_pixelwise_radius)
            # BUG FIX: the negative-cell visualisations were written to the
            # '05 Positives - ...' filenames, overwriting the positive images.
            io.imsave(fname=os.path.join(results_p_dir, f'05 Positives - Distance to border.jpg'),
                      arr=visualize_contrasted_grayscale(positive_pixelwise_radius))
            io.imsave(fname=os.path.join(results_p_dir, f'05 Positives - Locations border.jpg'),
                      arr=outline_regions(image=original_image, region_labels=positive_cell_location))
            io.imsave(fname=os.path.join(results_p_dir, f'05 Negatives - Distance to border.jpg'),
                      arr=visualize_contrasted_grayscale(negative_pixelwise_radius))
            io.imsave(fname=os.path.join(results_p_dir, f'05 Negatives - Locations border.jpg'),
                      arr=outline_regions(image=original_image, region_labels=negative_cell_location))
            result['positive_count'] = np.sum(positive_cell_location)
            result['negative_count'] = np.sum(negative_cell_location)
            result['Ki67_from_count'] = result['positive_count'] / (result['positive_count']+result['negative_count'])

            if df is None:
                df = pd.DataFrame(columns=result.keys())
            df = df.append(pd.DataFrame(result, index=[result['img_file']]))

    df.to_excel(os.path.join(results_dir, 'df_all_results.xlsx'))
    df.to_csv(os.path.join(results_dir, 'df_all_results.csv'))

    # Plotting is best-effort: seaborn/matplotlib may fail on degenerate data.
    try:
        sns.catplot(x='Ki67_gt', y='Ki67_from_count', data=df)
        plt.savefig(os.path.join(results_dir, 'df_catplot.png'))
        plt.close()
        plt.clf()

        fig, axes = plt.subplots(nrows=1, ncols=3)
        sns.violinplot(x='Ki67_gt', y="Ki67_from_raw_area_ratios", data=df, ax=axes[0])
        sns.violinplot(x='Ki67_gt', y="Ki67_from_corrected_area_ratios", data=df, ax=axes[1])
        sns.violinplot(x='Ki67_gt', y="Ki67_from_count", data=df, ax=axes[2])
        fig.tight_layout()
        plt.savefig(os.path.join(results_dir, 'df_violin.png'))
        plt.close()
        plt.clf()

        sns.pairplot(data=df, hue='Ki67_gt', vars=["Ki67_from_raw_area_ratios", "Ki67_from_corrected_area_ratios", "Ki67_from_count"])
        plt.savefig(os.path.join(results_dir, 'df_pairplot.png'))
        plt.close()
        plt.clf()
    except (ValueError, LinAlgError):
        pass
| AntoineRouland/ki67 | src/v7_validation/run_v7.py | run_v7.py | py | 9,829 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "datetime.datetime.now",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "src.data_loader.root_dir",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": ... |
74090896742 | from bs4 import BeautifulSoup
from urllib.request import urlopen
from platform import subprocess
# if genlist = 0, then this script downloads the files, the cmd_downloader variable comes into play
# if genlist = 1, then this script generates a list.txt file containing direct links to music files in the working directory
# the list.txt can be imported in any download manager like IDM , FDM etc to download all files at once with full speed
genlist = 1
cmd_downloader = 'aria2c -x 8 -s 8 -k 3M'
# example of url : http://anime.thehylia.com/soundtracks/album/death-note-original-soundtrack
def run():
    """Prompt for a Hylia soundtrack-album URL, fetch the page, and hand all
    of its anchor tags to getsongs() for downloading / listing."""
    url = input('url of soundtrack album \n> ')
    response = urlopen(url)
    data = response.read()
    soup = BeautifulSoup(data, 'lxml')  # HTML.parser fails, smart technique hylia
    # open('list.html', 'w').write(data.decode())
    getsongs( soup.body.find_all('a') )
def getsongs(tds):
    """Resolve every album <a> tag to its direct .mp3 link, then either
    download each song (genlist == 0) or collect the links into list.txt
    (genlist == 1) for an external download manager.
    """
    # The module-level `from platform import subprocess` only worked by
    # accident on old Pythons; import the real module here instead.
    import shlex
    import subprocess
    download_links = []
    cur = 1
    for i in tds:
        link = i['href']
        if not ismp3(link):
            continue
        # Each song page contains the actual direct-download link.
        response = urlopen(link)
        songdata = response.read()
        songsoup = BeautifulSoup(songdata, 'lxml')
        links = songsoup.body.find_all('a')
        for dlink in links:
            if not ismp3(dlink['href']):
                continue
            print('Downloading song #' + str(cur))
            if genlist:
                download_links.append(dlink['href'] + '\n')
            else:
                # BUG FIX: pass an argv list (shell=False) — a single command
                # string is treated as the program name on POSIX and fails.
                subprocess.call(shlex.split(cmd_downloader) + [dlink['href']])
            break  # only the first direct link per song page
        cur += 1
    if genlist:
        open('list.txt', 'w').write(''.join(download_links))
def ismp3(link):
    """True when *link* names an .mp3 file with at least one character
    before the extension."""
    return len(link) >= 5 and link.endswith('.mp3')
# Script entry point: run() prompts for the album URL interactively.
if __name__ == '__main__':
    run()
{
"api_name": "urllib.request.urlopen",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "urllib.request.urlopen",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "bs4.B... |
23299064589 | from ext_cloud.BaseCloud.BaseNetworks.BaseGateway import BaseGatewaycls
from boto import vpc
from ext_cloud.AWS.AWSBaseCloud import AWSBaseCloudcls
class AWSGatewaycls(AWSBaseCloudcls, BaseGatewaycls):
    """Wrapper around a boto internet-gateway object."""

    # Lazily populated in __init__ / __Vpc respectively.
    __aws_gateway = None
    __vpc = None

    def __init__(self, *arg, **kwargs):
        # arg[0] is the raw boto gateway object being wrapped.
        self.__aws_gateway = arg[0]
        self._aws_ref = arg[0]
        name = None
        # NOTE(review): reads the tag key 'name' but the setter below writes
        # 'Name' (the usual AWS convention) — confirm which key is intended.
        if 'name' in self.__aws_gateway.tags:
            name = self.__aws_gateway.tags['name']
        super(AWSGatewaycls, self).__init__(id=self.__aws_gateway.id, name=name, credentials=kwargs['credentials'])

    @AWSBaseCloudcls.name.setter
    def name(self, value):
        # Persist the name as an AWS tag and cache it locally.
        self.addtag('Name', value)
        self._name = value

    @property
    def state(self):
        # Current state of the underlying boto gateway object.
        return self.__aws_gateway.state

    @property
    def __Vpc(self):
        return self.__vpc

    @__Vpc.getter
    def __Vpc(self):
        # Lazily open (and cache) a VPC connection using the stored credentials.
        if self.__vpc is None:
            self.__vpc = vpc.boto.connect_to_region(self._credentials['region_name'], aws_access_key_id=self._credentials['username'], aws_secret_access_key=self._credentials['password'])
        return self.__vpc

    def delete(self):
        # Delete this internet gateway via the cached VPC connection.
        self.__Vpc.delete_internet_gateway(self._id)
| Hawkgirl/ext_cloud | ext_cloud/AWS/AWSNetworks/AWSGateway.py | AWSGateway.py | py | 1,211 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "ext_cloud.AWS.AWSBaseCloud.AWSBaseCloudcls",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "ext_cloud.BaseCloud.BaseNetworks.BaseGateway.BaseGatewaycls",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "ext_cloud.AWS.AWSBaseCloud.AWSBaseCloudcls.n... |
1415008124 | from ninja import Router, Query
from ninja.pagination import paginate
import logging
from django.http import Http404
from datetime import datetime
from typing import List
from reservation_system.schemas.room import (
RoomSchema,
CreateRoomSchema,
PatchRoomSchema,
PutRoomSchema,
)
from reservation_system.cruds.room import room_crud
from reservation_system.exceptions.exceptions import RoomObjectDoesNotExist
logger = logging.getLogger(__name__)
router = Router()
@router.post("/create", response={201: RoomSchema}, url_name="create-room")
def create_room(request, room: CreateRoomSchema):
    """Create a room from the posted payload and return it (HTTP 201)."""
    return room_crud.create(room)
@router.get("/{int:room_id}", response=RoomSchema, url_name="retrieve-room")
def retrieve_room(request, room_id):
    """Return the room with *room_id*; raise RoomObjectDoesNotExist otherwise."""
    room = room_crud.get(room_id)
    if room:
        return room
    raise RoomObjectDoesNotExist
@router.put("/{int:room_id}", response=CreateRoomSchema, url_name="put-room")
def put_room(request, room_id, obj_in: PutRoomSchema):
    """Full update of the room with *room_id*; 404-style error when missing."""
    existing = room_crud.get(room_id)
    if not existing:
        raise RoomObjectDoesNotExist
    return room_crud.update(existing, obj_in)
@router.patch("/{int:room_id}", response=CreateRoomSchema, url_name="patch-room")
def patch_room(request, room_id, obj_in: PatchRoomSchema):
    """Partial update of the room with *room_id*; error when missing."""
    existing = room_crud.get(room_id)
    if not existing:
        raise RoomObjectDoesNotExist
    return room_crud.update(existing, obj_in)
# BUG FIX: url_name was "patch-room" — a copy/paste slip that collided with
# the PATCH route's name and made reverse-lookup of this endpoint ambiguous.
@router.delete("/{int:room_id}", response={204: None}, url_name="delete-room")
def delete_room(request, room_id):
    """Delete the room with *room_id*; returns HTTP 204 with no body.

    Raises RoomObjectDoesNotExist when the id is unknown.
    """
    room = room_crud.get(room_id)
    if not room:
        raise RoomObjectDoesNotExist
    room_crud.delete(room_id)
    return 204, None
@router.get("/{check_in}/{check_out}/{count}", response=List[RoomSchema], url_name="check-available-room")
def check_available_room(request, check_in: str, check_out: str, count: int):
    """List rooms available between *check_in* and *check_out* for *count* guests.

    Timestamps use the '%Y-%m-%d %H:%M' format; malformed ones yield Http404.
    """
    # Removed a leftover debug print; the crud call is outside the try so its
    # own (unrelated) ValueErrors are no longer masked as 404s.
    try:
        check_in_dt = datetime.strptime(check_in, "%Y-%m-%d %H:%M")
        check_out_dt = datetime.strptime(check_out, "%Y-%m-%d %H:%M")
    except ValueError:
        raise Http404("Invalid datetime format")
    return room_crud.available_rooms(check_in_dt, check_out_dt, count)
{
"api_name": "logging.getLogger",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "ninja.Router",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "reservation_system.schemas.room.CreateRoomSchema",
"line_number": 23,
"usage_type": "name"
},
{
"a... |
43245032675 | """
@file util.py
@author Himanshu Mishra
This module contains all the utility functions required by other modules.
All the plotting and displaying features are included with this module.
"""
# Necessary libraries
import os, shutil
import sys
import string
import cv2 as cv # OpenCV library used for image processing
import numpy as np # Numpy used for numerical calculations
import tensorflow as tf
import tensorflow_hub as hub
import matplotlib.pyplot as plt # Matplotlib used for plotting
# Define image size
IMG_SIZE = 224
# Define the batch size, 32 is a good default
BATCH_SIZE = 32
true_labels = {}
letters = []
# --- Clear Dir ---
def clearDir(folder):
    """
    Deletes every file, symlink and sub-directory inside the given folder,
    keeping the folder itself. A failure on one entry is reported to stdout
    and does not stop the cleanup of the remaining entries.
    @params folder Directory whose contents are removed
    """
    for entry in os.listdir(folder):
        entry_path = os.path.join(folder, entry)
        try:
            if os.path.isfile(entry_path) or os.path.islink(entry_path):
                os.unlink(entry_path)
            elif os.path.isdir(entry_path):
                shutil.rmtree(entry_path)
        except Exception as e:
            print('Failed to delete %s. Reason: %s' % (entry_path, e))
    return
# --- Reading Image ---
def readImage(imageFile):
    """
    Reads Image from a file location using
    OpenCV.
    @params imageFile Image File Location
    @returns image as numpy array (BGR channel order;
             presumably None if the file cannot be read — cv.imread
             does not raise, TODO confirm callers handle that)
    """
    image = cv.imread(imageFile)
    return image
# --- Displaying Image ---
def displayImage(image, comments="Test Image"):
    """
    Shows the image in an OpenCV window titled with the given comments and
    blocks until any key is pressed, then closes all OpenCV windows.
    @params image {numpy array} Image to display
    @params comments {string} Window title
    """
    cv.imshow(comments, image)
    cv.waitKey(0)
    cv.destroyAllWindows()
    return
# --- Plotting Image ---
def plotImage(image, comments="Test Image", col="gray"):
    """
    Plots the image on the current Matplotlib axes with the title set and the
    axis hidden. The gray colormap is used when col == "gray"; otherwise
    Matplotlib's default colour handling applies.
    @params image {numpy array} Image to plot
    @params comments {string} Plot title
    @params col {string} Colour channel hint ("gray" by default)
    """
    cmap = col if col == "gray" else None
    plt.imshow(image, cmap=cmap)
    plt.title(comments)
    plt.axis("off")
    return
############### Preprocessing of Image ###############
# --- GrayScale Conversion ---
def toGray(image):
    """
    Returns a single-channel version of the image: inputs with more than two
    dimensions are converted with OpenCV (BGR -> gray); already-gray inputs
    are returned unchanged.
    @params image {numpy array} Image to convert to GrayScale
    @returns grayscale image
    """
    if len(image.shape) <= 2:
        return image
    return cv.cvtColor(image, cv.COLOR_BGR2GRAY)
# --- Rescaling Image ---
def rescaleImage(image, shape, interpolation=cv.INTER_LINEAR):
    """
    Rescales the given image to the given shape using the given interpolation.
    @params image {numpy array} Image to rescale
    @params shape {tuple} Target size, passed to cv.resize as dsize
            (OpenCV interprets dsize as (width, height))
    @params interpolation {OpenCV flag} Interpolation to use (INTER_LINEAR by default)
    @returns rescaled image
    """
    # BUG FIX: cv.resize's third positional parameter is `dst`, not
    # `interpolation`, so the flag was silently ignored before; it must be
    # passed by keyword.
    rescale = cv.resize(image, shape, interpolation=interpolation)
    return rescale
# --- Aspect Ratio ---
def getAspectRatio(image):
"""
Finds the aspect ratio (height / width) of the image.
@params image {numpy array} image to process
@returns aspect ratio of the given image
"""
h, w = image.shape[0], image.shape[1]
return h / w
# --- Reshape Image ---
def reshape(image, height=224):
"""
Finds the new shape of the image when height is changed to the given height maintaining the aspect ratio.
@params image {numpy array} image to reshape
@params height {integer} new height of the image (128 by default)
@returns new shape of the image
"""
shape = (height, int(height * getAspectRatio(image)))
return shape
############### Contours ###############
# --- Draw Contours ---
def drawContour(image, cnt, num):
"""
Draws a bounded rectangle around the contour passed and puts the given text into the rectangle.
@params image {numpy array} Image to draw contour on.
@params cnt {numpy array} Contour to bound
@params num {string / integer} Text to put in
"""
M = cv.moments(cnt)
cX = int(M["m10"] / M["m00"])
cY = int(M["m01"] / M["m00"])
# drawing the contour number on the image
cv.putText(image, f"{num + 1}", (cX - 20, cY), cv.FONT_HERSHEY_PLAIN, \
1.0, (255, 128, 0), 1)
x, y, w, h = cv.boundingRect(cnt)
cv.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 1)
return image
def drawAllContours(image, contours):
"""
Draws a bounded rectangle around all the contours passed and puts an index to each.
:param image: {numpy array} image to draw contours on
:param contours: {list} list of all contours to be drawn
:return: image with contours drawn on it
"""
for i, cnt in enumerate(contours):
M = cv.moments(cnt)
cX = int(M["m10"] / M["m00"])
cY = int(M["m01"] / M["m00"])
# drawing the contour number on the image
cv.putText(image, f"{i + 1}", (cX - 20, cY), cv.FONT_HERSHEY_PLAIN,\
1.0, (255, 128, 0), 1)
x, y, w, h = cv.boundingRect(cnt)
cv.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 1)
return image
# --- Plot All Lines ---
def plotAllLines(image, lines, comments="All lines"):
"""
Plots all lines
"""
image_rect = rescaleImage(image, reshape(image))
lineCount = 0
for line in lines:
for word in line:
drawContour(image_rect, word, lineCount)
lineCount += 1
displayImage(image_rect, comments)
# --- Plot All Words ---
def plotAllWords(image, lines, comments="Final Result"):
"""
Plots all the words in order.
"""
wordCount = 0
image_rect = rescaleImage(image.copy(), reshape(image))
for words in lines:
for word in words:
drawContour(image_rect, word, wordCount)
wordCount += 1
# plotImage(image_rect, comments)
displayImage(image_rect, comments)
# --- Display All Words ---
def displayAllWords(lines, comments="All Words"):
"""
Displays all the words in reading order one by one.
"""
wordCount = 0
for line in lines:
for word in line:
displayImage(word, str(wordCount))
wordCount += 1
return
# --- Save All Words ---
def saveAllWords(lines, dir):
"""
Saves all the words in the given directory.
"""
wordCount = 0
for line in lines:
for word in line:
wordFile = dir + "/" + str(wordCount) + ".png"
# print(f"Word File: {wordFile}")
# print(os.listdir(dir + "/.."))
cv.imwrite(wordFile, word)
# cv.imwrite()
wordCount += 1
return
# --- Save All Characters ---
def saveAllChars(chars, dir):
"""
Saves all the character images in the given directory.
"""
for i,c in enumerate(chars):
charFile = dir + "/" + str(i) + ".png"
cv.imwrite(charFile, c)
return
# Plotting the graph obtained
def plotGraph(graph):
"""
Plots the given graph.
"""
fig, ax = plt.subplots(figsize=(10, 6))
ax.plot(graph)
def segmentImage(image, segment_array, c='g'):
"""
Segments the given image at the positions in the given array.
@param c Color of the segmented lines
"""
for x in segment_array:
image[:,x] = (0,255,0)
displayImage(image, "Segmented Image")
def plotDualGraphs(graph1, graph2, c2='g'):
fig, ax = plt.subplots(figsize=(10, 6))
ax.plot(graph1)
ax.plot(graph2, color=c2)
return fig, ax
# ---------- MODEL ----------
# Importing the model
def load_model(model_path):
"""
Loads a saved model from a specified path.
"""
print(f"Loading saved model from: {model_path}")
model = tf.keras.models.load_model(model_path,
custom_objects={"KerasLayer":hub.KerasLayer})
return model
def process_image(image_path):
"""
Takes an image file path and turns it into a Tensor.
"""
# Read in image file
image = tf.io.read_file(image_path)
# Preprocess the image
# image = preprocess_image(image_path)
# Turn the jpeg image into numerical Tensor with 3 color channels
image = tf.image.decode_jpeg(image, channels=3)
# Convert the colour channel values from 0-255 values to 0-1 values
image = tf.image.convert_image_dtype(image, tf.float32)
# Resize the image to our desired size (224, 224)
image = tf.image.resize(image, size=[IMG_SIZE, IMG_SIZE])
return image
def get_image_label(image_path, label):
"""
Takes an image file path name and the associated label,
processes the image and returns a tuple of (image, label).
"""
image = process_image(image_path)
return image, label
# Create a function to turn data into batches
def create_data_batches(x, y=None, batch_size=BATCH_SIZE, valid_data=False, test_data=False):
"""
Create batches of data out of image (x) and label (y) pairs.
Shuffles the data if it's training data but doesn't shuffle it if it is the validation data.
Also accepts test data as input (no labels)
"""
# If the data is a test dataset, we probably don't have labels
if test_data:
print("Creating test data batches...")
data = tf.data.Dataset.from_tensor_slices(tf.constant(x)) # Only file paths
data_batch = data.map(process_image).batch(batch_size)
return data_batch
elif valid_data:
print("Creating validation data batches...")
data = tf.data.Dataset.from_tensor_slices((tf.constant(x), # file paths
tf.constant(y)))# labels
data_batch = data.map(get_image_label).batch(batch_size)
return data_batch
else:
# If the data is a training dataset, we shuffle it
print("Creating training data batches...")
# Turn filepaths and labels into Tensors
data = tf.data.Dataset.from_tensor_slices((tf.constant(x), # filepaths
tf.constant(y)))# labels
# Shuffling pathnames and labels before mapping image processing function,
# this is done to reduce the time required (less dense data = less time taken).
data = data.shuffle(buffer_size=len(x))
# Create (image, label) tuples (this also turns image path into preprocessed image)
data = data.map(get_image_label)
# Turn the data into batches
data_batch = data.batch(batch_size)
return data_batch
def runparam():
for i, s in enumerate(string.ascii_uppercase):
letters.append(s)
true_labels[s] = np.zeros((26))
true_labels[s][i] = 1
def unroll_label(label):
"""
Utility function used to unroll (get letter) from
the given label
"""
runparam()
return letters[np.argmax(label)]
# Turn prediction probabilities into their respective label (easier to understand)
def get_pred_label(prediction_probabilities):
"""
Turns an array of prediction probabilities into a label.
"""
return unroll_label([np.argmax(prediction_probabilities) == i for i in range(26)])
| bhawesh-source/OCR | engine/util.py | util.py | py | 11,167 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.listdir",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "os.path.isfile",
"line_numbe... |
16036460087 | # References https://github.com/Sayan98/pytorch-segnet/blob/master/src/model.py
# Small segnet version
import torch
import torch.nn as nn
import torch.nn.functional as F
import warnings
warnings.filterwarnings("ignore")
class Conv2dSame(torch.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, bias=True, padding_layer=torch.nn.ReflectionPad2d):
super().__init__()
ka = kernel_size // 2
kb = ka - 1 if kernel_size % 2 == 0 else ka
self.net = torch.nn.Sequential(
padding_layer((ka,kb,ka,kb)),
torch.nn.Conv2d(in_channels, out_channels, kernel_size, bias=bias)
)
def forward(self, x):
return self.net(x)
class Segnet(nn.Module):
def __init__(self, input_channels=3,input_width=480, input_height=360, n_classes=10):
super(Segnet,self).__init__()
self.input_channels = input_channels
self.input_width = input_width
self.input_height = input_height
self.n_classes = n_classes
## Encode
self.conv1 = nn.Conv2d(in_channels=input_channels, out_channels=64, kernel_size=3, padding=1)
self.conv1_bn = nn.BatchNorm2d(64)
self.pool1 = torch.nn.MaxPool2d(kernel_size=2, stride=2, return_indices=True)
self.conv2 = nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, padding=1)
self.conv2_bn = nn.BatchNorm2d(128)
self.pool2 = torch.nn.MaxPool2d(kernel_size=2, stride=2, return_indices=True)
self.conv3 = nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, padding=1)
self.conv3_bn = nn.BatchNorm2d(256)
self.pool3 = torch.nn.MaxPool2d(kernel_size=2, stride=2, return_indices=True)
self.conv4 = nn.Conv2d(in_channels=256, out_channels=512, kernel_size=3, padding=1)
self.conv4_bn = nn.BatchNorm2d(512)
self.pool4 = torch.nn.MaxPool2d(kernel_size=2, stride=2, return_indices=True)
## Decode
#self.up1 = nn.Upsample(scale_factor=2, mode='bilinear',align_corners=True)
self.decoder1 = nn.ConvTranspose2d(in_channels=512, out_channels=256, kernel_size=3, padding=1)
self.decoder1_bn = nn.BatchNorm2d(256)
#self.up2 = nn.Upsample(scale_factor=2, mode='bilinear',align_corners=True)
self.decoder2 = nn.ConvTranspose2d(in_channels=256, out_channels=128, kernel_size=3, padding=1)
self.decoder2_bn = nn.BatchNorm2d(128)
#self.up3 = nn.Upsample(scale_factor=2, mode='bilinear',align_corners=True)
self.decoder3 = nn.ConvTranspose2d(in_channels=128, out_channels=64, kernel_size=3, padding=1)
self.decoder3_bn = nn.BatchNorm2d(64)
self.decoder4 = nn.ConvTranspose2d(in_channels=64, out_channels=n_classes, kernel_size=3, padding=1)
self.decoder4_bn = nn.BatchNorm2d(n_classes)
self.softmax = nn.Softmax(dim=1)
def forward(self,x):
# Encoder phase
conv1 = self.conv1(x)
conv1 = F.relu(self.conv1_bn(conv1))
conv1, ind1 = self.pool1(conv1)
conv2 = self.conv2(conv1)
conv2 = F.relu(self.conv2_bn(conv2))
conv2, ind2 = self.pool2(conv2)
conv3 = self.conv3(conv2)
conv3 = F.relu(self.conv3_bn(conv3))
conv3, ind3 = self.pool3(conv3)
conv4 = self.conv4(conv3)
conv4 = F.relu(self.conv4_bn(conv4))
conv4, ind4 = self.pool4(conv4)
# Decoder phase
decod1 = F.max_unpool2d(conv4, ind4, kernel_size=2, stride=2, output_size=conv3.size())
decod1 = self.decoder1(decod1)
decod1 = F.relu(self.decoder1_bn(decod1))
decod2 = F.max_unpool2d(decod1, ind3, kernel_size=2, stride=2, output_size=conv2.size())
decod2 = self.decoder2(decod2)
decod2 = F.relu(self.decoder2_bn(decod2))
decod3 = F.max_unpool2d(decod2, ind2, kernel_size=2, stride=2, output_size=conv1.size())
decod3 = self.decoder3(decod3)
decod3 = F.relu(self.decoder3_bn(decod3))
decod4 = F.max_unpool2d(decod3, ind1, kernel_size=2, stride=2, output_size=x.size())
decod4 = self.decoder4(decod4)
decod4 = F.relu(self.decoder4_bn(decod4))
output = decod4.view(-1,self.n_classes,self.input_width*self.input_height)
output = self.softmax(output)
return output
if __name__ == "__main__":
import numpy as np
batch_size = 1
n_channels = 3
input_width = 480
input_height = 360
n_classes = 10
nz = torch.Tensor(np.zeros((batch_size,n_channels,input_width,input_height)))
uz = torch.ones(batch_size,input_width*input_height,dtype=torch.long)
model = Segnet()
outputs = model.forward(nz)
criterion = nn.CrossEntropyLoss()
learning_rate = 1e-4
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
loss = criterion(outputs, uz)
loss.backward() | bbrangeo/pytorch-image-segmentation | models/segnet.py | segnet.py | py | 4,464 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "warnings.filterwarnings",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.Sequential... |
41797863785 | from flask import render_template, url_for, request, flash, redirect, abort, send_from_directory
from flask_login import current_user
from werkzeug import exceptions
from datetime import datetime
from ics import Calendar as icsCalendar, Event as icsEvent
import os
from threading import Thread
import json
# -------------------------------------------------------------------------------------------------------------- #
# Import app from __init__.py
# -------------------------------------------------------------------------------------------------------------- #
from core import app, current_year, live_site
# -------------------------------------------------------------------------------------------------------------- #
# Import our three database classes and associated forms, decorators etc
# -------------------------------------------------------------------------------------------------------------- #
from core.db_users import update_last_seen, logout_barred_user, login_required, rw_required
from core.db_social import Socials, create_social_form, SOCIAL_FORM_PRIVATE, SOCIAL_DB_PRIVATE, \
SOCIAL_FORM_PUBLIC, SOCIAL_DB_PUBLIC, SIGN_UP_YES, SIGN_UP_NO
from core.dB_events import Event
from core.db_users import User
from core.subs_email_sms import send_social_notification_emails
# -------------------------------------------------------------------------------------------------------------- #
# Constants
# -------------------------------------------------------------------------------------------------------------- #
# Where we store calendar ics files
# "D:/Dropbox/100 Days of Code/Python/ELSR-website/core/ics/"
ICS_DIRECTORY = os.environ['ELSR_ICS_DIRECTORY']
# -------------------------------------------------------------------------------------------------------------- #
# -------------------------------------------------------------------------------------------------------------- #
# -------------------------------------------------------------------------------------------------------------- #
# html routes
# -------------------------------------------------------------------------------------------------------------- #
# -------------------------------------------------------------------------------------------------------------- #
# -------------------------------------------------------------------------------------------------------------- #
# -------------------------------------------------------------------------------------------------------------- #
# Add a social event
# -------------------------------------------------------------------------------------------------------------- #
@app.route('/add_social', methods=['GET', 'POST'])
@logout_barred_user
@login_required
@update_last_seen
@rw_required
def add_social():
# ----------------------------------------------------------- #
# Did we get passed a social_id? (Optional)
# ----------------------------------------------------------- #
social_id = request.args.get('social_id', None)
# ----------------------------------------------------------- #
# Validate social_id
# ----------------------------------------------------------- #
if social_id:
social = Socials().one_social_id(social_id)
if not social:
app.logger.debug(f"add_social(): Failed to locate social, social_id = '{social_id}'.")
Event().log_event("Add Social Fail", f"Failed to locate social, social_id = '{social_id}'.")
return abort(404)
else:
social = None
# ----------------------------------------------------------- #
# Need a form
# ----------------------------------------------------------- #
form = create_social_form(current_user.admin())
if request.method == 'GET':
if social:
# Try and get owner from email address in the db
owner = User().find_user_from_email(social.email)
if not owner:
# Should never happen but....
app.logger.debug(f"add_social(): Failed to locate owner, "
f"social_id = '{social_id}', social.email = '{social.email}'.")
Event().log_event("Edit Social Fail", f"Failed to locate owner, "
f"social_id = '{social_id}', social.email = '{social.email}'.")
flash("Failed to locate owner, so defaulting to current_user")
# Default to current user
owner = current_user
# We are editing an existing event, so pre-fill details
form.date.data = datetime.strptime(social.date.strip(), '%d%m%Y')
form.start_time.data = datetime.strptime(f"{social.date} {social.start_time}", '%d%m%Y %H%M').time()
form.organiser.data = social.organiser
form.destination.data = social.destination
form.details.data = social.details
if social.privacy == SOCIAL_DB_PRIVATE:
form.destination_hidden.data = SOCIAL_FORM_PRIVATE
else:
form.destination_hidden.data = SOCIAL_FORM_PUBLIC
if social.sign_up == "True":
form.sign_up.data = SIGN_UP_YES
else:
form.sign_up.data = SIGN_UP_NO
# Admin owner option
if current_user.admin():
form.owner.data = owner.combo_str()
else:
# New form, so just assume organiser is current user
form.organiser.data = current_user.name
if current_user.admin():
form.owner.data = current_user.combo_str()
# Add some guidance
form.details.data = "<p>If you set the Social type to <strong>Public</strong>:</p>" \
"<ul><li>Destination and Details are hidden for anyone not logged in.</li>" \
"<li>But, visible to anyone who has registered (ie anyone who can be bothered to give " \
"the site an email address).</li></ul>" \
"<p>However, the Admins maintain a subset of members (same as WA group) and only " \
"these people can see the Destination and Details of <strong>Private</strong> events. " \
"So, <strong>Public</strong> = social down the pub, but BBQ at your house should be " \
"made <strong>Private</strong>!</p>"
elif form.validate_on_submit():
# ----------------------------------------------------------- #
# Handle form passing validation
# ----------------------------------------------------------- #
# Detect cancel button
if form.cancel.data:
return redirect(url_for('calendar'))
# ----------------------------------------------------------- #
# We can now create / update the social object
# ----------------------------------------------------------- #
if social:
# Updating an existing social
new_social = social
else:
# New social
new_social = Socials()
# Get owner
if current_user.admin():
owner = User().user_from_combo_string(form.owner.data)
if not owner:
# Should never happen but....
app.logger.debug(f"add_social(): Failed to locate owner, "
f"social_id = '{social_id}', social.email = '{social.email}'.")
Event().log_event("Edit Social Fail", f"Failed to locate owner, "
f"social_id = '{social_id}', social.email = '{social.email}'.")
flash("Failed to locate owner, so defaulting to current_user")
# Default to current user
owner = current_user
else:
owner = current_user
new_social.organiser = form.organiser.data
new_social.email = owner.email
# Convert form date format '2023-06-23' to preferred format '23062023'
new_social.date = form.date.data.strftime("%d%m%Y")
new_social.start_time = form.start_time.data.strftime("%H%M")
new_social.destination = form.destination.data
new_social.details = form.details.data
# Handle public private
if form.destination_hidden.data == SOCIAL_FORM_PUBLIC:
new_social.privacy = SOCIAL_DB_PUBLIC
else:
new_social.privacy = SOCIAL_DB_PRIVATE
# Handle sign ups
if form.sign_up.data == SIGN_UP_YES:
new_social.sign_up = "True"
else:
new_social.sign_up = "False"
# ----------------------------------------------------------- #
# Add to the db
# ----------------------------------------------------------- #
new_social = Socials().add_social(new_social)
if new_social:
# Success
app.logger.debug(f"add_social(): Successfully added new social.")
Event().log_event("Add social Pass", f"Successfully added new social.")
if social:
flash("Social updated!")
else:
flash("Social added to Calendar!")
Thread(target=send_social_notification_emails, args=(new_social,)).start()
# Back to socials page showing the new social
return redirect(url_for('social', date=new_social.date))
else:
# Should never happen, but...
app.logger.debug(f"add_social(): Failed to add social for '{new_social}'.")
Event().log_event("Add Social Fail", f"Failed to add social for '{new_social}'.")
flash("Sorry, something went wrong.")
return render_template("calendar_add_social.html", year=current_year, form=form, social=social,
live_site=live_site())
elif request.method == 'POST':
# ----------------------------------------------------------- #
# Handle form failing validation
# ----------------------------------------------------------- #
# Detect cancel button
if form.cancel.data:
return redirect(url_for('calendar'))
# This traps a post, but where the form verification failed.
flash("Something was missing, see comments below:")
return render_template("calendar_add_social.html", year=current_year, form=form, social=social,
live_site=live_site())
return render_template("calendar_add_social.html", year=current_year, form=form, social=social,
live_site=live_site())
# -------------------------------------------------------------------------------------------------------------- #
# Show all socials for a given date
# -------------------------------------------------------------------------------------------------------------- #
@app.route("/social", methods=['GET'])
@logout_barred_user
@update_last_seen
def social():
# ----------------------------------------------------------- #
# Did we get passed a date? (optional)
# ----------------------------------------------------------- #
date = request.args.get('date', None)
anchor = request.args.get('anchor', None)
# ----------------------------------------------------------- #
# Get our socials
# ----------------------------------------------------------- #
if date:
# Get socials specific to that date
socials = Socials().all_socials_date(date)
else:
# Just get ones yet to happen
socials = Socials().all_socials_future()
# ----------------------------------------------------------- #
# Tweak the data before we show it
# ----------------------------------------------------------- #
for social in socials:
# Convert attendees from string to list
if social.attendees:
social.attendees = json.loads(social.attendees)
else:
social.attendees = []
# Swap 'True' / 'False' in db for boolean for jinja
social.sign_up = social.sign_up == "True"
# Add more friendly start time
if social.start_time:
social.start_time_txt = f"{social.start_time[0:2]}:{social.start_time[2:4]}"
else:
social.start_time_txt = "TBC"
# Hide destination for private events
social.show_ics = False
if not current_user.is_authenticated:
# Not logged in = no info other than the date
social.destination = "Log in to see destination"
social.start_time_txt = "Log in to see start time"
social.details = f"<a href={url_for('login')}><p style='color: red'>Log in to see the details</p></a>"
elif social.privacy == SOCIAL_DB_PRIVATE and \
not current_user.readwrite():
# Private events are for write enabled users only ie WA group members
social.destination = "** Private event **"
social.details = "<p>Details for private events are visible to regular riders only.</p>"
social.start_time_txt = "** Private event **"
else:
social.show_ics = True
return render_template("main_social.html", year=current_year, socials=socials, date=date, live_site=live_site(),
anchor=anchor)
# -------------------------------------------------------------------------------------------------------------- #
# Add a social event
# -------------------------------------------------------------------------------------------------------------- #
@app.route('/delete_social', methods=['POST'])
@logout_barred_user
@login_required
@update_last_seen
@rw_required
def delete_social():
# ----------------------------------------------------------- #
# Did we get passed a social_id?
# ----------------------------------------------------------- #
social_id = request.args.get('social_id', None)
try:
password = request.form['password']
except exceptions.BadRequestKeyError:
password = None
# Stop 400 error for blank string as very confusing (it's not missing, it's blank)
if password == "":
password = " "
# ----------------------------------------------------------- #
# Get user's IP
# ----------------------------------------------------------- #
if request.headers.getlist("X-Forwarded-For"):
user_ip = request.headers.getlist("X-Forwarded-For")[0]
else:
user_ip = request.remote_addr
# ----------------------------------------------------------- #
# Must have parameters
# ----------------------------------------------------------- #
if not social_id:
app.logger.debug(f"delete_social(): Missing social_id!")
Event().log_event("Delete social Fail", f"Missing social_id!")
return abort(400)
if not password:
app.logger.debug(f"delete_social(): Missing Password!")
Event().log_event("Delete social Fail", f"Missing Password!")
return abort(400)
# ----------------------------------------------------------- #
# Validate social_id
# ----------------------------------------------------------- #
social = Socials().one_social_id(social_id)
if not social:
app.logger.debug(f"delete_social(): Failed to locate social, social_id = '{social_id}'.")
Event().log_event("Delete Social Fail", f"Failed to locate social, social_id = '{social_id}'.")
return abort(404)
# ----------------------------------------------------------- #
# Restrict access to Admin and Author
# ----------------------------------------------------------- #
# Must be admin or the current author
if current_user.email != social.email \
and not current_user.admin():
# Failed authentication
app.logger.debug(f"delete_social(): Refusing permission for '{current_user.email}' and "
f"social_id = '{social_id}'.")
Event().log_event("Delete Social Fail", f"Refusing permission for '{current_user.email}', "
f"social_id = '{social_id}'.")
return abort(403)
# ----------------------------------------------------------- #
# Validate password
# ----------------------------------------------------------- #
# Need current user
user = User().find_user_from_id(current_user.id)
# Validate against current_user's password
if not user.validate_password(user, password, user_ip):
app.logger.debug(f"delete_social(): Delete failed, incorrect password for user_id = '{user.id}'!")
Event().log_event("Social Delete Fail", f"Incorrect password for user_id = '{user.id}'!")
flash(f"Incorrect password for user {user.name}!")
# Go back to socials page
return redirect(url_for('social'))
# ----------------------------------------------------------- #
# Delete social
# ----------------------------------------------------------- #
if Socials().delete_social(social_id):
app.logger.debug(f"delete_social(): Deleted social, social_id = '{social_id}'.")
Event().log_event("Delete Social Success", f"Deleted social, social_id = '{social_id}'.")
flash("Social has been deleted.")
else:
app.logger.debug(f"delete_social(): Failed to delete social, social_id = '{social_id}'.")
Event().log_event("Delete Social Fail", f"Failed to delete social, social_id = '{social_id}'.")
flash("Sorry, something went wrong.")
return redirect(url_for('social'))
# -------------------------------------------------------------------------------------------------------------- #
# Download ics file
# -------------------------------------------------------------------------------------------------------------- #
@app.route('/download_ics', methods=['GET'])
@logout_barred_user
@login_required
@update_last_seen
def download_ics():
# ----------------------------------------------------------- #
# Did we get passed a social_id?
# ----------------------------------------------------------- #
social_id = request.args.get('social_id', None)
# ----------------------------------------------------------- #
# Must have parameters
# ----------------------------------------------------------- #
if not social_id:
app.logger.debug(f"download_ics(): Missing social_id!")
Event().log_event("download_ics Fail", f"Missing social_id!")
return abort(400)
# ----------------------------------------------------------- #
# Validate social_id
# ----------------------------------------------------------- #
social = Socials().one_social_id(social_id)
if not social:
app.logger.debug(f"download_ics(): Failed to locate social, social_id = '{social_id}'.")
Event().log_event("download_ics Fail", f"Failed to locate social, social_id = '{social_id}'.")
return abort(404)
# ----------------------------------------------------------- #
# Permissions
# ----------------------------------------------------------- #
if not current_user.readwrite() and \
social.privacy == SOCIAL_DB_PRIVATE:
# Failed authentication
app.logger.debug(f"delete_social(): Refusing permission for '{current_user.email}' and "
f"social_id = '{social_id}' as Private.")
Event().log_event("Delete SocialX Fail", f"Refusing permission for '{current_user.email}', "
f"social_id = '{social_id}' as Private.")
flash("Private events are for regular riders only!")
return redirect(url_for("not_rw"))
# ----------------------------------------------------------- #
# Create ics file
# ----------------------------------------------------------- #
new_event = icsEvent()
new_event.name = "ELSR Social"
new_event.begin = f"{social.date[4:8]}-{social.date[2:4]}-{social.date[0:2]} " \
f"{social.start_time[0:2]}:{social.start_time[2:4]}:00"
new_event.location = social.destination
new_event.description = f"ELSR Social organised by {social.organiser}: {social.details} \n\n " \
f"https://www.elsr.co.uk/social"
# Add to ics calendar
new_cal = icsCalendar()
new_cal.events.add(new_event)
# Save as file
filename = os.path.join(ICS_DIRECTORY, f"Social_{social.date}.ics")
with open(filename, 'w') as my_file:
my_file.writelines(new_cal.serialize_iter())
# ----------------------------------------------------------- #
# Send link to download the file
# ----------------------------------------------------------- #
download_name = f"ELSR_Social_{social.date}.ics"
app.logger.debug(f"download_ics(): Serving ICS social_id = '{social_id}' ({social.date}), "
f"download_name = '{download_name}'.")
Event().log_event("ICS Downloaded", f"Serving ICS social_idd = '{social_id}' ({social.date}).")
return send_from_directory(directory=ICS_DIRECTORY,
path=os.path.basename(filename),
download_name=download_name)
| footflaps/ELSR-Website | core/routes_socials.py | routes_socials.py | py | 21,479 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "os.environ",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "flask.request.args.get",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 60,
"usage_type": "attribute"
},
{
"api_name": "flask.... |
5063223720 | __all__ = [
'AuthorizationView',
'RevokeTokenView',
'TokenView',
'IntrospectTokenView',
]
from calendar import timegm
from oauth2_provider.views import (
AuthorizationView,
RevokeTokenView,
TokenView,
)
from rest_framework import (
generics,
response,
status,
)
from ..authentication import (
verify as auth_verify,
)
from ..permissions import (
TokenHasScope,
)
from .serializers import (
IntrospectTokenSerializer,
)
class IntrospectTokenView(generics.GenericAPIView):
permission_classes = [TokenHasScope]
serializer_class = IntrospectTokenSerializer
required_scopes = ['introspection']
def post(self, request):
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
user, access_token = auth_verify(
serializer.validated_data['token'])
if not access_token:
return response.Response(
{'error': 'user not found'},
status=status.HTTP_404_NOT_FOUND,
)
serializer.save(
exp=int(timegm(access_token.expires.utctimetuple())),
scope=access_token.scope,
user={
'uuid': user.uuid,
'username': user.realm_username,
'realm': user.realm,
},
)
return response.Response(serializer.data)
| mind-bricks/UMS-api | apps/oauth/views.py | views.py | py | 1,410 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "rest_framework.generics.GenericAPIView",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.generics",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "permissions.TokenHasScope",
"line_number": 33,
"usage_type": "name"
... |
31981036961 | """empty message
Revision ID: 01368a3a906b
Revises: 5e10a128f4c9
Create Date: 2020-09-14 12:46:33.026557
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '01368a3a906b'
down_revision = '5e10a128f4c9'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('member_type',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.TEXT(), nullable=False),
sa.Column('label', sa.TEXT(), nullable=False),
sa.PrimaryKeyConstraint('id')
)
op.drop_table('member')
op.add_column('user_profile', sa.Column('member_number', sa.TEXT(), nullable=True))
op.drop_constraint('user_profile_member_id_fkey', 'user_profile', type_='foreignkey')
op.create_foreign_key(None, 'user_profile', 'member_type', ['member_id'], ['id'])
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_constraint(None, 'user_profile', type_='foreignkey')
op.create_foreign_key('user_profile_member_id_fkey', 'user_profile', 'member', ['member_id'], ['id'])
op.drop_column('user_profile', 'member_number')
op.create_table('member',
sa.Column('id', sa.INTEGER(), autoincrement=True, nullable=False),
sa.Column('name', sa.TEXT(), autoincrement=False, nullable=False),
sa.Column('label', sa.TEXT(), autoincrement=False, nullable=False),
sa.PrimaryKeyConstraint('id', name='member_pkey')
)
op.drop_table('member_type')
# ### end Alembic commands ###
| OsamuHasegawa/society-portal | migrations/versions/01368a3a906b_.py | 01368a3a906b_.py | py | 1,614 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "alembic.op.create_table",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "alembic.op",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integ... |
39472224381 | from sqlalchemy import func, ForeignKey
from sqlalchemy.orm import relationship, declared_attr
from models.comments import CommentsModel
from db import db
from models.enums import RoleType
from models.orders import OrdersModel
class UsersModel(db.Model):
__tablename__ = "users"
id = db.Column(db.Integer, primary_key=True)
email = db.Column(db.String(255), nullable=False, unique=True)
password = db.Column(db.String(255), nullable=False)
role = db.Column(db.Enum(RoleType), default=RoleType.user, nullable=False)
user_data = db.relationship(
"UserData", backref="users", uselist=False, lazy="select"
)
wishes_relation = db.Table(
"wishes",
db.Column("user_id", db.Integer, db.ForeignKey("user_data.id"), primary_key=True),
db.Column("product_id", db.Integer, db.ForeignKey("products.id"), primary_key=True),
)
class UserData(db.Model):
__tablename__ = "user_data"
id = db.Column(db.Integer, primary_key=True)
f_name = db.Column(db.String(255), nullable=False)
l_name = db.Column(db.String(255), nullable=False)
phone = db.Column(db.Integer, nullable=False)
created_on = db.Column(db.DateTime, server_default=func.now())
user_id = db.Column(db.Integer, db.ForeignKey("users.id"), nullable=False)
wishes = db.relationship(
"ProductsModel",
secondary=wishes_relation,
lazy="dynamic",
backref=db.backref("users", lazy=True),
)
comments = db.relationship("CommentsModel", backref="user", lazy="select")
orders = db.relationship("OrdersModel", backref="user", lazy="select")
| a-angeliev/Shoecommerce | server/models/users.py | users.py | py | 1,602 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "db.db.Model",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "db.db",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "db.db.Column",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "db.db",
"line_number": 11,
... |
32754344712 | from django.contrib import admin
from django.urls import path
from django.contrib.auth import views as auth_views
from rafacar.views import home
from . import views
urlpatterns = [
path('admin/', admin.site.urls),
path('', home), # tela inicio
path('home', home, name='home'),
path('contato/', views.contato, name='contato'),
path('sobre/', views.sobre, name='sobre'),
path('servicos/', views.servicos, name='servicos'),
path('login/', views.login_user, name='login'),
path('cadastro/', views.cadastrar_usuario, name='cadastro'),
path('agendar/', views.agendar, name='agendar'),
path('agendamentos/', views.agendamentos, name='agendamentos'),
path('area_membros/', views.area_membros, name='area_membros'),
path('agendamentos/excluir/<int:agendamento_id>/', views.excluir_agendamento, name='excluir_agendamento'),
path('agendamentos/editar/<int:agendamento_id>/', views.editar_agendamento, name='editar_agendamento'),
path('edita_user/', views.edita_user, name='edita_user'),
path('logout/', views.logout_view, name='logout'),
]
| HenriqueJunio/tcc_rafacar | rafacar/urls.py | urls.py | py | 1,097 | python | pt | code | 0 | github-code | 36 | [
{
"api_name": "django.urls.path",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "django.contrib.admin.site",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "... |
2036893082 | import click_log
import logging
class MyFormatter(click_log.ColorFormatter):
def format(self, record):
msg = click_log.ColorFormatter.format(self, record)
if record.levelname in ("DEBUG", "INFO"):
new_msg = "> " + msg
elif record.levelname in ("WARNING"):
new_msg = ">> " + msg
elif record.levelname in ("ERROR", "CRITICAL"):
new_msg = ">>> " + msg
else:
new_msg = msg
return new_msg
clog = logging.getLogger(__name__)
myhandler = click_log.ClickHandler()
myhandler.formatter = MyFormatter()
clog.handlers = [myhandler]
clog.propagate = False
| nuvolos-cloud/resolos | resolos/logging.py | logging.py | py | 647 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "click_log.ColorFormatter",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "click_log.ColorFormatter.format",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "click_log.ColorFormatter",
"line_number": 7,
"usage_type": "attribute"
},
... |
22321037083 | """
Author: Zhuo Su, Wenzhe Liu
Date: Feb 18, 2021
"""
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn as nn
import torch
from torch.nn import functional as F
from torchvision import models
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
import functools
from torch.optim import lr_scheduler
###############################################################################
# Helper Functions
###############################################################################
def get_norm_layer(norm_type='instance'):
"""Return a normalization layer
Parameters:
norm_type (str) -- the name of the normalization layer: batch | instance | none
For BatchNorm, we use learnable affine parameters and track running statistics (mean/stddev).
For InstanceNorm, we do not use learnable affine parameters. We do not track running statistics.
"""
if norm_type == 'batch':
norm_layer = functools.partial(nn.BatchNorm2d, affine=True, track_running_stats=True)
elif norm_type == 'instance':
norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False)
elif norm_type == 'none':
norm_layer = None
else:
raise NotImplementedError('normalization layer [%s] is not found' % norm_type)
return norm_layer
def make_layers(cfg, in_channels = 3, batch_norm=False, dilation = False):
if dilation:
d_rate = 2
else:
d_rate = 1
layers = []
for v in cfg:
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=d_rate,dilation = d_rate)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
else:
layers += [conv2d, nn.ReLU(inplace=True)]
in_channels = v
return nn.Sequential(*layers)
class block(nn.Module):
"""
Compact Dilation Convolution based Module
"""
def __init__(self, in_channels):
super(block, self).__init__()
self.conv0_0 = conv_layers(in_channels, in_channels,1)
self.conv0_1 = conv_layers(in_channels, in_channels,2)
self.pool0 = pool_layers()
self.poollast = pool_layers()
self.conv1_0 = conv_layers(in_channels, in_channels,2)
self.conv1_1 = conv_layers(in_channels, in_channels,2)
self.pool1 = pool_layers()
self.conv2_0 = conv_layers(in_channels, in_channels,2)
self.conv2_1 = conv_layers(in_channels, in_channels,2)
self.conv2_2 = conv_layers(in_channels, in_channels,2)
self.pool2 = pool_layers()
self.conv3_0 = conv_layers(in_channels, in_channels,2)
self.conv3_1 = conv_layers(in_channels, in_channels,2)
self.conv3_2 = conv_layers(in_channels, in_channels,2)
self.classifier = nn.Conv2d(in_channels*3, 1, kernel_size=1)
def forward(self, x):
H, W = x.size()[2:]
x = self.conv0_0(x)
x = self.conv0_1(x)
x = self.pool0(x)
x = self.conv1_0(x)
x = self.conv1_1(x)
e1 = F.interpolate(x, (H, W), mode="bilinear", align_corners=False)
x = self.pool1(x)
x = self.conv2_0(x)
x = self.conv2_1(x)
x = self.conv2_2(x)
e2 = F.interpolate(x, (H, W), mode="bilinear", align_corners=False)
x = self.pool2(x)
x = self.conv3_0(x)
x = self.conv3_1(x)
x = self.conv3_2(x)
e3 = F.interpolate(x, (H, W), mode="bilinear", align_corners=False)
return e1+e2+e3
class blockbn(nn.Module):
"""
Compact Dilation Convolution based Module
"""
def __init__(self, in_channels):
super(blockbn, self).__init__()
#self.norm_layer = get_norm_layer(norm_type='batch')
norm_layer = get_norm_layer(norm_type='batch')
self.conv0_0 = nn.Sequential(*self._conv_block(in_channels, in_channels, norm_layer, num_block=1))
self.conv0_1 = nn.Sequential(*self._conv_block(in_channels, in_channels, norm_layer, num_block=1))
self.pool0 = pool_layers()
self.conv1_0 = nn.Sequential(*self._conv_block(in_channels, in_channels, norm_layer, num_block=1))
self.conv1_1 = nn.Sequential(*self._conv_block(in_channels, in_channels, norm_layer, num_block=1))
self.pool1 = pool_layers()
self.conv2_0 = nn.Sequential(*self._conv_block(in_channels, in_channels, norm_layer, num_block=1))
self.conv2_1 = nn.Sequential(*self._conv_block(in_channels, in_channels, norm_layer, num_block=1))
self.conv2_2 = nn.Sequential(*self._conv_block(in_channels, in_channels, norm_layer, num_block=1))
self.pool2 = pool_layers()
self.conv3_0 = nn.Sequential(*self._conv_block(in_channels, in_channels, norm_layer, num_block=1))
self.conv3_1 = nn.Sequential(*self._conv_block(in_channels, in_channels, norm_layer, num_block=1))
self.conv3_2 = nn.Sequential(*self._conv_block(in_channels, in_channels, norm_layer, num_block=1))
self.classifier = nn.Conv2d(in_channels*3, 1, kernel_size=1)
def _conv_block(self, in_nc, out_nc, norm_layer, num_block=1, kernel_size=3,
stride=1, padding=1, bias=False):
conv = []
for i in range(num_block):
cur_in_nc = in_nc if i == 0 else out_nc
conv += [nn.Conv2d(cur_in_nc, out_nc, kernel_size=kernel_size, stride=stride,
padding=padding, bias=bias),
norm_layer(out_nc),
nn.ReLU(True)]
return conv
def forward(self, x):
H, W = x.size()[2:]
x = self.conv0_0(x)
x = self.conv0_1(x)
x = self.pool0(x)
x = self.conv1_0(x)
x = self.conv1_1(x)
e1 = F.interpolate(x, (H, W), mode="bilinear", align_corners=False)
x = self.pool1(x)
x = self.conv2_0(x)
x = self.conv2_1(x)
x = self.conv2_2(x)
e2 = F.interpolate(x, (H, W), mode="bilinear", align_corners=False)
x = self.pool2(x)
x = self.conv3_0(x)
x = self.conv3_1(x)
x = self.conv3_2(x)
e3 = F.interpolate(x, (H, W), mode="bilinear", align_corners=False)
return e1+e2+e3
def conv_layers(inp, oup, dilation):
#if dilation:
d_rate = dilation
#else:
# d_rate = 1
return nn.Sequential(
nn.Conv2d(inp, oup, kernel_size=3, padding=d_rate, dilation=d_rate),
nn.ReLU(inplace=True)
)
def feature_transform(inp, oup):
conv2d = nn.Conv2d(inp, oup, kernel_size=1) # no padding
relu = nn.ReLU(inplace=True)
layers = []
layers += [conv2d, relu]
return nn.Sequential(*layers)
def pool_layers(ceil_mode=True):
return nn.MaxPool2d(kernel_size=3, stride=2)
class CHRNet(nn.Module): # one conv layer after fully connetced CNN _average3
def __init__(self):
super(CHRNet, self).__init__()
self.seen = 0
self.conv0_0 = conv_layers(3, 16,4)
self.pool00 = pool_layers()
self.conv0_1 = conv_layers(16, 16,4)
self.pool0 = block(16)
self.pool0_bn = blockbn(16)
self.conv1_0 = conv_layers(16, 32,4)
self.conv1_1 = conv_layers(32, 32,4)
self.pool1 = block(32)
self.pool1_bn = blockbn(32)
self.conv2_0 = conv_layers(32, 64,4)
self.conv2_1 = conv_layers(64, 64,4)
self.conv2_2 = conv_layers(64, 64,4)
self.pool2 = block(64)
self.pool2_bn = blockbn(64)
self.conv3_0 = conv_layers(64, 128,4)
self.conv3_1 = conv_layers(128, 128,4)
self.conv3_2 = conv_layers(128,128,4)
self.output_layer0 = nn.Conv2d(16, 1, kernel_size=1)
self.output_layer1 = nn.Conv2d(32, 1, kernel_size=1)
self.output_layer2 = nn.Conv2d(64, 1, kernel_size=1)
self.output_layer3 = nn.Conv2d(256, 1, kernel_size=1)
self.output_layer4 = nn.Conv2d(128, 1, kernel_size=1)
self.classifier1 = nn.Conv2d(2, 1, kernel_size=1)
self.classifier4 = nn.Conv2d(4, 1, kernel_size=1)
self.classifier= nn.Conv2d(6, 1, kernel_size=1)
#self._initialize_weights()
self.features = []
def get_weights(self):
conv_weights = []
bn_weights = []
relu_weights = []
for pname, p in self.named_parameters():
if 'bn' in pname:
bn_weights.append(p)
elif 'relu' in pname:
relu_weights.append(p)
else:
conv_weights.append(p)
return conv_weights, bn_weights, relu_weights
def forward(self, x):
self.features = []
H, W = x.size()[2:]
x = self.conv0_0(x)
x = self.pool00(x)
x = self.conv0_1(x)
x1 = self.pool0(x)
x = self.conv1_0(x+x1)
y1 = self.conv1_1(x)
e11 = F.interpolate(y1, (H, W), mode="bilinear")
x2_bn = self.pool1_bn(y1)
e22 = F.interpolate(x2_bn, (H, W), mode="bilinear")
x = self.conv2_0(x2_bn+y1)
x = self.conv2_1(x)
y2 = self.conv2_2(x)
x3_bn = self.pool2_bn(y2)
e33 = F.interpolate(x3_bn, (H, W), mode="bilinear")
x = self.conv3_0(x3_bn+y2)
y3 = self.conv3_1(x)
e3 = F.interpolate(y3, (H, W), mode="bilinear")
e1 = F.interpolate(y1, (H, W), mode="bilinear")
e2 = F.interpolate(y2, (H, W), mode="bilinear")
e1=self.output_layer1(e1)
e11=self.output_layer1(e11)
e2=self.output_layer2(e2)
e22=self.output_layer1(e22)
e3= self.output_layer4(e3)
e33=self.output_layer2(e33)
outputs = [e1,e11,e2,e22,e3,e33]
x = self.classifier(torch.cat(outputs, dim=1))
outputs.append(x)
outputs = [torch.sigmoid(r) for r in outputs]
return outputs
def chrnet(args):
return CHRNet()
| elharroussomar/Refined-Edge-Detection-With-Cascaded-and-High-Resolution-Convolutional-Network | models/chrnet.py | chrnet.py | py | 10,342 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "functools.partial",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "torch.nn.BatchNorm2d",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "functools.partia... |
73118934184 | import datetime as dt
def workdays_count(date1, date2):
delta = date2 - date1
ddays = delta.days
print(f"Всего дней между ними {ddays}")
workdays = 0
while ddays > 0:
if date1.weekday() < 5:
workdays += 1
date1 += dt.timedelta(days=1)
ddays -= 1
return workdays
date1 = dt.date(2023, 2, 18)
date2_now = dt.datetime.now()
date2 = date2_now.date() # странно почему не получается сделать сразу так? : date2 = dt.datetime.now().date()
print(f"Всего рабочих дней между датами: {workdays_count(date1, date2)}")
| IlyaOrlov/PythonCourse2.0_September23 | Practice/mtroshin/Module 10/2.py | 2.py | py | 654 | python | ru | code | 2 | github-code | 36 | [
{
"api_name": "datetime.timedelta",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "datetime.datet... |
7950443823 | import warnings
warnings.filterwarnings("ignore", message="numpy.dtype size changed")
warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
import argparse
from argparse import ArgumentParser
from Bio import SeqIO
import numpy as np
import os
import pandas as pd
import pickle
import multiprocessing
NR_THREADS = 24
SIGNAL_EVALUATION_RADIUS = 500
SEQUENCE_EVALUATION_RADIUS = 500
SEQTK_PATH = '/Users/igtr4848/seqtk/seqtk'
SEQ_DUMP_DIR = '/scratch/Shares/dowell/itripodi/ATAC/peak_sequences'
OUTDIR = '/scratch/Shares/dowell/itripodi/ATAC/features'
parser = argparse.ArgumentParser(description='This script gathers all features that can be used to classify ATAC-seq peaks')
parser.add_argument('-x', '--prefix', dest='prefix', help='Custom prefix for all output files')
parser.add_argument('-p', '--atac-peaks', dest='atac_peaks', help='Path to the ATAC-seq peaks file')
parser.add_argument('-b', '--atac-bedgraph', dest='atac_bedgraph', help='Path to the ATAC-seq bedGraph file for the same sample of the given peaks file.')
parser.add_argument('-f', '--genome-fasta', dest='genome_fasta', help='Path to the FASTA file for the reference genome in use.')
parser.add_argument('-r', '--refseq-annotations', dest='refseq_annotations', help='Path to the RefSeq annotation file for the same reference genome these files were processed.')
parser.add_argument('-s', '--skip-seq-caching', dest='skip_sequence_caching', help='Skip the caching of peak sequences if this has already been processed.', required=False, action='store_true')
args = parser.parse_args() # /scratch/Shares/dowell/nascent/hg38/hg38_refseq.bed
atac_peak_df = pd.read_csv(args.atac_peaks, header=None, \
sep="\t", na_filter=False, \
usecols=[0, 1, 2, 10], \
names=['chrom', 'start', 'end', 'ovlp_txn'], \
dtype={'chrom':'str', 'start':'int', \
'end':'int', 'ovlp_txn':'int'} )
atac_bed_df = pd.read_csv(args.atac_bedgraph, header=None, \
sep="\t", na_filter=False, \
usecols=[0, 1, 2, 3], \
names=['chrom', 'start', 'end', 'reads'], \
dtype={'chrom':'str', 'start':'int', \
'end':'int', 'reads':'float'} )
refseq_df = pd.read_csv(args.refseq_annotations, header=None, \
sep="\t", na_filter=False, \
usecols=[0, 1, 2, 5], \
names=['chrom', 'start', 'end', 'strand'], \
dtype={'chrom':'str', 'start':'int', \
'end':'int', 'strand':'str'} )
def get_atac_features(current_chrom):
print("Processing peaks from chromosome %s" % current_chrom)
chrom_features = []
last_peak_end = 0
ovlp_iter = atac_peak_df[(atac_peak_df.chrom == current_chrom)].itertuples()
for peak in ovlp_iter:
signal_features = np.zeros(2 * SIGNAL_EVALUATION_RADIUS)
peak_midpoint = peak.start + (peak.end - peak.start)/2
region_start = peak_midpoint - SIGNAL_EVALUATION_RADIUS
region_end = peak_midpoint + SIGNAL_EVALUATION_RADIUS
peak_window_reads = np.zeros(peak.end - peak.start)
# Get the number of reads for every nucleotide +/- the SIGNAL_EVALUATION_RADIUS
#wide_signal_iter = atac_bed_df[(atac_bed_df['chrom'] == current_chrom) & (atac_bed_df['start'] >= region_start) & (atac_bed_df['end'] <= region_end)].itertuples()
wide_signal_iter = atac_bed_df.query('chrom == "%s" and start >= %d and end <= %d' % (current_chrom, region_start, region_end)).itertuples()
for coverage in wide_signal_iter:
feat_start = coverage.start - region_start + 1
feat_end = coverage.end - region_start
for position in range(int(feat_start), int(feat_end)):
signal_features[position] = coverage.reads
# Get the mean number of reads for every nucleotide within the peak boundaries
peak_signal_iter = atac_bed_df.query('chrom == "%s" and start >= %d and end <= %d' % (current_chrom, peak.start, peak.end)).itertuples()
for coverage in peak_signal_iter:
feat_start = coverage.start - peak.start + 1
feat_end = coverage.end - peak.start
for position in range(int(feat_start), int(feat_end)):
peak_window_reads[position] = coverage.reads
# Gather other peak attributes
dist_from_last_peak = 0
if last_peak_end > 0:
dist_from_last_peak = peak.start - last_peak_end
last_peak_end = peak.end
peak_width = peak.end - peak.start
# Check whether it overlaps a promoter
promoter_overlaps = len(refseq_df.query('chrom == "%s" and ((strand == "+" and %d < start and %d > start) or (strand == "-" and %d < end and %d > end))' % (current_chrom, peak.start, peak.end, peak.start, peak.end)))
# Get sequence-based features
sequence_row = seq_df.query('chrom == "%s" and start < %d and end > %d' % (current_chrom, peak_midpoint, peak_midpoint))
if len(sequence_row) > 0:
sequence = sequence_row.iloc[0].seq
gc_count = 0
for nucleotide in sequence:
if nucleotide == 'C' or nucleotide == 'G' or nucleotide == 'S':
gc_count += 1
gc_ratio = float(gc_count) / (2 * SEQUENCE_EVALUATION_RADIUS)
else:
print("Could not find sequence information for peak %s:%s-%s ... skipping peak" % (current_chrom, peak.start, peak.end))
continue
overlaps_nascent_txn = 0
if peak.ovlp_txn > 0:
overlaps_nascent_txn = 1
chrom_features.append( { 'chrom': current_chrom, \
'start': peak.start, \
'end': peak.end, \
'ovlp_txn': overlaps_nascent_txn, \
'prom_ovlp': promoter_overlaps, \
'width': peak_width, \
'mean_nr_reads': np.mean(peak_window_reads), \
'max_reads': np.max(peak_window_reads), \
'min_reads': np.min(peak_window_reads), \
'dist_from_last_peak': dist_from_last_peak, \
'gc_ratio': gc_ratio,
'sequence': sequence,
'signal_features': signal_features } )
last_peak_end = peak.end
return np.array(chrom_features)
# easy way to create an empty pandas dataframe from
# https://stackoverflow.com/questions/36462257/create-empty-dataframe-in-pandas-specifying-column-types#36463086
def df_empty(columns, dtypes, index=None):
assert len(columns)==len(dtypes)
df = pd.DataFrame(index=index)
for c,d in zip(columns, dtypes):
df[c] = pd.Series(dtype=d)
return df
if __name__=='__main__':
CHROMOSOMES = [ 'chr1', 'chr2', 'chr3', 'chr4', 'chr5', 'chr6', 'chr7', 'chr8', \
'chr9', 'chr10', 'chr11', 'chr12', 'chr13', 'chr14', 'chr15', \
'chr16', 'chr17', 'chr18', 'chr19', 'chr20', 'chr21', 'chr22', \
'chrX', 'chrY']
atac_peaks_file = args.atac_peaks.split('/')[-1].split('.')[0]
if args.skip_sequence_caching: # don't waste cycles if this has already been cached
seq_df = pd.read_pickle('%s/%s_seq_pandas_dataframe.pickle' % (SEQ_DUMP_DIR, atac_peaks_file))
else:
print('Gathering the actual sequence overlapping each peak:')
# Generate a temp bed file with fixed windows for each peak
tmp_peaks_filename = '%s/%s_tmp_fixed_peak_windows.bed' % (SEQ_DUMP_DIR, atac_peaks_file)
with open(tmp_peaks_filename, 'w') as tmp_peaks:
for peak in atac_peak_df.itertuples():
if peak.chrom in CHROMOSOMES:
peak_midpoint = peak.start + (peak.end - peak.start)/2
tmp_peaks.write("%s\t%d\t%d\n" % (peak.chrom, \
peak_midpoint - SEQUENCE_EVALUATION_RADIUS, \
peak_midpoint + SEQUENCE_EVALUATION_RADIUS))
# Gather the sequence for all those fixed-width regions
sequence = os.popen('%s subseq %s %s > %s/%s.fa' % (SEQTK_PATH, args.genome_fasta, tmp_peaks_filename, SEQ_DUMP_DIR, atac_peaks_file)).read()
fasta_sequences = SeqIO.parse(open('%s/%s.fa' % (SEQ_DUMP_DIR, atac_peaks_file)),'fasta')
# Create a dataframe to access these more efficiently
seq_df = df_empty(['chrom', 'start', 'end', 'seq'], [np.str, np.int, np.int, np.str])
for seq in fasta_sequences:
try:
chrom, coordinates = seq.id.split(':')
start = coordinates.split('-')[0]
end = coordinates.split('-')[1]
seq_df.loc[len(seq_df)] = [chrom, int(start), int(end), str(seq.seq)]
except Exception as e:
print("Something went sideways parsing the FASTA:")
print(seq)
print('-------------------------------------------')
print(e)
raise
seq_df.to_pickle('%s/%s_seq_pandas_dataframe.pickle' % (SEQ_DUMP_DIR, atac_peaks_file))
pool = multiprocessing.Pool(NR_THREADS)
X_and_y_train = pool.map(get_atac_features, CHROMOSOMES)
pool.close()
pool.join()
data = [j for sub in X_and_y_train for j in sub]
data = pd.DataFrame(data)
data['signal_features'] = data['signal_features'].map(lambda x: x.astype('float32'))
data['ovlp_txn'] = data['ovlp_txn'].astype('int')
data['mean_nr_reads'] = data['mean_nr_reads'].astype('float32')
data['gc_ratio'] = data['gc_ratio'].astype('float32')
data.to_pickle('%s/%s_from-fstitchtfit.pk' % (OUTDIR, args.prefix))
| Dowell-Lab/OCR_transcription_detection | get_overlaps.py | get_overlaps.py | py | 10,100 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "warnings.filterwarnings",
"line_number": 2,
"usage_type": "call"
},
{
"api_name": "warnings.filterwarnings",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": ... |
16408236851 | """Filter chat messages"""
import logging
_LOG = logging.getLogger(__name__)
import discord
import doosbot.client
def init(client: doosbot.client.DoosBotClient, tree: discord.app_commands.CommandTree):
@client.event
async def on_message(message: discord.message.Message):
_LOG.info(f"MESSAGE { message.author.display_name }: { message.content }")
try:
if("kom" in message.content and not "soep" in message.content):
_LOG.info(f"KOM SOEP module triggered by { message.author.name }")
for character in "SOEP":
emoji_character = chr(ord(character) + ord("🇦") - ord("A"))
await message.add_reaction(emoji_character)
except Exception as e:
_LOG.error(f"Error handling chat message: { e }")
try:
await message.reply(f"Oeps, DoosBot went full-krak: `{ e }`")
except:
_LOG.error(f"Error while sending the error report to the channel") | PimDoos/DoosBotPy | doosbot/modules/chat.py | chat.py | py | 881 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "doosbot.client.client",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "doosbot.client",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "discord.app_... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.