seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
39100516740 | import random
class Student:
    """A simple day-by-day life simulation of a student.

    Tracks happiness (gladness), study progress, money, and whether the
    student is still in the simulation; each simulated day performs one
    randomly chosen activity plus bookkeeping.
    """

    def __init__(self, name):
        self.name = name
        self.gladness = 50
        self.progress = 0
        self.money = 0
        self.alive = True

    def to_study(self):
        """Studying: progress up, happiness down."""
        print('Time to study!')
        self.progress += 0.15
        self.gladness -= 3

    def to_sleep(self):
        """Sleeping: happiness up."""
        print('I am going to sleep!')
        self.gladness += 3

    def to_chill(self):
        """Relaxing: happiness up, but it costs money and progress."""
        print('Resting time!')
        self.gladness += 5
        self.money -= 10
        self.progress -= 0.2

    def to_work(self):
        """Working: money up, happiness down."""
        print('Time to work!')
        self.money += 50
        self.gladness -= 5

    def update_student_state(self):
        """Force a corrective action when money or progress is negative."""
        if self.money < 0:
            print("I don't have enough money, I have to work!")
            self.to_work()
        elif self.progress < 0:
            print('I have problems with my progress, I have to study!')
            self.to_study()

    def is_alive(self):
        """End the simulation on expulsion, depression, or early graduation."""
        if self.progress < -0.5:
            print('Cast out...')
            self.alive = False
        elif self.gladness <= 0:
            print('Depression...')
            self.alive = False
        elif self.progress > 5:
            print('Passed externaly...')
            self.alive = False

    def end_of_day(self):
        """Print the end-of-day summary of all tracked stats."""
        print(f'Progress = {self.progress}')
        print(f'Gladness = {self.gladness}')
        print(f'Money = {self.money}')

    def live(self, day):
        """Play one simulated day: banner, one random activity, bookkeeping."""
        banner = f"Day {day} of {self.name}'s life"
        print(f"{banner:=^50}")
        actions = {1: self.to_study, 2: self.to_sleep, 3: self.to_chill, 4: self.to_work}
        actions[random.randint(1, 4)]()
        self.update_student_state()
        self.is_alive()
        self.end_of_day()
# Simulate up to one year of Nick's life; stop early once is_alive() has
# flipped `alive` to False (cast out, depression, or early graduation).
nick = Student(name='Nick')
for day in range(365):
    if not nick.alive:
        break
    nick.live(day)
| MishaTret/homework2 | main.py | main.py | py | 1,968 | python | en | code | 0 | github-code | 13 |
36851203306 | # -*- coding: utf-8 *-*
try:
import simplejson as json
except ImportError:
import json
import requests
from bson import json_util
from mongolabclient import settings, validators, errors
class MongoLabClient(object):
    """Thin client for the MongoLab REST API.

    Instance class with the API key located at
    https://mongolab.com/user?username=[username].
    .. note::
        The ``version`` parameter is optional, because it is planned for use in
        future versions of the REST API.
    When your connection needs a proxy, you can set an `str` with the
    Proxy url to ``proxy_url`` parameter. If you don't set a ``proxy_url``,
    then :class:`MongoLabClient` gets system proxy settings.
    .. code-block:: python
       >>> from mongolabclient import MongoLabClient
       >>> MongoLabClient("MongoLabAPIKey", proxy_url="https://127.0.0.1:8000")
       MongoLabClient('MongoLabAPIKey', 'v1')
    .. note:: `proxy_handler` was deprecated on 1.3 version.
    """

    def __init__(self, api_key, version=settings.VERSION_1, proxy_url=None):
        self.api_key = api_key
        self.settings = settings.MongoLabSettings(version)
        self.__content_type = 'application/json;charset=utf-8'
        self.__proxy_url = proxy_url
        # Fail fast: an invalid key makes every later call useless.
        if not self.__validate_api_key():
            raise errors.InvalidAPIKey(self.api_key)

    def __validate_api_key(self):
        """Validate API Key format and make a GET request to REST API base url:
        https://api.mongolab.com/api/1
        """
        if not validators.check_api_key(self.api_key):
            raise errors.BadAPIKeyFormat(self.api_key)
        r = self.__get_response(settings.VAL_API)
        return (r["status"] == 200)

    @property
    def base_url(self):
        """REST API base url depending on the selected version (memoized)."""
        if not hasattr(self, "_base_url"):
            self._base_url = self.settings.base_url
        return self._base_url

    @property
    def proxy_url(self):
        """Proxy url to use on all HTTP requests.
        .. versionadded: 1.3
        """
        return self.__proxy_url

    @property
    def proxies(self):
        """Proxy mapping in the format expected by ``requests``."""
        if self.proxy_url:
            return {'https': self.proxy_url}
        return {}

    @property
    def proxy_handler(self):
        """Instance of :class:`urllib2.ProxyHandler` to use on all HTTP
        requests.
        .. deprecated: 1.3
            Use :attr:`proxies` instead.
        """
        # NOTE: Python 2 only -- urllib2 does not exist on Python 3. Kept
        # solely for backwards compatibility with the deprecated API.
        import urllib2
        if self.proxy_url:
            return urllib2.ProxyHandler({"https": self.proxy_url})
        return urllib2.ProxyHandler()

    def __get_full_url(self, operation, slug_params):
        """Returns the full url of the selected operation with the slug
        parameters interpolated.
        """
        return (self.base_url + operation[1]) % slug_params

    def __get_response(self, operation, slug_params=None, **kwargs):
        """Perform the HTTP request for ``operation`` and return a dict with
        the response ``status`` code and BSON-decoded ``result``.

        ``slug_params`` defaults to None instead of a mutable ``{}`` so the
        default is never shared between calls.
        """
        slug_params = {} if slug_params is None else slug_params
        operation = self.settings.operations[operation]
        url = self.__get_full_url(operation, slug_params)
        method = getattr(requests, operation[0])
        headers = {'content-type': self.__content_type}
        params = {'apiKey': self.api_key}
        data = {}
        if operation[0] in ['get', 'delete']:
            params.update(kwargs)
        elif operation[0] == "post":
            data = json.dumps(kwargs.get("data", {}), default=json_util.default)
        elif operation[0] == "put":
            # Serialize the body first and pop it from kwargs so it is not
            # urlencoded into the query string. A missing "data" key now
            # falls back to {} instead of raising KeyError (`del` previously
            # assumed it was always present).
            data = json.dumps(kwargs.pop("data", {}), default=json_util.default)
            params.update(kwargs)
        else:
            raise ValueError('Method not allowed.')
        params = requests.compat.urlencode(params)
        response = method(url, headers=headers, params=params, data=data,
            proxies=self.proxies)
        return {
            "status": response.status_code,
            "result": json.loads(response.text,
                object_hook=json_util.object_hook)
        }

    def list_databases(self):
        """Returns a list of databases name of your account.
        .. code-block:: bash
            GET /databases
        """
        r = self.__get_response(settings.LST_DBS)
        if r["status"] == 200:
            return r["result"]
        raise Exception(r["result"]["message"])

    def list_collections(self, database):
        """Returns a list of collections name of database selected.
        .. code-block:: bash
            GET /databases/{database}/collections
        """
        r = self.__get_response(settings.LST_COLS, {"db": database})
        if r["status"] == 200:
            return r["result"]
        raise Exception(r["result"]["message"])

    def list_documents(self, database, collection, **kwargs):
        """Returns a list of dicts with the matched documents with the query.
        .. code-block:: bash
            GET /databases/{database}/collections/{collection}
        """
        kwargs = validators.check_list_documents_params(**kwargs)
        r = self.__get_response(settings.LST_DOCS,
            {"db": database, "col": collection}, **kwargs)
        if r["status"] == 200:
            return r["result"]
        raise Exception(r["result"]["message"])

    def insert_documents(self, database, collection, doc_or_docs):
        """Insert a document or documents into collection.
        .. code-block:: bash
            POST /databases/{database}/collections/{collection}
        """
        validators.check_documents_to_insert(doc_or_docs)
        r = self.__get_response(settings.INS_DOCS,
            {"db": database, "col": collection}, data=doc_or_docs)
        if r["status"] == 200:
            return r["result"]
        raise Exception(r["result"]["message"])

    def update_documents(self, database, collection, spec, doc_or_docs, upsert,
                         multi):
        """Update a document or documents that matches with query. It is
        possible to ``upsert`` data. Returns the number of affected
        documents.
        .. code-block:: bash
            PUT /databases/{database}/collections/{collection}
        """
        validators.check_document_to_update(doc_or_docs)
        r = self.__get_response(settings.UPD_DOCS,
            {"db": database, "col": collection},
            data=doc_or_docs, q=spec, m=multi, u=upsert)
        if r["status"] == 200:
            if r["result"]["error"]:
                raise Exception(r["result"]["error"])
            return r["result"]["n"]
        raise Exception(r["result"]["message"])

    def delete_replace_documents(self, database, collection, spec=None,
                                 documents=None):
        """Delete or replace a document or documents that matches with query.
        Returns the number of affected documents.
        .. code-block:: bash
            PUT /databases/{database}/collections/{collection}
        """
        # Avoid mutable default arguments; None preserves the old semantics
        # of the previous {} / [] defaults without sharing state across calls.
        spec = {} if spec is None else spec
        documents = [] if documents is None else documents
        r = self.__get_response(settings.DEL_REP_DOCS,
            {"db": database, "col": collection}, data=documents, q=spec)
        if r["status"] == 200:
            return r["result"]["n"]
        raise Exception(r["result"]["message"])

    def view_document(self, database, collection, _id):
        """Returns a dict with document matched with this ``_id``.
        .. code-block:: bash
            GET /databases/{database}/collections/{collection}/{_id}
        """
        r = self.__get_response(settings.VIW_DOC,
            {"db": database, "col": collection, "id": str(_id)})
        if r["status"] == 200:
            return r["result"]
        raise Exception(r["result"]["message"])

    def update_document(self, database, collection, _id, document):
        """Update a document matched with this ``_id``, returns number of
        documents affected.
        .. code-block:: bash
            PUT /databases/{database}/collections/{collection}/{_id}
        """
        r = self.__get_response(settings.UPD_DOC,
            {"db": database, "col": collection, "id": str(_id)},
            data=document)
        if r["status"] == 200:
            return r["result"]
        raise Exception(r["result"]["message"])

    def delete_document(self, database, collection, _id):
        """Delete a document matched with this ``_id``, returns a :class:`dict`
        with deleted document or a list of dicts with deleted documents.
        .. code-block:: bash
            DELETE /databases/{database}/collections/{collection}/{_id}
        """
        r = self.__get_response(settings.DEL_DOC,
            {"db": database, "col": collection, "id": str(_id)})
        if r["status"] == 200:
            return r["result"]
        raise Exception(r["result"]["message"])

    def run_command(self, database, command):
        """Run a database-collection level command.
        .. code-block:: bash
            POST /databases/{database}/runCommand
        """
        r = self.__get_response(settings.RUN_DB_COL_LVL_CMD, {"db": database},
            data=command)
        if r["status"] == 200:
            return r["result"]
        raise Exception(r["result"]["message"])
| puentesarrin/pymongolab | mongolabclient/client.py | client.py | py | 9,002 | python | en | code | 7 | github-code | 13 |
30900917736 | """empty message
Revision ID: 6c1fa3d9287d
Revises:
Create Date: 2020-04-04 11:50:32.327290
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '6c1fa3d9287d'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """Apply this revision: add a free-text ``loading`` column, relax
    ``loading_type`` to nullable, and drop the numeric ``loading_kpa``
    column on the ``loading`` table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('loading', sa.Column('loading', sa.String(), nullable=True))
    op.alter_column('loading', 'loading_type',
               existing_type=sa.VARCHAR(length=50),
               nullable=True)
    op.drop_column('loading', 'loading_kpa')
    # ### end Alembic commands ###
def downgrade():
    """Revert this revision: restore the NOT NULL numeric ``loading_kpa``
    column, make ``loading_type`` required again, and drop ``loading``.
    NOTE(review): existing rows will violate the NOT NULL constraint on
    ``loading_kpa`` unless backfilled first -- confirm before downgrading."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('loading', sa.Column('loading_kpa', sa.NUMERIC(), autoincrement=False, nullable=False))
    op.alter_column('loading', 'loading_type',
               existing_type=sa.VARCHAR(length=50),
               nullable=False)
    op.drop_column('loading', 'loading')
    # ### end Alembic commands ###
| kanwarjohal/flasktestkanwar | migrations/versions/6c1fa3d9287d_.py | 6c1fa3d9287d_.py | py | 1,043 | python | en | code | 0 | github-code | 13 |
23274482780 | import numpy as np
import pandas as pd
# Scratch/test script: a few prints plus module-level values, apparently
# used to exercise branch/merge tooling rather than do real work.
print("iojqwodijddd")
print("new line is added...")
print('add new line in branch...')
size = 9
big = 10.222
nxt = 22
version = 'master'
| supershushu/tm5-test | src/tm5-test.py | tm5-test.py | py | 180 | python | en | code | 1 | github-code | 13 |
20628641821 | import xbmc
import xbmcaddon
import xbmcgui
import os
import re
import sfile
def GetXBMCVersion():
    """Return the installed XBMC version as a ``(major, minor)`` int tuple,
    read from the 'xbmc.addon' add-on's version string."""
    raw = xbmcaddon.Addon('xbmc.addon').getAddonInfo('version')
    parts = raw.split('.')
    return int(parts[0]), int(parts[1])
# --- Identity of this tools add-on ---
TITLE = 'TV Portal Tools'
ADDONID = 'script.tvportal.tools'
ADDON = xbmcaddon.Addon(ADDONID)
HOME = ADDON.getAddonInfo('path')
PROFILE = ADDON.getAddonInfo('profile')
# --- The TV Portal add-on this tools script operates on ---
TVP_TITLE = 'TV Portal'
TVP_ADDONID = 'script.tvportal'
TVP_ADDON = xbmcaddon.Addon(TVP_ADDONID)
TVP_HOME = xbmc.translatePath(TVP_ADDON.getAddonInfo('path'))
TVP_PROFILE = xbmc.translatePath(TVP_ADDON.getAddonInfo('profile'))
# OTT_CHANNELS = os.path.join(OTT_PROFILE, 'channels')
VERSION = ADDON.getAddonInfo('version')
ICON = os.path.join(HOME, 'icon.png')
FANART = os.path.join(HOME, 'fanart.jpg')
GETTEXT = ADDON.getLocalizedString
# Version flags: FRODO is XBMC 12.x (pre-12.9), GOTHAM is 13.x or the
# 12.9 pre-release builds.
MAJOR, MINOR = GetXBMCVersion()
FRODO = (MAJOR == 12) and (MINOR < 9)
GOTHAM = (MAJOR == 13) or (MAJOR == 12 and MINOR == 9)
baseurl = 'http://noobsandnerds.com/TVP'
def GetBaseUrl():
    """Return the remote base URL used by TV Portal tools downloads."""
    return baseurl
def GetChannelType():
    """Return the TV Portal 'chan.type' setting ('1' means a user-defined
    custom channel folder; see GetChannelFolder)."""
    return TVP_ADDON.getSetting('chan.type')
def GetChannelFolder():
    """Return the folder holding channel data: the user-configured folder
    when the channel type is custom ('1'), otherwise the TV Portal profile
    directory."""
    CUSTOM = '1'
    if GetChannelType() == CUSTOM:
        return TVP_ADDON.getSetting('user.chan.folder')
    return TVP_PROFILE
# Resolve the channels directory once at import time.
channelFolder = GetChannelFolder()
TVP_CHANNELS = os.path.join(channelFolder, 'channels')
def log(text):
    """Write a debug-level line, prefixed with the add-on title and version,
    to the XBMC log. Failures are deliberately swallowed so a bad message
    can never crash the caller -- but only Exception is caught now, not a
    bare except that would also hide SystemExit/KeyboardInterrupt."""
    try:
        output = '%s V%s : %s' % (TITLE, VERSION, text)
        xbmc.log(output, xbmc.LOGDEBUG)
    except Exception:
        pass
def DialogOK(line1, line2='', line3=''):
    """Show a modal OK dialog titled with the add-on title and version."""
    xbmcgui.Dialog().ok(TITLE + ' - ' + VERSION, line1, line2, line3)
def DialogYesNo(line1, line2='', line3='', noLabel=None, yesLabel=None):
    """Show a modal yes/no dialog titled with the add-on title and version.

    Custom button labels are only passed through when BOTH are supplied.
    Returns True if the user chose "yes". Uses `is None` identity checks
    and drops the redundant `== True` comparison."""
    d = xbmcgui.Dialog()
    if noLabel is None or yesLabel is None:
        return bool(d.yesno(TITLE + ' - ' + VERSION, line1, line2, line3))
    return bool(d.yesno(TITLE + ' - ' + VERSION, line1, line2, line3, noLabel, yesLabel))
def CheckVersion():
    """Record the current add-on version in settings and, on a first run
    (previous version 0.0.0/1.0.0), create the profile folder and show the
    changelog once. No-op when the stored version already matches."""
    prev = ADDON.getSetting('VERSION')
    curr = VERSION
    if prev == curr:
        return
    ADDON.setSetting('VERSION', curr)
    if prev == '0.0.0' or prev == '1.0.0':
        folder = xbmc.translatePath(PROFILE)
        try:
            if not sfile.isdir(folder):
                sfile.makedirs(folder)
        except: pass
        # Run showChangelog.py via AlarmClock instead of calling showChangelog()
        # directly, to work around a bug in OpenELEC builds.
        script = os.path.join(HOME, 'showChangelog.py')
        cmd = 'AlarmClock(%s,RunScript(%s),%d,True)' % ('changelog', script, 0)
        xbmc.executebuiltin(cmd)
def GetFolder(title):
    """Ensure the profile folder exists, then let the user browse for a
    folder. Returns the translated chosen path, or None when the dialog
    was cancelled (browse returned the '' default)."""
    default = ''
    profile_dir = xbmc.translatePath(PROFILE)
    if not sfile.isdir(profile_dir):
        sfile.makedirs(profile_dir)
    choice = xbmcgui.Dialog().browse(3, title, 'files', '', False, False, default)
    return None if choice == default else xbmc.translatePath(choice)
def showBusy():
    """Open XBMC's busy-spinner dialog and return it (the caller is
    responsible for closing it). Returns None if the dialog cannot be
    created.

    Fixes: removed the redundant function-local `import xbmcgui` (already
    imported at module level) and narrowed the bare excepts to Exception.
    """
    try:
        busy = xbmcgui.WindowXMLDialog('DialogBusy.xml', '')
        busy.show()
        try:
            # Hide control 10 (the cancel/progress control) when present.
            busy.getControl(10).setVisible(False)
        except Exception:
            pass
        return busy
    except Exception:
        return None
def clean(text):
    """Strip characters that are unsafe in filenames and surrounding
    whitespace. Returns the cleaned string, or None when the input is
    falsy or nothing usable remains."""
    if not text:
        return None
    cleaned = re.sub('[:\\\\/*?\<>|"]+', '', text).strip()
    return cleaned if cleaned else None
def deleteFile(path):
    """Best-effort delete: retry up to 5 times (sleeping 500ms between
    attempts) while the file still exists; gives up silently otherwise.
    NOTE(review): existence is checked with os.path.exists but removal uses
    sfile.remove -- presumably sfile wraps special/virtual paths; confirm
    the two agree, otherwise the loop may spin on paths sfile handles but
    os does not see (or vice versa)."""
    tries = 5
    while os.path.exists(path) and tries > 0:
        tries -= 1
        try:
            sfile.remove(path)
            break
        except:
            xbmc.sleep(500)
def showText(heading, text):
    """Open XBMC's text-viewer window (id 10147) and populate its heading
    (control 1) and body (control 5).

    The controls are not available immediately after ActivateWindow, so
    setting them is retried up to 50 times (10ms apart); the function
    returns as soon as both are set, or silently gives up."""
    id = 10147
    xbmc.executebuiltin('ActivateWindow(%d)' % id)
    xbmc.sleep(100)
    win = xbmcgui.Window(id)
    retry = 50
    while (retry > 0):
        try:
            xbmc.sleep(10)
            retry -= 1
            win.getControl(1).setLabel(heading)
            win.getControl(5).setText(text)
            return
        except:
            pass
def showChangelog(addonID=None):
    """Display an add-on's changelog in XBMC's text viewer.

    Defaults to this add-on when no addonID is given. Failures are silent
    (matching the original behaviour), but the changelog file handle is now
    closed deterministically via a `with` block instead of leaking, and
    only Exception is caught rather than a bare except."""
    try:
        if addonID:
            ADDON = xbmcaddon.Addon(addonID)
        else:
            ADDON = xbmcaddon.Addon(ADDONID)
        with open(ADDON.getAddonInfo('changelog')) as f:
            text = f.read()
        title = '%s - %s' % (xbmc.getLocalizedString(24054), ADDON.getAddonInfo('name'))
        showText(title, text)
    except Exception:
        pass
if __name__ == '__main__':
pass | whufclee/XBMC-Python | script.tvportal.tools/utils.py | utils.py | py | 4,673 | python | en | code | 1 | github-code | 13 |
31043568539 | from datetime import date
from typing import List, Optional
import uuid
from source.models.book.base import BookBaseModel
from source.views.book import BookView
from werkzeug.exceptions import NotFound
class BookService:
    """Book actions: CRUD-style operations that delegate persistence to the
    injected ``book_model`` and serialization to :class:`BookView`."""

    book_model: BookBaseModel

    def __init__(self, book_model):
        self.book_model = book_model

    def _get_or_404(self, id: uuid.UUID):
        """Return the book with ``id`` or raise 404 NotFound.

        Factors out the lookup+404 pattern previously duplicated across
        find_one/update_one/delete_one."""
        book = self.book_model.find(id)
        if not book:
            raise NotFound('book not found')
        return book

    def save(self, title: str, description: str, tags: List[str], publication_date: date) -> dict:
        """Create and persist a new book; return its serialized form."""
        book = self.book_model(
            title=title,
            description=description,
            tags=tags,
            publication_date=publication_date
        )
        book.save()
        return BookView.show(book)

    def list(self) -> dict:
        """Return the serialized list of all books."""
        book_list = self.book_model.list()
        return BookView.show_many(book_list)

    def find_one(self, id: uuid.UUID) -> dict:
        """Return one serialized book; raises NotFound (404) if missing."""
        return BookView.show(self._get_or_404(id))

    def update_one(self, id: uuid.UUID, title: Optional[str] = None, description: Optional[str] = None, tags: Optional[List[str]] = None, publication_date: Optional[date] = None) -> dict:
        """Update a book and return its serialized form; 404 if missing.

        NOTE(review): all fields (possibly None) are forwarded to
        ``book.set`` -- presumably the model ignores None rather than
        blanking the columns; confirm against BookBaseModel.set."""
        book = self._get_or_404(id)
        book.set(
            title=title,
            description=description,
            tags=tags,
            publication_date=publication_date
        )
        book.save()
        return BookView.show(book)

    def delete_one(self, id: uuid.UUID):
        """Delete a book; raises NotFound (404) if missing. Returns None."""
        self._get_or_404(id).delete()
        return None
| pluvet/Books | source/services/book.py | book.py | py | 1,895 | python | en | code | 0 | github-code | 13 |
# Read the qualifying (control) time as minutes + seconds, the race length
# in metres, and the runner's pace in seconds per 100 m.
minutes_control = int(input())
seconds_control = int(input())
length = float(input())
seconds_for_hundred_meters = int(input())
# Control time expressed in seconds.
control = minutes_control * 60 + seconds_control
# Runner's time: pace-based time minus a 2.5 s bonus for every 120 m of length.
time_of_player = length * seconds_for_hundred_meters / 100 - length * 2.5 / 120
if time_of_player <= control:
    print('Marin Bangiev won an Olympic quota!')
    print(f'His time is {time_of_player:.3f}.')
else:
print(f'No, Marin failed! He was {time_of_player - control:.3f} second slower.') | patsonev/Python_Basics_Exam_Preparation | skeleton.py | skeleton.py | py | 490 | python | en | code | 0 | github-code | 13 |
33690811070 | from typing import Any, Dict, List
from pydbml import PyDBML
from . import logger
from .data_types_generator import DataTypeGenerator
from .distribution import DistributionGenerator, DistributionType
class GeneratorDirectiveDependency:
    """A link from one directive's field to the directive (and field) whose
    generated values it must reuse -- i.e. a foreign-key style reference."""

    def __init__(self, referenced_field: str, referenced_directive: 'GeneratorDirective'
                 ) -> None:
        # Fail fast on a malformed dependency instead of surfacing obscure
        # AttributeErrors later during generation. (Replaces a placeholder
        # generic Exception with a Spanish TODO message; ValueError is a
        # subclass of Exception, so existing handlers still catch it.)
        if referenced_field is None or referenced_directive is None:
            raise ValueError(
                'Both referenced_field and referenced_directive are required '
                'to build a GeneratorDirectiveDependency.'
            )
        self.referenced_field = referenced_field
        self.referenced_directive = referenced_directive
class GeneratorDirective:
    """Generation plan for one table.

    Holds the per-field data generators, the number of samples to produce
    per dependency combination, and the fields whose values are borrowed
    from other directives (foreign-key style dependencies). Generated data
    is cached until reset().
    """

    def __init__(self, index: int, name: str, num_samples: int, fields: Dict[str, DataTypeGenerator], dependencies: Dict[str, GeneratorDirectiveDependency]):
        self.index = index
        self.name = name
        self.fields = fields
        self.num_samples = num_samples
        # Normalize None so the rest of the class can iterate unconditionally.
        self.dependencies = dependencies if dependencies is not None else {}
        # Lazily populated cache of the generated {column: [values]} mapping.
        self._generated_data = None

    def __generate_dependencies_entries(self) -> List[Dict[str, Any]]:
        """Return the cartesian product of every referenced field's values,
        one dict per combination ({field_name: referenced_value, ...})."""
        # auxiliary functions
        def _clone_entries(entries):
            return [{k: v for k, v in e.items()} for e in entries]
        dependency_entries = []
        for field_name, dependency in self.dependencies.items():
            # fetch basic dependency information
            dependency_field = dependency.referenced_field
            dependency_directive = dependency.referenced_directive
            logger.debug(f'retrieving samples from {dependency_directive.name}.{dependency_field} for field {self.name}.{field_name}')
            dependency_values = dependency_directive.fetch_field(field=dependency_field)
            # calculate new dependency_entries
            dependency_values_entries = [{field_name: value} for value in dependency_values]
            # if dependency_entries is empty, seed it with the new entries;
            # otherwise cross the existing combinations with the new values
            if not dependency_entries:
                dependency_entries = dependency_values_entries
            else:
                new_dependency_entries = []
                for dependency_entry in dependency_values_entries:
                    current_dependency_entry_entries = [{**dependency_entry, **entry} for entry in _clone_entries(dependency_entries)]
                    new_dependency_entries.extend(current_dependency_entry_entries)
                dependency_entries = new_dependency_entries
        return dependency_entries

    def __generate_value_entries(self, num_samples: int
                                 ) -> List[Dict[str, Any]]:
        """Generate ``num_samples`` values for every non-dependency field and
        pivot the column-wise lists into a list of row dicts."""
        value_entries_dict = {}
        for field in self.fields:
            logger.debug(f'generating {num_samples} samples for field {self.name}.{field}')
            field_values = self.fields.get(field).generate(num_samples=num_samples)
            value_entries_dict[field] = field_values
        # format fields into entries (column-wise lists -> row dicts)
        logger.debug(f'preparing generated entries of {self.name} to cross with dependency entries')
        value_entries = [{k: value_entries_dict[k][i] for k in value_entries_dict} for i in range(num_samples)]
        return value_entries

    def __cross_dependency_and_value_entries(self, dependencies_entries: List[Dict[str, Any]], value_entries: List[Dict[str, Any]]
                                             ) -> List[Dict[str, Any]]:
        """Assign each chunk of ``self.num_samples`` generated rows to one
        dependency combination and merge the dicts into final rows."""
        # auxiliary functions
        def _chunk_list(list_, chunk_size):
            return [list_[i:i + chunk_size] for i in range(0, len(list_), chunk_size)]
        # split the value entries in chunks of size self.num_samples, because
        # that many samples are assigned to each dependency combination
        chunked_value_entries = _chunk_list(value_entries, self.num_samples)
        # merge the two lists, replicating each dependency_entry across its chunk
        entries = []
        for dependency_entry, value_entries_chunk in zip(dependencies_entries, chunked_value_entries):
            new_entries = [{**dependency_entry, **value_entry} for value_entry in value_entries_chunk]
            entries.extend(new_entries)
        return entries

    def generate(self) -> List[Dict[str, Any]]:
        """Generate (and cache) this directive's data.

        With dependencies: num_samples rows per combination of referenced
        values. Without: exactly num_samples rows."""
        logger.info(f'generating data for directive {self.name}')
        if self.dependencies:
            # calculate the dependency combinations
            logger.debug('crossing dependencies entries')
            dependencies_entries = self.__generate_dependencies_entries()
            num_samples = self.num_samples * len(dependencies_entries)
            # generate common fields
            logger.debug(f'generating {num_samples} samples of common fields')
            value_entries = self.__generate_value_entries(num_samples=num_samples)
            # cross generated values with the dependency combinations
            logger.debug('crossing with dependency entries with generated entry values')
            entries = self.__cross_dependency_and_value_entries(
                dependencies_entries=dependencies_entries, value_entries=value_entries
            )
        else:
            # generate common fields directly
            logger.debug(f'no dependencies found, generating ({self.num_samples}) of common fields.')
            entries = self.__generate_value_entries(num_samples=self.num_samples)
        # pivot row dicts back into the cached {column: [values]} form
        logger.debug('formatting generated dependencies.')
        all_fields = list(self.fields) + list(self.dependencies)
        self._generated_data = {
            field: [entry[field] for entry in entries]
            for field in all_fields
        }

    def fetch(self) -> Dict[str, List[Any]]:
        """Return the cached data, generating it on first access."""
        if self._generated_data is None:
            self.generate()
        return self._generated_data

    def fetch_field(self, field: str) -> List[Any]:
        """Return the generated values for one field (own or dependency)."""
        if field not in self.fields and field not in self.dependencies:
            raise ValueError(f'The requested field "{field}" is not available in the directive "{self.name}". Only there is available "{", ".join([f for f in self.fields.keys()])}" and "{", ".join([f for f in self.dependencies.keys()])}".')
        return self.fetch().get(field)

    def reset(self) -> None:
        """Drop the cached data so the next fetch() regenerates it.

        Fixed: previously logged ``self.table`` -- an attribute that does
        not exist on this class -- so reset() always raised AttributeError.
        """
        logger.debug(f'reset data from {self.name}')
        self._generated_data = None
class DirectiveBuilder:
    """Builds the ordered list of GeneratorDirective objects for a schema.

    Combines a parsed DBML schema (for foreign-key references) with the
    per-table generation config, wires dependency links between directives,
    and assigns each directive an index in the generation sequence.
    """

    def _calculate_dependencies(self, dbml: PyDBML
                                ) -> Dict[str, Dict[str, Dict[str, str]]]:
        """Extract {referencing_table: {referencing_column: {'table': ...,
        'column': ...}}} from the DBML references, de-duplicating mutual
        (bidirectional) references and dropping tables left empty."""
        # extract all dependencies
        dependencies = {}
        for table in dbml.tables:
            for ref in table.get_refs():
                ref_type = ref.type
                # note: table1 ref_type table2
                if ref_type == '>':
                    referenced_table = ref.table2
                    referencing_table = ref.table1
                    referenced_columns = ref.col2
                    referencing_columns = ref.col1
                elif ref_type == '<':
                    referenced_table = ref.table1
                    referencing_table = ref.table2
                    referenced_columns = ref.col1
                    referencing_columns = ref.col2
                else: # if is one to one, the main one is understood like the first one
                    referenced_table = ref.table2
                    referencing_table = ref.table1
                    referenced_columns = ref.col2
                    referencing_columns = ref.col1
                # get names
                referenced_table = referenced_table.name
                referencing_table = referencing_table.name
                referenced_columns = [c.name for c in referenced_columns]
                referencing_columns = [c.name for c in referencing_columns]
                # only single-column reference targets are supported
                referenced_column = referenced_columns[0]
                if len(referencing_columns) > 1:
                    raise Exception(f'Generation Error: There is more than one column beign referenced by {referencing_table}.{referenced_column}. Concretelly {len(referencing_columns)} columns: {referencing_columns}.')
                # register references
                if referencing_table not in dependencies:
                    dependencies[referencing_table] = {}
                for referencing_column in referencing_columns:
                    dependencies[referencing_table][referencing_column] = {
                        'table': referenced_table, 'column': referenced_column
                    }
        # clean duplicated references: when A.x -> B.y and B.y -> A.x were
        # both registered, keep only one direction of the pair.
        # NOTE(review): this mutates nested dicts while iterating over
        # dependencies.items() -- safe as long as a table never references
        # itself; confirm that self-references cannot occur.
        for referencing_table, table_references in dependencies.items():
            for referencing_column, table_reference in table_references.items():
                referenced_table = table_reference.get('table')
                referenced_column = table_reference.get('column')
                referenced_table_references = dependencies.get(referenced_table)
                if referenced_table_references is None:
                    continue
                referenteced_column_references = referenced_table_references.get(referenced_column)
                if referenteced_column_references is None:
                    continue
                dup_tab = referenteced_column_references.get('table')
                dup_col = referenteced_column_references.get('column')
                if referencing_table == dup_tab and referencing_column == dup_col:
                    del referenced_table_references[referenced_column]
        # clean empty referenced tables
        dependencies = {table: references for table, references in dependencies.items() if references}
        return dependencies

    def __build_directive_for_table(self, table: str, table_config: Dict[str, Dict[str, Any]], table_dependencices: Dict[str, Dict[str, Dict[str, str]]]
                                    ) -> GeneratorDirective:
        """
        Build the directive for one table from its field configuration.
        Note: does not set dependencies -- fields referenced elsewhere are
        skipped here and wired later; the directive's index is also left
        unset until _calculate_sequence runs.
        """
        fields = {}
        for field, field_configuration in table_config.items():
            # look for dependencies
            field_dependency = table_dependencices.get(field) if table_dependencices is not None else None
            if field_dependency is not None:
                logger.debug(f'skipping field {table}.{field} due to external reference found to {field_dependency.get("table")}.{field_dependency.get("field")}')
                continue
            # if there is no dependency, then build generator for field
            logger.debug(f'building data generator for field {table}.{field}')
            value = field_configuration.get('value')
            data_type = field_configuration.get('type')
            num_samples = field_configuration.get('samples')
            collection_values = field_configuration.get('values')
            generable_expression = field_configuration.get('generator')
            distribution_info = field_configuration.get('distribution')
            end = value.get('end') if value is not None else None
            start = value.get('start') if value is not None else None
            # NOTE(review): both of the next two lines read the 'start' key of
            # distribution_info -- presumably they should read distinct keys
            # (e.g. 'type' and 'config'); looks like a copy-paste bug. Confirm
            # against the expected configuration schema before relying on
            # non-default distributions.
            distribution_type = distribution_info.get('start') if distribution_info is not None else None
            distribution_config = distribution_info.get('start') if distribution_info is not None else None
            distribution_type = DistributionType.from_str(distribution_type)
            # instance distribution generator
            distribution_generator = DistributionGenerator.build(
                distribution_type=distribution_type, config=distribution_config
            )
            # instance data generator
            data_generator = DataTypeGenerator.build(
                data_type=data_type, start=start, end=end, generable_expression=generable_expression, collection_values=collection_values, distribution_generator=distribution_generator
            )
            # add generator to generator_list
            fields[field] = data_generator
        # build directive
        # NOTE(review): num_samples is whatever the LAST iterated field's
        # config declared (the loop variable leaks), and is undefined
        # (NameError) when every field of the table is a dependency --
        # confirm whether samples was meant to be a table-level setting.
        logger.debug(f'building generation directive for {table}')
        directive = GeneratorDirective(
            index=None, name=table, num_samples=num_samples, fields=fields, dependencies=None
        )
        return directive

    def __build_dependencies_for_table_directive(self, table: str, directives: Dict[str, GeneratorDirective], table_dependencices: Dict[str, Dict[str, Dict[str, str]]]
                                                 ) -> None:
        """Wire dependency links for one table's directive, resolving each
        referenced table name to its (already built) directive."""
        # build dependencies
        dependencies = {}
        for field, dependency in table_dependencices.items():
            logger.debug(f'building dependency directive for field {table}.{field}')
            referenced_table = dependency.get('table')
            referenced_field = dependency.get('column')
            referenced_directive = directives.get(referenced_table)
            dependency_directive = GeneratorDirectiveDependency(
                referenced_field=referenced_field, referenced_directive=referenced_directive
            )
            dependencies[field] = dependency_directive
        # update directives in table's directive
        directives[table].dependencies = dependencies

    def _build_directives(self, tables_config: Dict[str, Any], dependencies: Dict[str, Dict[str, Dict[str, str]]]
                          ) -> List[GeneratorDirective]:
        """Build one directive per configured table, then populate each
        directive's dependency links. Returns the directives as a list."""
        # build directives and data generators for each column
        logger.debug('building data generators for each field not beign referenced. Also building directives for each table.')
        directives = {}
        for table, table_config in tables_config.items():
            logger.debug(f'building data generators for {table}')
            # fetch dependencies and initialize values
            table_dependencices = dependencies.get(table)
            # build directive for table
            directive = self.__build_directive_for_table(
                table=table, table_config=table_config, table_dependencices=table_dependencices
            )
            directives[table] = directive
        # build dependencies
        logger.debug('populating directives\' dependencies')
        for table in tables_config:
            table_dependencices = dependencies.get(table)
            if table_dependencices is not None:
                # update table's directive with dependencies
                logger.debug(f'populating directives\' dependencies for table {table}')
                self.__build_dependencies_for_table_directive(
                    table=table, directives=directives, table_dependencices=table_dependencices
                )
            else:
                logger.debug(f'no dependencies found for table {table}: skipping populating directives\' dependencies process')
        # retrieve directives objects from dictionary
        directives = list(directives.values())
        return directives

    def _calculate_sequence(self, directives: List[GeneratorDirective], dependencies: Dict[str, Dict[str, Dict[str, str]]]
                            ) -> None:
        """Assign each directive's ``index`` -- its position in a generation
        sequence derived from the reference graph."""
        # invert the dependency mapping: referenced_table -> [referencing tables]
        references = {}
        for referencing_table, referencing_table_columns in dependencies.items():
            for referencing_table_column, referencing_table_column_dependencies in referencing_table_columns.items():
                referenced_table = referencing_table_column_dependencies.get('table')
                if referenced_table not in references:
                    references[referenced_table] = []
                references[referenced_table].append(referencing_table)
        # calculate all tables names
        all_tables = [directive.name for directive in directives]
        # calculate the table sequence
        sequence = []
        for table in all_tables:
            # fetch the tables (already placed) that reference this one
            table_referencing_tables = references.get(table)
            table_referencing_tables = [] if table_referencing_tables is None else table_referencing_tables
            table_referencing_tables_inserted = [referencing_table for referencing_table in table_referencing_tables if referencing_table in sequence]
            # calculate index to insert
            max_index = len(sequence) # the maxium index to insert is the maximum
            referencing_tables_indexes = [sequence.index(referencing_table) for referencing_table in table_referencing_tables_inserted]
            possible_indexes_to_insert = [max_index] + [index + 1 for index in referencing_tables_indexes]
            # select a position previous to all referencing tables (because the generation of that tables depends directly on current table)
            index_to_insert = min(possible_indexes_to_insert)
            # insert into sequence
            sequence.insert(index_to_insert, table)
        logger.debug(f'calculated directive sequence: {sequence}')
        # update directives
        for directive in directives:
            position_in_sequence = sequence.index(directive.name)
            directive.index = position_in_sequence

    def build(self, dbml: PyDBML, config: Dict[str, Any]
              ) -> List[GeneratorDirective]:
        """Entry point: calculate dependencies, build directives, and assign
        the generation-sequence indexes."""
        # create directives mixing the populate and the dependencies (to avoid multiple data generation)
        logger.debug('calculating dependencies')
        dependencies = self._calculate_dependencies(
            dbml=dbml
        )
        # create directives mixing the populate and the dependencies (to avoid multiple data generation)
        logger.debug('instancing building directives')
        directives = self._build_directives(
            tables_config=config, dependencies=dependencies
        )
        # order the tables in a inorder iteration (tree iteration) to generate first the tables with no dependencies
        logger.debug('calculating table sequency')
        self._calculate_sequence(
            directives=directives, dependencies=dependencies
        )
        # NOTE(review): the sorted list is bound to the local name
        # `dependencies` (shadowing the dependency dict) while the UNsorted
        # `directives` list is returned below -- this sort has no visible
        # effect. Each directive's .index is set above so callers can
        # re-sort, but confirm whether the sorted list was meant to be
        # returned.
        dependencies = list(sorted(directives, key=lambda x: x.index))
        return directives
| GandalFran/fake-db-data-generator | fake_db_datagen/directive_builder.py | directive_builder.py | py | 18,209 | python | en | code | 3 | github-code | 13 |
72097673617 | from pyradioconfig.parts.bobcat.profiles.Profile_WiSUN import Profile_WiSUN_Bobcat
from pyradioconfig.parts.common.profiles.viper_regs import build_modem_regs_viper
from pyradioconfig.parts.common.profiles.profile_common import buildCrcOutputs, buildFecOutputs, buildFrameOutputs, \
buildWhiteOutputs
class profile_wisun_viper(Profile_WiSUN_Bobcat):
def __init__(self):
self._profileName = "WiSUN"
self._readable_name = "WiSUN Profile"
self._category = ""
self._description = "Profile used for WiSUN PHYs"
self._default = False
self._activation_logic = ""
self._family = "viper"
def build_register_profile_outputs(self, model, profile):
family = self._family
build_modem_regs_viper(model, profile, family)
buildFrameOutputs(model, profile, family)
buildCrcOutputs(model, profile, family)
buildWhiteOutputs(model, profile)
buildFecOutputs(model, profile) | jc-plhm/Z3GatewayHost_Sectronic | src/platform/radio/efr32_multiphy_configurator/pyradioconfig/parts/viper/profiles/Profile_WiSUN.py | Profile_WiSUN.py | py | 972 | python | en | code | 1 | github-code | 13 |
39777640769 | import html
import logging
from pathlib import Path
from django.conf import settings
from django.contrib import admin
from django.contrib.auth import get_user_model
from django.contrib.sites.models import Site
from django.core import mail
from django.core.cache import cache
from django.core.validators import URLValidator
from django.db import IntegrityError, models, transaction
from django.db.models import Q
from django.template.loader import render_to_string
from django.urls import reverse
from django.utils import timezone
from django.utils.html import strip_tags
from django.utils.translation import gettext as _
from tinymce.models import HTMLField
from packman.calendars.models import PackYear
from packman.committees.models import Committee
from packman.core.models import TimeStampedModel, TimeStampedUUIDModel
from packman.dens.models import Den
from packman.membership.models import Family
from .managers import MessageManager, MessageRecipientQuerySet
from .utils import ListEmailMessage
logger = logging.getLogger(__name__)
User = get_user_model()
def get_upload_to(instance, filename):
return _("mail/%(uuid)s/%(file)s") % {"uuid": instance.message.uuid, "file": filename}
class Mailbox(models.TextChoices):
INBOX = "inbox", _("Inbox")
DRAFTS = "drafts", _("Drafts")
OUTBOX = "outbox", _("Outbox")
SENT = "sent", _("Sent")
ARCHIVES = "archives", _("Archives")
TRASH = "trash", _("Trash")
@classmethod
def outbound(cls):
return [cls.DRAFTS.value, cls.OUTBOX.value, cls.SENT.value]
class DistributionList(TimeStampedModel):
"""
A simple model to track message group addresses and their members.
"""
name = models.CharField(_("name"), max_length=150, unique=True)
is_all = models.BooleanField(
_("all members"),
default=False,
help_text=_(
"Messages sent to this distribution list should be delivered to " "all active members of the Pack."
),
)
dens = models.ManyToManyField(
Den, related_name="distribution_list", related_query_name="distribution_list", blank=True
)
committees = models.ManyToManyField(
Committee, related_name="distribution_list", related_query_name="distribution_list", blank=True
)
class Meta:
ordering = ["name"]
verbose_name = _("Distribution List")
verbose_name_plural = _("Distribution Lists")
def __str__(self):
return self.name
def get_default_email(self):
return self.addresses.get(is_default=True)
def get_members(self):
if self.is_all:
return User.objects.active()
return (
User.objects.active()
.filter(
Q(committee_membership__year=PackYear.objects.current(), committee__in=self.committees.all())
| Q(family__in=Family.objects.in_den(self.dens.all()))
)
.distinct()
)
class EmailAddress(TimeStampedModel):
"""
A simple model to track an email address.
"""
distribution_list = models.ForeignKey(DistributionList, on_delete=models.CASCADE, related_name="addresses")
address = models.EmailField(
_("email address"),
unique=True,
error_messages={"unique": _("A distribution list with this email address already exists.")},
)
is_default = models.BooleanField(
_("default"),
default=False,
help_text=_("Indicates whether this address should be used as the default from address."),
)
class Meta:
ordering = ("-is_default", "address")
verbose_name = _("Email Address")
verbose_name_plural = _("Email Addresses")
def __str__(self):
return self.address
def save(self, **kwargs):
# Check to ensure whether one and only one address is default for the distribution list.
if self.is_default:
EmailAddress.objects.filter(distribution_list=self.distribution_list, is_default=True).update(
is_default=False
)
elif not EmailAddress.objects.filter(distribution_list=self.distribution_list, is_default=True).exists():
self.is_default = True
super().save(**kwargs)
class Thread(TimeStampedUUIDModel):
"""
A simple model to track a thread of messages.
"""
class Meta:
verbose_name = _("Thread")
verbose_name_plural = _("Threads")
def __str__(self):
return str(self.pk)
class Message(TimeStampedUUIDModel):
"""
A model describing the structure of a message.
"""
class Delivery(models.TextChoices):
TO = "to", _("To")
CC = "cc", _("Cc")
# BCC = "bcc", _("Bcc")
class Status(models.TextChoices):
DRAFT = "DRAFT", _("Draft")
QUEUED = "QUEUED", _("Queued")
SENDING = "SENDING", _("Sending")
SENT = "SENT", _("Sent")
FAILED = "FAILED", _("Failed")
author = models.ForeignKey(
User, on_delete=models.CASCADE, related_name="sent_messages", related_query_name="sent_message", blank=True
)
recipients = models.ManyToManyField(User, related_name="messages", through="MessageRecipient", blank=True)
distribution_lists = models.ManyToManyField(
DistributionList, related_name="messages", through="MessageDistribution", blank=True
)
subject = models.CharField(_("subject"), max_length=150)
body = HTMLField(_("body"))
thread = models.ForeignKey(Thread, on_delete=models.CASCADE, related_name="messages", blank=True, null=True)
parent = models.ForeignKey(
"self", on_delete=models.CASCADE, related_name="replies", blank=True, null=True, verbose_name=_("replying to")
)
date_sent = models.DateTimeField(_("sent"), blank=True, null=True)
status = models.CharField(_("status"), max_length=8, choices=Status.choices, default=Status.DRAFT, editable=False)
objects = MessageManager()
class Meta:
get_latest_by = "last_updated"
ordering = ["-last_updated"]
verbose_name = _("Message")
verbose_name_plural = _("Messages")
def __str__(self):
return self.subject
def save(self, **kwargs):
if not self.thread:
self.thread = self.parent.thread if self.parent else Thread.objects.create()
super().save(**kwargs)
def get_absolute_url(self):
return reverse("mail:detail", kwargs={"pk": self.pk})
def get_mailbox(self):
if self.status == Message.Status.SENT:
return Mailbox.SENT
elif self.status == Message.Status.DRAFT:
return Mailbox.DRAFTS
elif self.status == Message.Status.SENDING:
return Mailbox.OUTBOX
def send(self):
# ensure all mailboxes are expanded
self.expand_distribution_lists()
if not self.recipients.exists():
raise AttributeError(_("Cannot send an Email with no recipients."))
# open a connection to the mail server
with mail.get_connection() as connection:
messages = self._personalize_messages()
succeeded = connection.send_messages(messages)
logger.info(_("Sent %d emails") % succeeded)
# Mark the message as sent
self.date_sent = timezone.now()
self.status = Message.Status.SENT
self.save()
def _personalize_messages(self):
messages = []
protocol = "https" if settings.CSRF_COOKIE_SECURE else "http"
site = Site.objects.get_current()
author_name = self.author.__str__()
author_email = self.author.email
distros_string = ", ".join(self.distribution_lists.values_list("name", flat=True))
subject = f"[{distros_string}] {self.subject}"
for recipient in self.recipients.all():
logger.info(_("Generating an email copy for %s") % recipient)
# Personalize the email for each recipient.
context = {"site": site, "message": self, "recipient": recipient, "protocol": protocol}
plaintext = render_to_string("mail/message_body.txt", context)
richtext = render_to_string("mail/message_body.html", context)
# compose the email
msg = ListEmailMessage(
subject,
plaintext,
to=[f"{recipient.__str__()} <{recipient.email}>"],
reply_to=[f"{author_name} <{author_email}>"],
alternatives=[(richtext, "text/html")],
settings=ListSettings.current(),
)
# add any attachments
for attachment in self.attachments.all():
msg.attach_file(attachment.filename.path)
messages.append(msg)
return messages
@admin.display(boolean=True, description=_("sent"))
def sent(self):
return bool(self.date_sent)
def get_plaintext_body(self):
return html.unescape(strip_tags(self.body))
def get_recipients(self):
distros = (
self.distribution_lists.filter(addresses__is_default=True)
.values_list("message_distribution_list__delivery", "addresses__address", "name")
.order_by()
)
recipients = (
self.recipients.filter(message_recipient__from_distro=False)
.values_list("message_recipient__delivery", "_short_name", "email")
.order_by()
)
return recipients.union(distros)
def expand_distribution_lists(self):
# Create unique MessageRecipient instances for each DistributionList in the message.
for delivery, label in Message.Delivery.choices:
for distribution_list in self.distribution_lists.filter(message_distribution_list__delivery=delivery):
for member in distribution_list.get_members():
try:
with transaction.atomic():
MessageRecipient.objects.create(
message=self, recipient=member, delivery=delivery, from_distro=True
).distros.add(distribution_list)
except IntegrityError:
# The member is already a recipient of the message,
# check that we have the delivery level correct.
recipient = MessageRecipient.objects.get(message=self, recipient=member)
recipient.distros.add(distribution_list)
if recipient.delivery < delivery:
recipient.delivery = delivery
recipient.save()
def mark_read(self, recipient):
MessageRecipient.objects.get(message=self, recipient=recipient).mark_read()
def mark_unread(self, recipient):
MessageRecipient.objects.get(message=self, recipient=recipient).mark_unread()
def mark_archived(self, recipient):
MessageRecipient.objects.get(message=self, recipient=recipient).mark_archived()
def mark_unarchived(self, recipient):
MessageRecipient.objects.get(message=self, recipient=recipient).mark_unarchived()
def mark_deleted(self, recipient):
MessageRecipient.objects.get(message=self, recipient=recipient).mark_deleted()
def mark_undeleted(self, recipient):
MessageRecipient.objects.get(message=self, recipient=recipient).mark_undeleted()
class Attachment(models.Model):
"""
A simple model to track message attachments.
"""
message = models.ForeignKey(Message, on_delete=models.CASCADE, related_name="attachments")
filename = models.FileField(_("file"), upload_to=get_upload_to, help_text=_("attachments should be under 5mb."))
class Meta:
verbose_name = _("Attachment")
verbose_name_plural = _("Attachments")
def __str__(self):
return Path(self.filename.name).name
class MessageRecipient(models.Model):
"""
An intermediate through model to track an individual member's copy of a message.
"""
delivery = models.CharField("", max_length=3, choices=Message.Delivery.choices, default=Message.Delivery.TO)
message = models.ForeignKey(
Message, on_delete=models.CASCADE, related_name="message_recipients", related_query_name="message_recipient"
)
recipient = models.ForeignKey(
User, on_delete=models.CASCADE, related_name="message_recipients", related_query_name="message_recipient"
)
from_distro = models.BooleanField(
_("distribution list"),
default=False,
help_text=_(
"Specify whether this recipient was included in the message "
"directly or as part of a larger distribution list."
),
)
distros = models.ManyToManyField(DistributionList)
date_received = models.DateTimeField(_("received"), auto_now_add=True)
date_read = models.DateTimeField(_("read"), blank=True, null=True)
date_archived = models.DateTimeField(_("archived"), blank=True, null=True)
date_deleted = models.DateTimeField(_("deleted"), blank=True, null=True)
objects = MessageRecipientQuerySet.as_manager()
class Meta:
constraints = [models.UniqueConstraint(fields=("message", "recipient"), name="unique_message_per_recipient")]
verbose_name = _("Message Recipient")
verbose_name_plural = _("Message Recipients")
def __str__(self):
return self.recipient.__str__()
@admin.display(boolean=True, description=_("read"))
def is_read(self):
return bool(self.date_read)
def is_archived(self):
return bool(self.date_archived)
def is_deleted(self):
return bool(self.date_deleted)
def get_mailbox(self):
if self.message.author == self.recipient:
if self.message.status == Message.Status.SENT:
return Mailbox.SENT
elif self.message.status == Message.Status.DRAFT:
return Mailbox.DRAFTS
elif self.message.status == Message.Status.SENDING:
return Mailbox.OUTBOX
elif self.is_deleted():
return Mailbox.TRASH
elif self.is_archived():
return Mailbox.ARCHIVES
else:
return Mailbox.INBOX
def mark_read(self):
if not self.date_read:
self.date_read = timezone.now()
self.save()
def mark_unread(self):
self.date_read = None
self.save()
def mark_archived(self):
if not self.date_archived:
self.date_archived = timezone.now()
self.date_deleted = None
self.save()
def mark_unarchived(self):
self.date_archived = None
self.date_deleted = None
self.save()
def mark_deleted(self):
if not self.date_deleted:
self.date_deleted = timezone.now()
self.date_archived = None
self.save()
def mark_undeleted(self):
self.date_deleted = None
self.date_archived = None
self.save()
class MessageDistribution(models.Model):
"""
An intermediate through model to track delivery of bulk messages.
"""
delivery = models.CharField("", max_length=3, choices=Message.Delivery.choices, default=Message.Delivery.TO)
message = models.ForeignKey(Message, on_delete=models.CASCADE, related_query_name="message_distribution_list")
distribution_list = models.ForeignKey(
DistributionList, on_delete=models.CASCADE, related_query_name="message_distribution_list"
)
class Meta:
constraints = [
models.UniqueConstraint(
fields=("message", "distribution_list"), name=_("unique_message_per_distribution_list")
)
]
verbose_name = _("Message Distribution List")
verbose_name_plural = _("Message Distribution Lists")
def __str__(self):
return self.distribution_list.__str__()
class ListSettings(TimeStampedModel):
"""
A simple model to track settings for email lists
"""
list_id = models.CharField(
_("list ID"),
max_length=100,
validators=[URLValidator],
help_text=_(
"A List-Id will be included in the header of any email sent from "
"this application. The List-Id should be unique to the list and "
"clearly identify your organization (e.g. lists.example.com)."
),
)
name = models.CharField(
_("name"),
max_length=100,
blank=True,
help_text=_("An optional descriptive name for emails generated by this " "application."),
)
from_name = models.CharField(
_("from name"),
max_length=40,
blank=True,
help_text=_(
"The name that will be displayed in the From line of emails sent "
"from this list. If left blank, will default to the from email."
),
)
from_email = models.EmailField(
_("from email"),
blank=True,
help_text=_(
"The email address that emails sent from this list will "
"originate from. If left blank, will default to the site's "
"DEFAULT_FROM_EMAIL as specified in site settings."
),
)
subject_prefix = models.CharField(
_("subject line prefix"),
max_length=20,
blank=True,
help_text=_(
"If provided, the subject prefix will precede every sent email's " "subject in the email subject field."
),
)
class Meta:
verbose_name = _("Settings")
verbose_name_plural = _("Settings")
def __str__(self):
return self.list_id
def save(self, *args, **kwargs):
if self.__class__.objects.count():
self.pk = self.__class__.objects.first().pk
cache.set("mail_settings", self)
super().save(*args, **kwargs)
@classmethod
def current(cls):
mail_settings = cache.get("mail_settings")
if not mail_settings:
try:
mail_settings = cls.objects.first()
cache.set("mail_settings", mail_settings)
except cls.DoesNotExist:
pass
return mail_settings
| Pack144/packman | packman/mail/models.py | models.py | py | 18,221 | python | en | code | 1 | github-code | 13 |
7164715691 | import torch
import torch.nn as nn
import numpy as np
import pickle
import torch.nn.functional as F
from model import models
from model.models import FewShotModel
def pairwise_distances_logits(query, proto, distance_type='euclid'):
# query n * dim
# prototype n_c * dim
if distance_type == 'euclid':
n = query.shape[0]
m = proto.shape[0]
distances = -((query.unsqueeze(1).expand(n, m, -1) -
proto.unsqueeze(0).expand(n, m, -1))**2).sum(dim=2)
# print(distances.shape)
return distances
elif distance_type == 'cosine':
emb_dim = proto.shape[-1]
proto = F.normalize(proto, dim=-1) # normalize for cosine distance
query = query.view(-1, emb_dim) # (Nbatch, Nq*Nw, d)
# print([query.shape, proto.shape])
logits = torch.matmul(query, proto.transpose(1,0))
# print('logits.shape', logits.shape)
return logits
def get_k_base(proto, all_proto, return_weights=False, k=10, train=True):
# print('train is ', train)
if train:
start = 1
end =0
else:
start = 0
end = 1
similarities = pairwise_distances_logits(proto, all_proto).squeeze()
if similarities.dim() ==1:
similarities = similarities.unsqueeze(0)
similarities_sorted = torch.sort(similarities, descending=True, dim=1)
a_ind = similarities_sorted.indices
if return_weights:
a = similarities_sorted.values
a = F.softmax(a[:,start:], dim=1)
return a_ind[:, start:k-end], a[:, :k-start-end]
return a_ind[:, start:k-end]
class ScaledDotProductAttention(nn.Module):
''' Scaled Dot-Product Attention '''
def __init__(self, temperature, attn_dropout=0.1):
super().__init__()
self.temperature = temperature
self.dropout = nn.Dropout(attn_dropout)
self.softmax = nn.Softmax(dim=2)
def forward(self, q, k, v):
attn = torch.bmm(q, k.transpose(1, 2))
attn = attn / self.temperature
log_attn = F.log_softmax(attn, 2)
attn = self.softmax(attn)
attn = self.dropout(attn)
output = torch.bmm(attn, v)
return output, attn, log_attn
class MultiHeadAttention(nn.Module):
''' Multi-Head Attention module '''
def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1):
super().__init__()
self.n_head = n_head
self.d_k = d_k
self.d_v = d_v
self.w_qs = nn.Linear(d_model, n_head * d_k, bias=False)
self.w_ks = nn.Linear(d_model, n_head * d_k, bias=False)
self.w_vs = nn.Linear(d_model, n_head * d_v, bias=False)
nn.init.normal_(self.w_qs.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_k)))
nn.init.normal_(self.w_ks.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_k)))
nn.init.normal_(self.w_vs.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_v)))
self.attention = ScaledDotProductAttention(temperature=np.power(d_k, 0.5))
self.layer_norm = nn.LayerNorm(d_model)
self.fc = nn.Linear(n_head * d_v, d_model)
nn.init.xavier_normal_(self.fc.weight)
self.dropout = nn.Dropout(dropout)
def forward(self, q, k, v):
d_k, d_v, n_head = self.d_k, self.d_v, self.n_head
sz_b, len_q, _ = q.size()
sz_b, len_k, _ = k.size()
sz_b, len_v, _ = v.size()
residual = q
q = self.w_qs(q).view(sz_b, len_q, n_head, d_k)
k = self.w_ks(k).view(sz_b, len_k, n_head, d_k)
v = self.w_vs(v).view(sz_b, len_v, n_head, d_v)
q = q.permute(2, 0, 1, 3).contiguous().view(-1, len_q, d_k) # (n*b) x lq x dk
k = k.permute(2, 0, 1, 3).contiguous().view(-1, len_k, d_k) # (n*b) x lk x dk
v = v.permute(2, 0, 1, 3).contiguous().view(-1, len_v, d_v) # (n*b) x lv x dv
output, attn, log_attn = self.attention(q, k, v)
output = output.view(n_head, sz_b, len_q, d_v)
output = output.permute(1, 2, 0, 3).contiguous().view(sz_b, len_q, -1) # b x lq x (n*dv)
output = self.dropout(self.fc(output))
output = self.layer_norm(output + residual)
return output
class FEATBaseTransformer(FewShotModel):
def __init__(self, args):
super().__init__(args)
if args.backbone_class == 'ConvNet':
hdim = 64
elif args.backbone_class == 'Res12':
hdim = 640
elif args.backbone_class == 'Res18':
hdim = 512
elif args.backbone_class == 'WRN':
hdim = 640
else:
raise ValueError('')
self.slf_attn = MultiHeadAttention(1, hdim, hdim, hdim, dropout=0.5)
if args.base_protos==1:
# using base protos
print('Using base protos')
proto_dict = pickle.load(open('/home/mayug/projects/few_shot/notebooks/proto_dict_new.pkl', 'rb'))
self.all_proto = torch.cat([torch.Tensor(proto_dict[i]).unsqueeze(0) for i in range(len(proto_dict))], dim=0).cuda()
elif args.base_protos==0:
# using base instances
print('using base instances')
proto_dict = torch.load('/home/mayug/projects/few_shot/notebooks/embeds_cache.pt')
self.all_proto = proto_dict['embeds'].cuda()
self.proto_dict = proto_dict
self.proto_dict = proto_dict
self.after_attn = None
def get_base_protos(self, proto):
proto = proto.squeeze()
top_k = get_k_base(proto, self.all_proto, k=self.args.k)
# print(proto.shape)
# print(top_k)
base_protos = self.all_proto[top_k, :]
# print(base_protos.shape)
# base_protos = base_protos.mean(1).unsqueeze(0)
# print(base_protos.shape)
return base_protos
def _forward(self, instance_embs, support_idx, query_idx):
emb_dim = instance_embs.size(-1)
# organize support/query data
support = instance_embs[support_idx.contiguous().view(-1)].contiguous().view(*(support_idx.shape + (-1,)))
query = instance_embs[query_idx.contiguous().view(-1)].contiguous().view( *(query_idx.shape + (-1,)))
# get mean of the support
proto = support.mean(dim=1) # Ntask x NK x d
# add closest 10 base classes to proto and calculate mean
base_protos = self.get_base_protos(proto)
# print('base_protos', base_protos.shape)
# print('proto ', proto.shape)
# including base_protos into key and value
proto = proto.squeeze()
combined_protos = torch.cat([proto.unsqueeze(1), base_protos], dim=1)
proto = proto.unsqueeze(0)
# print('combined_protos ', combined_protos.shape)
combined_protos = combined_protos.reshape(-1, emb_dim).unsqueeze(0)
# print('combined_protos ', combined_protos.shape)
num_batch = proto.shape[0]
num_proto = proto.shape[1]
num_query = np.prod(query_idx.shape[-2:])
# query: (num_batch, num_query, num_proto, num_emb)
# proto: (num_batch, num_proto, num_emb)
proto = self.slf_attn(proto, combined_protos, combined_protos)
self.after_attn = proto
# print('after attention ', proto.shape)
# asd
if self.args.use_euclidean:
query = query.view(-1, emb_dim).unsqueeze(1) # (Nbatch*Nq*Nw, 1, d)
proto = proto.unsqueeze(1).expand(num_batch, num_query, num_proto, emb_dim).contiguous()
proto = proto.view(num_batch*num_query, num_proto, emb_dim) # (Nbatch x Nq, Nk, d)
logits = - torch.sum((proto - query) ** 2, 2) / self.args.temperature
else:
proto = F.normalize(proto, dim=-1) # normalize for cosine distance
query = query.view(num_batch, -1, emb_dim) # (Nbatch, Nq*Nw, d)
logits = torch.bmm(query, proto.permute([0,2,1])) / self.args.temperature
logits = logits.view(-1, num_proto)
# for regularization
if self.training:
# return logits, None
# TODO this can be further adapted for basetransformer version
aux_task = torch.cat([support.view(1, self.args.shot, self.args.way, emb_dim),
query.view(1, self.args.query, self.args.way, emb_dim)], 1) # T x (K+Kq) x N x d
num_query = np.prod(aux_task.shape[1:3])
aux_task = aux_task.permute([0, 2, 1, 3])
aux_task = aux_task.contiguous().view(-1, self.args.shot + self.args.query, emb_dim)
# apply the transformation over the Aug Task
aux_emb = self.slf_attn(aux_task, aux_task, aux_task) # T x N x (K+Kq) x d
# compute class mean
aux_emb = aux_emb.view(num_batch, self.args.way, self.args.shot + self.args.query, emb_dim)
aux_center = torch.mean(aux_emb, 2) # T x N x d
if self.args.use_euclidean:
aux_task = aux_task.permute([1,0,2]).contiguous().view(-1, emb_dim).unsqueeze(1) # (Nbatch*Nq*Nw, 1, d)
aux_center = aux_center.unsqueeze(1).expand(num_batch, num_query, num_proto, emb_dim).contiguous()
aux_center = aux_center.view(num_batch*num_query, num_proto, emb_dim) # (Nbatch x Nq, Nk, d)
# print('aux center ', aux_center.shape)
# print('aux_task ', aux_task.shape)
logits_reg = - torch.sum((aux_center - aux_task) ** 2, 2) / self.args.temperature2
else:
aux_center = F.normalize(aux_center, dim=-1) # normalize for cosine distance
aux_task = aux_task.permute([1,0,2]).contiguous().view(num_batch, -1, emb_dim) # (Nbatch, Nq*Nw, d)
logits_reg = torch.bmm(aux_task, aux_center.permute([0,2,1])) / self.args.temperature2
logits_reg = logits_reg.view(-1, num_proto)
return logits, logits_reg
else:
return logits
| mayug/BaseTransformers | model/models/feat_basetransformer.py | feat_basetransformer.py | py | 10,071 | python | en | code | 8 | github-code | 13 |
35735270255 | age = 27
isDrunk = False
isRestrictedArea = False
if age < 18:
print("You are too young")
elif isDrunk:
print("You are drunk. No alcohol")
elif isRestrictedArea:
print("Restricted, alco forbidden")
else:
print("Ok, drink")
print("------------")
print("ZADANIE 1")
min_likes = 500
min_shares = 100
num_likes = 1300
num_shares = 150
if num_likes >= min_likes and num_shares >= min_shares:
print("Ceny obnizamy o 10%")
elif num_likes < min_likes:
print("Za malo polubien na obnizke cen")
elif num_shares < min_shares:
print("Za mało udostepnien na obnizke cen")
print("ZADANIE 2")
isPizzaOrdered = True
isBigDrinkOrdered = False
isWeekend = False
if (isPizzaOrdered or isBigDrinkOrdered) and not isWeekend:
print("Masz pan kupon na burgera")
elif isWeekend:
print("Promocja tylko poza weekendem, ni mo kuponu")
else:
print("Kupon bedzie jak kupisz pizze lub duzy napoj") | w0jtech/python_udemy | 15_elif.py | 15_elif.py | py | 952 | python | en | code | 0 | github-code | 13 |
28813940416 | import os
from karton.android import Android
from karton.core import Task
from karton.core.test import KartonTestCase, TestResource
class AndroidMagicTestCase(KartonTestCase):
karton_class = Android
def test_android(self):
testcase = os.path.join(os.path.dirname(__file__), "testsdata", "example.apk")
with self.subTest(testcase):
with open(testcase, "rb") as f:
content = f.read()
sample = TestResource(testcase, content)
expected = Task(
{
"type": "sample",
"stage": "analyzed",
"origin": "karton.android",
},
payload={
"sample": sample,
"attributes": {
"certificate": ["61ED377E85D386A8DFEE6B864BD85B0BFAA5AF81"],
"main_activity": [
"com.example.android.contactmanager..ContactManager"
],
"package": ["com.example.android.contactmanager"],
"activities": [
"com.example.android.contactmanager..ContactManager",
"com.example.android.contactmanager.ContactAdder",
],
"permissions": [
"android.permission.GET_ACCOUNTS",
"android.permission.READ_CONTACTS",
"android.permission.WRITE_CONTACTS",
],
"certificate_not_after": ["Jul 17 01:33:46 2035 UTC"],
"certificate_serial": ["10623618503190643167"],
"certificate_subject": [
"Email Address: android@android.com,"
" Common Name: Android,"
" Organizational Unit: Android,"
" Organization: Android,"
" Locality: Mountain View,"
" State/Province: California,"
" Country: US"
],
"certificate_not_before": ["Feb 29 01:33:46 2008 UTC"],
"certificate_issuer": [
"Email Address: android@android.com,"
" Common Name: Android,"
" Organizational Unit: Android,"
" Organization: Android,"
" Locality: Mountain View,"
" State/Province: California,"
" Country: US"
],
"app_name": ["Contact Manager"],
},
},
)
task = Task(
{
"type": "sample",
"extension": "apk",
},
payload={"sample": sample},
)
results = self.run_task(task)
self.assertTasksEqual(results, [expected])
| jvoisin/karton-android | tests/test_android.py | test_android.py | py | 3,159 | python | en | code | 4 | github-code | 13 |
39777261562 | """ defining Global Variables """
assignments = []
def assign_value(values, box, value):
"""
Please use this function to update your values dictionary!
Assigns a value to a given box. If it updates the board record it.
"""
# Don't waste memory appending actions that don't actually change any values
if values[box] == value:
return values
values[box] = value
if len(value) == 1:
assignments.append(values.copy())
return values
def naked_twins(values):
"""Eliminate values using the naked twins strategy.
Args:
values(dict): a dictionary of the form {'box_name': '123456789', ...}
Returns:
the values dictionary with the naked twins eliminated from peers.
"""
rows = 'ABCDEFGHI'
cols = '123456789'
unitlist,peers = preprocessing(rows,cols,False)
for un in unitlist:
# Find all instances of naked twins in a unit
twin_collection=[]
for ib in un:
if len(values[ib]) == 2:
twin_collection.append(ib)
twinlistlist=[]
while len(twin_collection)>0:
elem = twin_collection.pop()
for el in twin_collection:
if values[el]==values[elem]:
twinlistlist.append([elem,el])
twin_collection.remove(el)
# Eliminate the naked twins as possibilities for their peers
for tw in twinlistlist:
for ib in un:
if len(values[ib])==1 or ib in tw:
continue
for ch in list(values[tw[0]]):
if ch in list(values[ib]):
al = list(values[ib])
al.remove(ch)
values[ib]=''.join(al)
return values
def cross(A, B):
"Cross product of elements in A and elements in B."
return ([i+j for i in A for j in B])
def preprocessing(rows, cols, Isdiagonal):
"""initalizing the grid with rows and cols
Args:
inputs are row and column names of the grid
if Isdiagonal is true then we have a diagonal Sudoku
hence we should include in the contraints in the diagonal
direction as well.
Returns:
units: wich should have boxes with unique boxes in tange 1..9
peers: all the boxes which share a unit with the given box
unitlist: list of all the units
"""
boxes = cross(rows, cols)
row_units = [cross(r, cols) for r in rows]
column_units = [cross(rows, c) for c in cols]
square_units = [cross(rs, cs) for rs in ('ABC','DEF','GHI') for cs in ('123','456','789')]
unitlist = row_units + column_units + square_units
""" Adding the diagonal Constraints:
1- We have two more units in diagonal directions
2- All the peers should be updated as well
"""
if (Isdiagonal):
diag_unit = [[i+j for i,j in zip(list(rows),list(cols))]]
colslist = list(cols)
colslist.reverse()
inv_diag_unit = [[i+j for i,j in zip(list(rows),colslist)]]
unitlist = unitlist + diag_unit + inv_diag_unit
units = dict((s, [u for u in unitlist if s in u]) for s in boxes)
peers = dict((s, set(sum(units[s],[]))-set([s])) for s in boxes)
return unitlist,peers
def grid_values(grid, Isdiagonal):
"""
Convert grid into a dict of {square: char} with '123456789' for empties.
Args:
grid(string) - A grid in string form.
Returns:
A grid in dictionary form
Keys: The boxes, e.g., 'A1'
Values: The value in each box, e.g., '8'. If the box has no value, then the value will be '123456789'.
units: wich should have boxes with unique boxes in tange 1..9
peers: all the boxes which share a unit with the given box
unitlist: list of all the units
"""
rows = 'ABCDEFGHI'
cols = '123456789'
unitlist,peers = preprocessing(rows,cols,Isdiagonal)
sudodic = {}
num = 0
for key1 in rows:
for key2 in cols:
key = key1+key2
sudodic[key]=grid[num]
if sudodic[key] == '.':
sudodic[key] = cols
num += 1
return sudodic,unitlist,peers
def display(values):
"""
Display the values as a 2-D grid.
Args:
values(dict): The sudoku in dictionary form
"""
rows = 'ABCDEFGHI'
cols = '123456789'
width = 1+max(len(vals) for _,vals in values.items())
line = '+'.join(['-'*(width*3)]*3)
for r in rows:
print(''.join(values[r+c].center(width)+('|' if c in '36' else '')
for c in cols))
if r in 'CF': print(line)
return
def eliminate(values,unitlist,peers):
"""Eliminate values from peers of each box with a single value.
Go through all the boxes, and whenever there is a box with a single value,
eliminate this value from the set of values of all its peers.
Args:
values: Sudoku in dictionary form.
peers: all the boxes which share a unit with the given box
Returns:
Resulting Sudoku in dictionary form after eliminating values.
"""
for key,val in values.items():
lval = list(val)
if len(val)>1:
continue
for keyP in peers[key]:
lvalp = list(values[keyP])
lvalp = [item for item in lvalp if item not in lval]
values = assign_value(values,keyP,''.join(lvalp))
#values[keyP] = ''.join(lvalp)
return values
def only_choice(values,unitlist,peers):
    """Finalize all values that are the only choice for a unit.

    Go through all the units, and whenever there is a unit with a digit
    that fits in only one box, assign that digit to the box.

    Args:
        values: Sudoku in dictionary form.
        unitlist: list of all the units.
        peers: unused here, kept for a uniform API.
    Returns:
        Resulting Sudoku in dictionary form after filling in only choices.
    """
    for unit in unitlist:
        all_digits = []
        for box in unit:
            all_digits.extend(values[box])
        counts = {digit: all_digits.count(digit) for digit in all_digits}
        for digit, occurrences in counts.items():
            if occurrences != 1:
                continue
            # Exactly one box in this unit can still take `digit`; find it.
            for box in unit:
                if digit in values[box]:
                    values = assign_value(values, box, digit)
                    break
    return values
def reduce_puzzle(values,unitlist,peers):
    """
    Apply constraint propagation (eliminate + only_choice) until the grid
    stops changing.

    Args:
        values: the grid in dictionary form
        unitlist: list of all the units
        peers: all the boxes which share a unit with the given box
    Return:
        the reduced grid, or False if any box ends up with zero candidates
    """
    while True:
        solved_before = sum(1 for box in values if len(values[box]) == 1)
        values = eliminate(values, unitlist, peers)
        values = only_choice(values, unitlist, peers)
        solved_after = sum(1 for box in values if len(values[box]) == 1)
        # Sanity check: a box with no candidates means a contradiction.
        if any(len(values[box]) == 0 for box in values):
            return False
        # Stalled: no new box was solved this round, so we are at a fixpoint.
        if solved_after == solved_before:
            return values
def search(values,unitlist,peers):
    """Using depth-first search and propagation, try all possible values.

    Args:
        values: the grid in dictionary form
        unitlist: list of all the units
        peers: all the boxes which share a unit with the given box
    Return:
        the solved grid, False on contradiction, or None if no branch works
    """
    rows = 'ABCDEFGHI'
    cols = '123456789'
    boxes = cross(rows, cols)
    # First reduce the puzzle with constraint propagation.
    values = reduce_puzzle(values, unitlist, peers)
    if values is False:
        return False  # contradiction detected during propagation
    if all(len(values[box]) == 1 for box in boxes):
        return values  # solved
    # Branch on an unfilled box with the fewest remaining candidates.
    _, pivot = min((len(values[box]), box) for box in boxes if len(values[box]) > 1)
    for candidate in values[pivot]:
        trial = values.copy()
        trial[pivot] = candidate
        solved = search(trial, unitlist, peers)
        if solved:
            return solved
def solve(grid, Isdiagonal = True):
    """
    Find the solution to a Sudoku grid.
    Args:
        grid(string): a string representing a sudoku grid.
            Example: '2.............62....1....7...6..8...3...9...7...6..4...4....8....52.............3'
        Isdiagonal(bool): treat the two main diagonals as extra units.
    Returns:
        The dictionary representation of the final sudoku grid. False if no solution exists.
    """
    board, unitlist, peers = grid_values(grid, Isdiagonal)
    return search(board, unitlist, peers)
if __name__ == '__main__':
    # Demo: solve a diagonal sudoku and print the resulting grid.
    diag_sudoku_grid = '2.............62....1....7...6..8...3...9...7...6..4...4....8....52.............3'
    display(solve(diag_sudoku_grid, True))
    try:
        # Optional pygame visualization of the recorded assignments.
        from visualize import visualize_assignments
        visualize_assignments(assignments)
    except SystemExit:
        pass
    except:
        print('We could not visualize your board due to a pygame issue. Not a problem! It is not a requirement.')
| RezaUBC/AIND-Sudoku | solution.py | solution.py | py | 9,874 | python | en | code | 0 | github-code | 13 |
43768858326 | from colorama import Fore, Style
from pprint import pprint
def split_image_into_layers(image_data, img_width, img_height):
    """Split a Space Image Format pixel string into per-layer records.

    Args:
        image_data: string of digit characters, row-major, layer after layer.
        img_width: width of one layer in pixels.
        img_height: height of one layer in pixels.

    Returns:
        List of dicts, one per layer, with the layer index, its raw pixel
        string, and the counts of '0', '1' and '2' pixels it contains.
    """
    surface = img_width * img_height
    # Floor division instead of int(a / b): same result for well-formed
    # input, and no float round-trip; any trailing partial layer is ignored.
    amount_of_layers = len(image_data) // surface
    layers = []
    for layer_index in range(amount_of_layers):
        layer_data = image_data[layer_index * surface : (layer_index + 1) * surface]
        layers.append({
            "index": layer_index,
            "data": layer_data,
            "zero_count": layer_data.count("0"),
            "one_count": layer_data.count("1"),
            "two_count": layer_data.count("2"),
        })
    return layers
def decode_image(layers):
    """Flatten the layer stack into a single image.

    Pixel value 2 is transparent: for each position, the first
    non-transparent pixel from the top layer down wins.
    """
    image = [int(pixel) for pixel in layers[0]["data"]]
    for layer in layers[1:]:
        for pixel_index, layer_pixel in enumerate(int(p) for p in layer["data"]):
            if image[pixel_index] == 2 and layer_pixel != 2:
                image[pixel_index] = layer_pixel
    return image
def run_star15():
    """AoC 2019 day 8 part 1: on the layer with the fewest '0' pixels,
    multiply the number of '1' pixels by the number of '2' pixels."""
    img_width, img_height = 25, 6
    with open("input/star15") as input_file:
        layers = split_image_into_layers(
            input_file.read().strip(), img_width, img_height
        )
    # The layer containing the fewest zeros determines the checksum.
    min_layer = min(layers, key=lambda layer: layer["zero_count"])
    one_multiplied_by_two = min_layer["one_count"] * min_layer["two_count"]
    return f"Final number: {one_multiplied_by_two}"
def run_star16():
    """AoC 2019 day 8 part 2: stack the layers (first non-transparent pixel
    wins) and print the 25x6 image so the letters can be read off."""
    img_width, img_height = 25, 6
    with open("input/star15") as input_file:
        layers = split_image_into_layers(
            input_file.read().strip(), img_width, img_height
        )
    print(f"Final image:")
    image = decode_image(layers)
    for y in range(img_height):
        for x in range(img_width):
            pixel_index = y * img_width + x
            # White pixels (1) are printed; everything else renders as blank.
            pixel_str = image[pixel_index] if image[pixel_index] == 1 else " "
            print(pixel_str, end=" ")
        print()
    # The hand-read answer for this particular puzzle input.
    return f"Image message: CYUAH"
| xheory/advent-of-code-2019 | stars/star15and16.py | star15and16.py | py | 1,956 | python | en | code | 0 | github-code | 13 |
7829619130 | import waffle
from django.conf import settings
from django.shortcuts import get_object_or_404
from django.templatetags.static import static
from django.utils.timezone import is_naive
from requests import Session
from cl.lib import search_utils
from cl.lib.elasticsearch_utils import do_es_feed_query
from cl.lib.podcast import iTunesPodcastsFeedGenerator
from cl.lib.scorched_utils import ExtraSolrInterface
from cl.lib.timezone_helpers import localize_naive_datetime_to_court_timezone
from cl.search.documents import AudioDocument
from cl.search.feeds import JurisdictionFeed, get_item
from cl.search.forms import SearchForm
from cl.search.models import SEARCH_TYPES, Court
class JurisdictionPodcast(JurisdictionFeed):
    """iTunes-style podcast feed of the 20 latest oral arguments for one court.

    Items come from Solr unless the "oa-es-active" waffle flag routes the
    query to Elasticsearch.
    """

    feed_type = iTunesPodcastsFeedGenerator
    description = (
        "A chronological podcast of oral arguments with improved "
        "files and meta data. Hosted by Free Law Project through "
        "the CourtListener.com initiative. Not an official podcast."
    )
    subtitle = description
    summary = description
    iTunes_name = "Free Law Project"
    iTunes_email = "feeds@courtlistener.com"
    iTunes_image_url = f"https://storage.courtlistener.com{static('png/producer-2000x2000.png')}"
    iTunes_explicit = "no"
    item_enclosure_mime_type = "audio/mpeg"
    def title(self, obj):
        # obj is the (request, court) pair produced by get_object().
        _, court = obj
        return f"Oral Arguments for the {court.full_name}"
    def get_object(self, request, court):
        # Keep the request so items() can evaluate waffle flags against it.
        return request, get_object_or_404(Court, pk=court)
    def items(self, obj):
        """
        Returns a list of items to publish in this feed.
        """
        request, court = obj
        if not waffle.flag_is_active(request, "oa-es-active"):
            # Legacy path: query Solr directly for this court's arguments.
            with Session() as session:
                solr = ExtraSolrInterface(
                    settings.SOLR_AUDIO_URL, http_connection=session, mode="r"
                )
                params = {
                    "q": "*",
                    "fq": f"court_exact:{court.pk}",
                    "sort": "dateArgued desc",
                    "rows": "20",
                    "start": "0",
                    "caller": "JurisdictionPodcast",
                }
                items = solr.query().add_extra(**params).execute()
            return items
        else:
            # Elasticsearch path.
            cd = {
                "q": "*",
                "court": court.pk,
                "order_by": "dateArgued desc",
                "type": SEARCH_TYPES.ORAL_ARGUMENT,
            }
            search_query = AudioDocument.search()
            items = do_es_feed_query(search_query, cd, rows=20)
            return items
    def feed_extra_kwargs(self, obj):
        # Feed-level attributes consumed by iTunesPodcastsFeedGenerator.
        extra_args = {
            "iTunes_name": self.iTunes_name,
            "iTunes_email": self.iTunes_email,
            "iTunes_explicit": self.iTunes_explicit,
        }
        if isinstance(obj, tuple) and hasattr(obj[1], "pk"):
            # Jurisdiction feed: use the court-specific artwork.
            path = static(f"png/producer-{obj[1].pk}-2000x2000.png")
        else:
            # Not a jurisdiction API -- A search API.
            path = static("png/producer-2000x2000.png")
        extra_args[
            "iTunes_image_url"
        ] = f"https://storage.courtlistener.com{path}"
        return extra_args
    def item_extra_kwargs(self, item):
        return {
            "author": get_item(item)["court"],
            "duration": str(item["duration"]),
            "explicit": "no",
        }
    def item_enclosure_url(self, item):
        path = get_item(item)["local_path"]
        return f"https://storage.courtlistener.com/{path}"
    def item_enclosure_length(self, item):
        return get_item(item)["file_size_mp3"]
    def item_pubdate(self, item):
        pub_date = get_item(item)["dateArgued"]
        if not pub_date:
            return None
        if is_naive(pub_date):
            # Interpret naive timestamps in the argument court's timezone.
            pub_date = localize_naive_datetime_to_court_timezone(
                get_item(item)["court"], pub_date
            )
        return pub_date
    # Descriptions are produced by item_description() below, not a template.
    description_template = None
    def item_description(self, item):
        return get_item(item)["caseName"]
    def item_categories(self, item):
        return None
class AllJurisdictionsPodcast(JurisdictionPodcast):
    """Podcast of the 20 most recent oral arguments across every court."""

    title = (
        "CourtListener.com: Podcast of All Oral Arguments available in "
        "the Federal Circuit Courts (High Volume)"
    )
    def get_object(self, request):
        # No court filter; items() only needs the request for the waffle flag.
        return request
    def items(self, obj):
        if not waffle.flag_is_active(obj, "oa-es-active"):
            # Legacy Solr path.
            with Session() as session:
                solr = ExtraSolrInterface(
                    settings.SOLR_AUDIO_URL, http_connection=session, mode="r"
                )
                params = {
                    "q": "*",
                    "sort": "dateArgued desc",
                    "rows": "20",
                    "start": "0",
                    "caller": "AllJurisdictionsPodcast",
                }
                items = solr.query().add_extra(**params).execute()
            return items
        else:
            # Elasticsearch path.
            cd = {
                "q": "*",
                "order_by": "dateArgued desc",
                "type": SEARCH_TYPES.ORAL_ARGUMENT,
            }
            search_query = AudioDocument.search()
            items = do_es_feed_query(search_query, cd, rows=20)
            return items
class SearchPodcast(JurisdictionPodcast):
    """Podcast built from an arbitrary user search (query-string driven)."""

    title = "CourtListener.com Custom Oral Argument Podcast"
    def get_object(self, request, get_string):
        # The query parameters are re-read from request.GET in items().
        return request
    def items(self, obj):
        search_form = SearchForm(obj.GET)
        if search_form.is_valid():
            cd = search_form.cleaned_data
            if not waffle.flag_is_active(obj, "oa-es-active"):
                # Legacy Solr path: build the main query from the form data.
                with Session() as session:
                    solr = ExtraSolrInterface(
                        settings.SOLR_AUDIO_URL,
                        http_connection=session,
                        mode="r",
                    )
                    main_params = search_utils.build_main_query(
                        cd, highlight=False, facet=False
                    )
                    main_params.update(
                        {
                            "sort": "dateArgued desc",
                            "rows": "20",
                            "start": "0",
                            "caller": "SearchFeed",
                        }
                    )
                    items = solr.query().add_extra(**main_params).execute()
                return items
            else:
                # Elasticsearch path: force newest-first ordering.
                override_params = {
                    "order_by": "dateArgued desc",
                }
                cd.update(override_params)
                search_query = AudioDocument.search()
                items = do_es_feed_query(
                    search_query,
                    cd,
                    rows=20,
                )
                return items
        else:
            # Invalid search form -> empty feed.
            return []
| freelawproject/courtlistener | cl/audio/feeds.py | feeds.py | py | 6,942 | python | en | code | 435 | github-code | 13 |
34114246474 | from __future__ import annotations
from netqasm.runtime.settings import Simulator, get_simulator
if get_simulator() == Simulator.NETSQUID:
import netsquid as ns
import numpy as np
from netsquid.qubits import operators, qubitapi
from netsquid.qubits.qubit import Qubit as NetSquidQubit
def qubit_from(phi: float, theta: float) -> NetSquidQubit:
    """Only used for simulation output purposes.
    Builds a NetSquid qubit by rotating |0> by `theta` about Y, then by
    `phi` about Z (Bloch-sphere angles)."""
    if get_simulator() != Simulator.NETSQUID:
        raise RuntimeError(
            "`qubit_from` function only possible with NetSquid simulator"
        )
    (qubit,) = ns.qubits.create_qubits(1)
    ns.qubits.operate(qubit, operators.create_rotation_op(theta, (0, 1, 0)))
    ns.qubits.operate(qubit, operators.create_rotation_op(phi, (0, 0, 1)))
    return qubit
def to_dm(q: NetSquidQubit) -> np.ndarray:
    """Only used for simulation output purposes.
    Returns the reduced density matrix of qubit *q*."""
    if get_simulator() != Simulator.NETSQUID:
        raise RuntimeError("`to_dm` function only possible with NetSquid simulator")
    density_matrix = ns.qubits.reduced_dm(q)
    return density_matrix  # type: ignore
def get_fidelity(q1: NetSquidQubit, q2: np.ndarray) -> float:
    """Only used for simulation output purposes.
    Gets the fidelity between the states q1 and q2"""
    if get_simulator() != Simulator.NETSQUID:
        raise RuntimeError(
            "`get_fidelity` function only possible with NetSquid simulator"
        )
    fidelity = qubitapi.fidelity(q1, q2)
    return fidelity  # type: ignore
| QuTech-Delft/netqasm | netqasm/sdk/toolbox/sim_states.py | sim_states.py | py | 1,532 | python | en | code | 17 | github-code | 13 |
7716343150 | from google.oauth2.service_account import Credentials
from googleapiclient.discovery import build
import io
from googleapiclient.http import MediaIoBaseDownload
# Authenticate using the same service account as before
creds = Credentials.from_service_account_file('google_sheets_access.json')
# Create a Drive API client
drive_service = build('drive', 'v3', credentials=creds)
def get_file_id_from_url(file_url):
    """
    Given a Google Drive file URL, returns the file ID — the sixth
    '/'-separated component, e.g. .../file/d/<id>/view.
    """
    return file_url.split("/")[5]
def get_json_data(filename):
    """
    Download a file from Google Drive given its sharing URL.

    Args:
        filename: a Google Drive sharing URL.

    Returns (content, file_name, message):
        content: file body (str when UTF-8 decodable, otherwise raw bytes),
            or None on failure
        file_name: the Drive file name, or None if the URL could not be parsed
        message: human-readable status string (or the caught exception)
    """
    json_content = None
    file_name = None
    try:
        file_id = get_file_id_from_url(filename)
    except Exception as e:
        print(e)
        # Fix: return a 3-tuple like every other path, so callers that
        # unpack (content, file_name, message) don't blow up on errors.
        return None, None, "Unable to retrieve Scene JSON."
    try:
        # Use the Drive API client to download the file
        file = drive_service.files().get(fileId=file_id).execute()
        file_name = file['name']
        file_content = io.BytesIO()
        request = drive_service.files().get_media(fileId=file_id)
        media = MediaIoBaseDownload(file_content, request)
        done = False
        while done is False:
            _, done = media.next_chunk()
        json_content = file_content.getvalue()
        try:
            json_content = json_content.decode("utf-8")
        except UnicodeDecodeError:
            pass  # keep raw bytes when the payload is not UTF-8 text
        message = f"Successfully downloaded {file_name}."
    except Exception as e:
        message = e
    return json_content, file_name, message
if __name__ == "__main__":
print(get_json_data("https://drive.google.com/file/d/119s7vwybLpmjMgffnyOlBrv7aH3R1ncU/view?usp=share_link")) | dhowe/prompt-gen-client | drive_files.py | drive_files.py | py | 1,808 | python | en | code | 0 | github-code | 13 |
29376712623 | n = int(input("Write the number of digits you want to show:"))
# Build the first n Fibonacci numbers.
# Fixes two defects in the original: it appended to the list it was
# iterating over (fragile anti-pattern), and it looped forever for n < 2
# because the counter started at 2 and only ever increased.
f = [0, 1]
while len(f) < n:
    f.append(f[-1] + f[-2])
# For n < 2, trim the seed list instead of printing extra terms.
print(f if n >= 2 else f[:max(n, 0)])
| kolyasalubov/Lv-14.03.PythonFundamentals | AnastasiiaKaravaieva/HW_5/2.py | 2.py | py | 244 | python | en | code | 0 | github-code | 13 |
74870464336 | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class CBTInserter:
    """Amortized O(1) inserter for a complete binary tree (LeetCode 919).

    ``self.queue`` keeps, in BFS order, every node that can still accept a
    child; the front of the queue is always the next insertion parent.
    """

    def __init__(self, root: TreeNode):
        self.root = root
        self.queue = [root]
        self.left = True
        end = False
        # BFS level by level.  Leaves are pushed to the FRONT of the next
        # level's queue; as soon as a node with a missing right child is
        # found, the remaining unvisited nodes are spliced ahead of the
        # collected candidates and the scan stops.
        while not end and len(self.queue)>0:
            new_queue = []
            while not end and len(self.queue)>0:
                node = self.queue.pop(0)
                if not node.left and not node.right:
                    new_queue.insert(0, node)
                if node.left:
                    new_queue.append(node.left)
                if node.right:
                    new_queue.append(node.right)
                else:
                    self.queue.insert(0, node)
                    end = True
            if end:
                new_queue = self.queue + new_queue
            self.queue = new_queue

    def insert(self, val: int) -> int:
        """Attach ``val`` under the first available node; return that parent's value."""
        node = TreeNode(val)
        ret = self.queue[0].val
        # Removed a leftover debug ``print(self.queue)`` that spammed stdout
        # with raw TreeNode reprs on every insertion.
        if self.queue[0].left:
            # Parent already has a left child: fill the right slot and the
            # parent is now full, so drop it from the candidate queue.
            self.queue[0].right = node
            self.queue.pop(0)
        else:
            self.queue[0].left = node
        # The new leaf itself becomes a future insertion candidate.
        self.queue.append(node)
        return ret

    def get_root(self) -> TreeNode:
        """Return the root of the tree."""
        return self.root
# Your CBTInserter object will be instantiated and called as such:
# obj = CBTInserter(root)
# param_1 = obj.insert(val)
# param_2 = obj.get_root() | yeung66/leetcode-everyday | py/919.py | 919.py | py | 1,531 | python | en | code | 0 | github-code | 13 |
74175728018 | #!/usr/bin/env python3
from day01.main import load, test
from day02.main import load_memory
from day05.main import Program
def part1(filename):
    """Run the day-9 Intcode program with input 1 (BOOST test mode)."""
    program = Program(load_memory(filename, script=__file__), [1])
    return list(program.run_computer())
def part2(filename):
    """Run the day-9 Intcode program with input 2 (sensor boost mode)."""
    program = Program(load_memory(filename, script=__file__), [2])
    return list(program.run_computer())
if __name__== "__main__":
    # Statement examples (quine, 16-digit output, large number), then the
    # real puzzle input for both parts.
    test([109,1,204,-1,1001,100,1,100,1008,100,16,101,1006,101,0,99], part1('input-test-1.txt'))
    test(16, len(str(part1('input-test-2.txt')[0])))
    test([1125899906842624], part1('input-test-3.txt'))
    test([2171728567], part1('input.txt'))
    test([49815], part2('input.txt'))
| andrewmacheret/aoc | 2019/python/day09/main.py | main.py | py | 673 | python | en | code | 0 | github-code | 13 |
16711872703 | from argparse import ArgumentParser
from pathlib import Path
import os
import torch
import logging
import json
import random
import numpy as np
from collections import namedtuple
from tempfile import TemporaryDirectory
from torch.utils.data import DataLoader, Dataset, RandomSampler, Sampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm
from pytorch_transformers import WEIGHTS_NAME, CONFIG_NAME, BertForMaskedLM
from pytorch_transformers.modeling_bert import BertForPreTraining
from pytorch_transformers.tokenization_bert import BertTokenizer
from pytorch_transformers.optimization import AdamW, WarmupLinearSchedule
import torch.nn.functional as F
from util import MAX_TURN, PREVENT_FACTOR, PROMOTE_FACTOR, PREVENT_LIST, REDUCE_LIST, STOP_LIST, boolean_string
InputFeatures = namedtuple("InputFeatures", "input_ids input_mask segment_ids lm_label_ids no_ins")
log_format = '%(asctime)-10s: %(message)s'
logging.basicConfig(level=logging.INFO, format=log_format)
logger = logging.getLogger(__name__)
NOI_ID = 1
class Node(object):
    """One decoding hypothesis: a candidate token sequence plus its score and
    the bookkeeping needed to resume insertion-based generation."""
    def __init__(self, input_ids, segment_ids, input_mask, score, shift, length, pos_start, input_len_start):
        super(Node, self).__init__()
        self.input_ids = input_ids  # token ids of this hypothesis
        self.segment_ids = segment_ids  # BERT segment (token-type) ids
        self.input_mask = input_mask  # attention mask over input_ids
        self.score = score  # cumulative score of the hypothesis
        # NOTE(review): shift/pos_start/input_len_start look like resume
        # offsets for incremental decoding — confirm against the caller.
        self.shift = shift
        self.length=length
        self.pos_start=pos_start
        self.input_len_start=input_len_start
def set_seed(args):
    """Seed the python, numpy and torch RNGs (and all GPUs) for reproducibility."""
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
def convert_example_to_features(example, tokenizer, max_seq_length, id = 0, no_ins_at_first = False, tokenizing = False):
    """Turn one keyword example (list of word/phrase strings) into model features.

    Prepends [CLS], encodes (or id-maps) the tokens, pads every array to
    ``max_seq_length``, and records ``no_ins`` positions: gap indices where
    no token may be inserted (between sub-word pieces of the same token, and
    optionally before the first token).  Over-long examples are replaced by a
    short error sentence.
    """
    tokens = ["[CLS]"] + example
    if len([x for t in tokens for x in tokenizer.encode(t)]) > max_seq_length:
        logging.info(f"Warning: input id-{id} exceeds max sequence length limit!")
        tokens = ["[CLS]"] + ["Error : Input exceeds length limit;"]
    no_ins = [0] if no_ins_at_first else []
    if tokenizing:
        #input_ids = tokenizer.encode(" ".join(tokens))
        input_ids = [x for t in tokens for x in tokenizer.encode(t)]
        input_ids_lens = [len(tokenizer.encode(t)) for t in tokens]
        cur = 0
        for l in input_ids_lens:
            if l >=2 :
                # Forbid insertions between the sub-word pieces of one token.
                no_ins.extend([cur + x for x in range(0,l-1)])
            cur += l
    else:
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
    input_array = np.zeros(max_seq_length, dtype=np.int)
    input_array[:len(input_ids)] = input_ids
    mask_array = np.zeros(max_seq_length, dtype=np.bool)
    mask_array[:len(input_ids)] = 1
    segment_array = np.zeros(max_seq_length, dtype=np.bool)
    # -1 is the ignore/padding value for labels and no_ins slots.
    lm_label_array = np.full(max_seq_length, dtype=np.int, fill_value=-1)
    no_ins_array = np.full(max_seq_length, dtype=np.int, fill_value=-1)
    no_ins_array[:len(no_ins)] = no_ins
    features = InputFeatures(input_ids=input_array,
                             input_mask=mask_array,
                             segment_ids=segment_array,
                             lm_label_ids=lm_label_array,
                             no_ins=no_ins_array,
                             )
    return features
class PregeneratedDataset(Dataset):
    """Torch Dataset that loads keyword-separated training lines into padded
    feature arrays, optionally backed by on-disk memmaps to save RAM."""
    def __init__(self, training_path, epoch, tokenizer, num_data_epochs, max_seq_len = 256, sep=" ", no_ins_at_first = False, reduce_memory=False):
        self.vocab = tokenizer.vocab
        self.tokenizer = tokenizer
        self.epoch = epoch
        self.data_epoch = epoch % num_data_epochs
        data_file = training_path
        # One training example per line.
        num_samples = sum(1 for line in open(data_file))
        self.num_samples = num_samples
        seq_len = max_seq_len
        self.temp_dir = None
        self.working_dir = None
        if reduce_memory:
            # Back the big arrays with memmaps in a temporary directory.
            self.temp_dir = TemporaryDirectory()
            self.working_dir = Path(self.temp_dir.name)
            input_ids = np.memmap(filename=self.working_dir/'input_ids.memmap',
                                  mode='w+', dtype=np.int32, shape=(num_samples, seq_len))
            input_masks = np.memmap(filename=self.working_dir/'input_masks.memmap',
                                    shape=(num_samples, seq_len), mode='w+', dtype=np.bool)
            segment_ids = np.memmap(filename=self.working_dir/'segment_ids.memmap',
                                    shape=(num_samples, seq_len), mode='w+', dtype=np.bool)
            lm_label_ids = np.memmap(filename=self.working_dir/'lm_label_ids.memmap',
                                     shape=(num_samples, seq_len), mode='w+', dtype=np.int32)
            no_ins = np.memmap(filename=self.working_dir/'no_ins.memmap',
                                     shape=(num_samples, seq_len), mode='w+', dtype=np.int32)
            lm_label_ids[:] = -1
        else:
            input_ids = np.zeros(shape=(num_samples, seq_len), dtype=np.int32)
            input_masks = np.zeros(shape=(num_samples, seq_len), dtype=np.bool)
            segment_ids = np.zeros(shape=(num_samples, seq_len), dtype=np.bool)
            lm_label_ids = np.full(shape=(num_samples, seq_len), dtype=np.int32, fill_value=-1)
            no_ins = np.full(shape=(num_samples, seq_len), dtype=np.int32, fill_value=-1)
        logging.info(f"Loading training examples for epoch {epoch}")
        with data_file.open() as f:
            for i, line in enumerate(tqdm(f, total=num_samples, desc="Training examples")):
                if i >= num_samples:
                    break
                line = line.strip()
                # Each line is a `sep`-separated list of keywords/phrases.
                example = [s.lstrip().strip() for s in line.split(sep)]
                features = convert_example_to_features(example, tokenizer, seq_len, no_ins_at_first = no_ins_at_first, id = i, tokenizing=True)
                input_ids[i] = features.input_ids
                segment_ids[i] = features.segment_ids
                input_masks[i] = features.input_mask
                lm_label_ids[i] = features.lm_label_ids
                no_ins[i] = features.no_ins
        # Sanity check that the whole file was consumed.
        if i != num_samples - 1:
            logging.info("i={} not equal to num_samples={}".format(i, num_samples))
        logging.info("Loading complete!")
        self.num_samples = num_samples
        self.seq_len = seq_len
        self.input_ids = input_ids
        self.input_masks = input_masks
        self.segment_ids = segment_ids
        self.lm_label_ids = lm_label_ids
        self.no_ins = no_ins
    def __len__(self):
        return self.num_samples
    def __getitem__(self, item):
        # Cast the stored int32/bool rows to int64 tensors as the model expects.
        return (torch.tensor(self.input_ids[item].astype(np.int64)),
                torch.tensor(self.input_masks[item].astype(np.int64)),
                torch.tensor(self.segment_ids[item].astype(np.int64)),
                torch.tensor(self.lm_label_ids[item].astype(np.int64)),
                torch.tensor(self.no_ins[item].astype(np.int64)),
                )
def top_k_top_p_filtering(logits, top_k=0, top_p=0.0, filter_value=-float('Inf')):
    """ Filter a distribution of logits using top-k and/or nucleus (top-p) filtering
        Args:
            logits: logits distribution shape (vocabulary size); modified in place
            top_k > 0: keep only top k tokens with highest probability (top-k filtering).
            top_p > 0.0: keep the top tokens with cumulative probability >= top_p (nucleus filtering).
                Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751)
        From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317
    """
    assert logits.dim() == 1  # batch size 1 for now - could be updated for more but the code would be less clear
    top_k = min(top_k, logits.size(-1))  # never ask topk for more than vocab size
    if top_k > 0:
        # Everything strictly below the k-th best logit is masked out.
        kth_best = torch.topk(logits, top_k)[0][..., -1, None]
        logits[logits < kth_best] = filter_value
    if top_p > 0.0:
        sorted_logits, sorted_indices = torch.sort(logits, descending=True)
        cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
        # Mark tokens past the nucleus, then shift right by one so the first
        # token crossing the threshold is still kept.
        drop_sorted = cumulative_probs > top_p
        drop_sorted[..., 1:] = drop_sorted[..., :-1].clone()
        drop_sorted[..., 0] = 0
        logits[sorted_indices[drop_sorted]] = filter_value
    return logits
def greedy_search(model, input_ids, segment_ids, input_mask, no_ins = None, device='cuda', temperature=1.0, args=None, tokenizer=None, prevent=None, promote=None, reduce=None, verbose = None):
    """Insertion-based greedy decoding.

    For up to MAX_TURN rounds, runs the model and greedily inserts the argmax
    token into every gap unless the prediction is NOI ("no insertion").
    ``no_ins`` marks gaps where insertion is forbidden (inside multi-piece
    tokens); ``prevent``/``promote``/``reduce`` rescale selected vocab logits.
    Returns the final padded ``input_ids`` tensor.
    """
    if not verbose:
        verbose = args.verbose
    # Bracket tokens are hard-banned from generation below.
    zero_list = ["[", "]", "(", ")"]
    zero_ids = [ tokenizer.vocab.get(x) for x in zero_list]
    if verbose >0:
        print("\nInput %s" % (" ".join([str(tokenizer.ids_to_tokens.get(x, "noa").encode('ascii', 'ignore').decode('ascii')) for x in input_ids[0].detach().cpu().numpy() if x!=0])))
    # Active forbidden-gap positions (the -1 padding marks the end).
    no_ins_cur = no_ins[0][:(no_ins[0]==-1).nonzero()[0]]
    for ip in range(MAX_TURN):
        with torch.no_grad():
            result= model(input_ids, segment_ids, input_mask)
        mask_prediction_scores = result[0]
        input_len = torch.sum(input_mask,1)
        # Ramp the NOI logit up over the first args.noi_decay rounds so the
        # model does not stop inserting too early.
        noi_temp = min(float(ip) / args.noi_decay, 1.0)
        mask_prediction_scores[:,:,1] = mask_prediction_scores[:,:,1] * noi_temp
        logits = mask_prediction_scores / temperature
        if prevent:
            for p in prevent:
                logits[:,:,p] = logits[:,:,p] * PREVENT_FACTOR
        if reduce:
            # Reduction strength also ramps up over args.reduce_decay rounds.
            reduce_factor = min(float(ip) / args.reduce_decay, 1.0)
            for p in reduce:
                logits[:,:,p] = logits[:,:,p] * reduce_factor
        if promote:
            for p in promote:
                logits[:,:,p] = logits[:,:,p] * PROMOTE_FACTOR
        if args.lessrepeat:
            # Penalize tokens that already appear in the sequence.
            for p in input_ids.cpu().numpy()[0]:
                logits[:,:,p] = logits[:,:,p] * 0.8
        logits[:,:, zero_ids] = -1e10
        probs = F.softmax(logits, dim=-1)
        input_ids_new = torch.zeros_like(input_ids)
        top_predicts = torch.zeros([input_ids.shape[0], input_ids.shape[1], 3], dtype=torch.long)
        mask_predicts = probs.argmax(2)
        for t in range(args.max_seq_length):
            top_predicts[:,t] = torch.topk(probs[:,t,:], k=3)[1]
        input_mask_new = torch.zeros_like(input_mask)
        logit_new = torch.zeros_like(input_ids,dtype=torch.float)
        input_ids_ori = input_ids
        top_predicts_new = torch.zeros_like(top_predicts)
        # i: read index in the old sequence, j: gap index, k: write index.
        i = 0
        j = 0
        k = 0
        sep_tok = tokenizer.vocab['[SEP]']
        mask_predicts[0][no_ins_cur] = NOI_ID  # force "no insertion" at forbidden gaps
        new_no_ins_cur = no_ins_cur.clone().detach()
        # Interleave original tokens with accepted insertions.
        while np.max([i,j,k]) < args.max_seq_length-1:
            input_ids_new[0,k] = input_ids[0,i]
            if input_ids[0,i] == 0: # padding, ignore prediction
                break
            if input_ids[0,i] == sep_tok:
                break
            i += 1
            k += 1
            if mask_predicts[0,j].cpu().numpy() != NOI_ID:
                input_ids_new[0,k] = mask_predicts[0,j]
                logit_new[0,k] = probs[0,j,mask_predicts[0,j]]
                top_predicts_new[0,k,:] = top_predicts[0,j,:]
                # Insertions before a forbidden gap shift its position right.
                if len(no_ins_cur)> 0 and no_ins_cur[-1] > j:
                    new_no_ins_cur[torch.where(no_ins_cur > j)[0][0]:] += 1
                k+=1
                j+=1
            else:
                j+=1
        no_ins_cur = new_no_ins_cur
        mask_pos = input_ids_new > 1
        input_ids = input_ids_new
        input_mask = mask_pos
        logit_new = logit_new.detach().cpu().numpy()
        top_predicts_new = top_predicts_new.detach().cpu().numpy()
        # Verbosity: 0 silent, 2 adds token probs, 3 adds top-3 alternatives.
        if verbose == 0:
            pass
        elif verbose == 2:
            print("Round %d: %s" % (ip, " ".join([str(tokenizer.ids_to_tokens.get(x, "noa").encode('ascii', 'ignore').decode('ascii')) + (("(" + "{:.2f}".format(float(logit_new[0,i])) + ")") if logit_new[0,i] > 0 else "") for i, x in enumerate(input_ids[0].detach().cpu().numpy()) if x!=0])))
        elif verbose == 3:
            print("Round %d: %s" % (ip, " ".join([str(tokenizer.ids_to_tokens.get(x, "noa").encode('ascii', 'ignore').decode('ascii')) + (("(" + "{:.2f}".format(float(logit_new[0,i])) + " "+ " ".join([str(tokenizer.ids_to_tokens.get(y, "noa").encode('ascii', 'ignore').decode('ascii')) for y in top_predicts_new[0,i,:]]) + ")") if logit_new[0,i] > 0 else "") for i, x in enumerate(input_ids[0].detach().cpu().numpy()) if x!=0])))
        else:
            print("Round %d: %s" % (ip, " ".join([str(tokenizer.ids_to_tokens.get(x, "noa").encode('ascii', 'ignore').decode('ascii')) for x in input_ids[0].detach().cpu().numpy() if x!=0])))
    return input_ids
def sample_generate(model, input_ids, segment_ids, input_mask, no_ins = None, device='cuda', temperature=1.0, args=None, tokenizer=None, sample_num=1, top_k=10, top_p=0.9, prevent=None, promote=None, reduce=None, verbose = None):
    """Insertion-based stochastic decoding (top-k / nucleus sampling).

    Same loop as ``greedy_search``, but each gap's token is drawn with
    ``torch.multinomial`` from the top-k/top-p filtered distribution instead
    of the argmax.  Returns the final padded ``input_ids`` tensor.
    """
    if not verbose:
        verbose = args.verbose
    # Bracket tokens are hard-banned from generation below.
    zero_list = ["[", "]", "(", ")"]
    zero_ids = [ tokenizer.vocab.get(x) for x in zero_list]
    if verbose>0:
        print("\nInput %s" % (" ".join([str(tokenizer.ids_to_tokens.get(x, "noa").encode('ascii', 'ignore').decode('ascii')) for x in input_ids[0].detach().cpu().numpy() if x!=0])))
    # Active forbidden-gap positions (the -1 padding marks the end).
    no_ins_cur = no_ins[0][:(no_ins[0]==-1).nonzero()[0]]
    for ip in range(MAX_TURN):
        with torch.no_grad():
            result= model(input_ids, segment_ids, input_mask)
        mask_prediction_scores = result[0]
        input_len = torch.sum(input_mask,1)
        # Ramp the NOI logit up over the first args.noi_decay rounds.
        noi_temp = min(float(ip) / args.noi_decay, 1.0)
        mask_prediction_scores[:,:,1] = mask_prediction_scores[:,:,1] * noi_temp
        logits = mask_prediction_scores / temperature
        if prevent:
            for p in prevent:
                logits[:,:,p] = logits[:,:,p] * PREVENT_FACTOR
        if reduce:
            reduce_factor = min(float(ip) / args.reduce_decay, 1.0)
            for p in reduce:
                logits[:,:,p] = logits[:,:,p] * reduce_factor
        if promote:
            for p in promote:
                logits[:,:,p] = logits[:,:,p] * PROMOTE_FACTOR
        if args.lessrepeat:
            # Penalize tokens that already appear in the sequence.
            for p in input_ids.cpu().numpy()[0]:
                logits[:,:,p] = logits[:,:,p] * 0.8
        logits[:,:, zero_ids] = -1e10
        # Restrict each position's distribution before sampling.
        for i in range(args.max_seq_length):
            logits[:,i] = top_k_top_p_filtering(logits[:,i].squeeze(), top_k = top_k, top_p = top_p)
        probs = F.softmax(logits, dim=-1)
        input_ids_new = torch.zeros_like(input_ids)
        top_predicts = torch.zeros([input_ids.shape[0], input_ids.shape[1], 3], dtype=torch.long)
        mask_predicts = torch.zeros_like(input_ids, dtype=torch.long)
        for t in range(args.max_seq_length):
            mask_predicts[:,t] =torch.multinomial(probs[:,t,:], num_samples=1)
            top_predicts[:,t] = torch.topk(probs[:,t,:], k=3)[1]
        logit_new = torch.zeros_like(input_ids,dtype=torch.float)
        input_ids_ori = input_ids
        top_predicts_new = torch.zeros_like(top_predicts)
        # i: read index in the old sequence, j: gap index, k: write index.
        i = 0
        j = 0
        k = 0
        sep_tok = tokenizer.vocab['[SEP]']
        mask_predicts[0][no_ins_cur] = NOI_ID  # force "no insertion" at forbidden gaps
        new_no_ins_cur = no_ins_cur.clone().detach()
        # Interleave original tokens with sampled insertions.
        while np.max([i,j,k]) < args.max_seq_length-1:
            input_ids_new[0,k] = input_ids[0,i]
            if input_ids[0,i] == 0: # padding, ignore prediction
                break
            if input_ids[0,i] == sep_tok:
                break
            i += 1
            k += 1
            if mask_predicts[0,j].cpu().numpy() != 1:  # 1 == NOI_ID
                input_ids_new[0,k] = mask_predicts[0,j]
                logit_new[0,k] = probs[0,j,mask_predicts[0,j]]
                top_predicts_new[0,k,:] = top_predicts[0,j,:]
                # Insertions before a forbidden gap shift its position right.
                if len(no_ins_cur)> 0 and no_ins_cur[-1] > j:
                    new_no_ins_cur[torch.where(no_ins_cur > j)[0][0]:] += 1
                k+=1
                j+=1
            else:
                j+=1
        no_ins_cur = new_no_ins_cur
        mask_pos = input_ids_new > 1
        input_ids = input_ids_new
        input_mask = mask_pos
        logit_new = logit_new.detach().cpu().numpy()
        top_predicts_new = top_predicts_new.detach().cpu().numpy()
        # Verbosity: 0 silent, 2 adds token probs, 3 adds top-3 alternatives.
        if verbose == 0:
            pass
        elif verbose == 2:
            print("Round %d: %s" % (ip, " ".join([str(tokenizer.ids_to_tokens.get(x, "noa").encode('ascii', 'ignore').decode('ascii')) + (("(" + "{:.2f}".format(float(logit_new[0,i])) + ")") if logit_new[0,i] > 0 else "") for i, x in enumerate(input_ids[0].detach().cpu().numpy()) if x!=0])))
        elif verbose == 3:
            print("Round %d: %s" % (ip, " ".join([str(tokenizer.ids_to_tokens.get(x, "noa").encode('ascii', 'ignore').decode('ascii')) + (("(" + "{:.2f}".format(float(logit_new[0,i])) + " "+ " ".join([str(tokenizer.ids_to_tokens.get(y, "noa").encode('ascii', 'ignore').decode('ascii')) for y in top_predicts_new[0,i,:]]) + ")") if logit_new[0,i] > 0 else "") for i, x in enumerate(input_ids[0].detach().cpu().numpy()) if x!=0])))
        else:
            print("Round %d: %s" % (ip, " ".join([str(tokenizer.ids_to_tokens.get(x, "noa").encode('ascii', 'ignore').decode('ascii')) for x in input_ids[0].detach().cpu().numpy() if x!=0])))
    return input_ids
def main():
    """CLI entry point: load a BERT masked-LM checkpoint and generate text
    from a keyword file, decoding either greedily or with top-k sampling.

    Output is written next to the model (or --output_dir) as
    ``<keyfile stem><model name>.<decoding type>.txt``.
    """
    parser = ArgumentParser()
    parser.add_argument('--keyfile', type=Path, required=True)
    parser.add_argument('--output_dir', type=Path, required=False, default=None)
    parser.add_argument("--bert_model", type=str, required=True, help="Bert pre-trained model selected in the list: bert-base-uncased, "
                        "bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.")
    parser.add_argument("--do_lower_case",
                        type=boolean_string,
                        default=False,
                        )
    parser.add_argument("--reduce_memory",
                        type=boolean_string,
                        default=False,
                        help="Store training data as on-disc memmaps to massively reduce memory usage")
    parser.add_argument("--local_rank",
                        type=int,
                        default=-1,
                        help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda",
                        type=boolean_string,
                        default=False,
                        help="Whether not to use CUDA when available")
    parser.add_argument("--batch_size",
                        default=1,
                        type=int,
                        help="Total batch size for training.")
    parser.add_argument('--fp16',
                        type=boolean_string,
                        default=False,
                        help="Whether to use 16-bit float precision instead of 32-bit")
    parser.add_argument('--seed',
                        type=int,
                        default=42,
                        help="random seed for initialization")
    parser.add_argument("--type",
                        default="greedy",
                        type=str,
                        choices=['greedy','sampling'],
                        help="greedy: greedy generation. sampling: top-k sampling")
    parser.add_argument('--noi_decay',
                        type=int,
                        default=1,
                        help="round number to decay NOI prob")
    parser.add_argument('--reduce_decay',
                        type=int,
                        default=1,
                        help="round number to decay reduce prob")
    parser.add_argument('--verbose', type=int,
                        default=0,
                        help="verbose level")
    parser.add_argument('--n_test',
                        type=int,
                        default=5000,
                        help="number of test examples")
    parser.add_argument('--prevent',
                        type=boolean_string,
                        default=True,
                        help="avoid generating several words")
    parser.add_argument('--reduce_stop',
                        type=boolean_string,
                        default=True,
                        help="reduce stopwords")
    parser.add_argument('--lessrepeat',
                        type=boolean_string,
                        default=True,
                        help="reduce repetition (only for tokenwise)")
    parser.add_argument('--sep',
                        type=str, default=" ", help="token to seperate keywords")
    parser.add_argument('--max_seq_length',
                        type=int,
                        default=256,
                        help="max sequence length")
    parser.add_argument("--no_ins_at_first",
                        type=boolean_string,
                        default=False,
                        help="Do not insert at the begining of the text")
    args = parser.parse_args()

    # Default the output directory to the model directory.
    if not args.output_dir:
        args.output_dir = args.bert_model
    epoch_file = args.keyfile

    # args.max_seq_length = 256
    # Setup CUDA, GPU & distributed training
    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = torch.cuda.device_count()
    else:  # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
        torch.distributed.init_process_group(backend='nccl')
    args.device = device

    # Setup logging (rank 0 / single process logs at INFO, other ranks at WARN)
    logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
                        datefmt = '%m/%d/%Y %H:%M:%S',
                        level = logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
                   args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16)

    # Set seed
    set_seed(args)
    args.output_mode = "classification"

    if args.output_dir.is_dir() and list(args.output_dir.iterdir()):
        logging.warning(f"Output directory ({args.output_dir}) already exists and is not empty!")
    args.output_dir.mkdir(parents=True, exist_ok=True)

    tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case)

    # Prepare model
    model = BertForMaskedLM.from_pretrained(args.bert_model)
    sep_tok = tokenizer.vocab['[SEP]']
    cls_tok = tokenizer.vocab['[CLS]']
    pad_tok = tokenizer.vocab['[PAD]']

    model.to(device)
    model.eval()
    print(args)

    # Dataset yields (input_ids, input_mask, segment_ids, lm_label_ids, no_ins)
    # batches built from the keyword file.
    epoch_dataset = PregeneratedDataset(epoch=0, training_path=args.keyfile, tokenizer=tokenizer, max_seq_len = args.max_seq_length, sep=args.sep, no_ins_at_first = args.no_ins_at_first, num_data_epochs=1)
    epoch_sampler = SequentialSampler(epoch_dataset)
    generate_dataloader = DataLoader(epoch_dataset, sampler=epoch_sampler,batch_size=args.batch_size)

    # Output path: <keyfile without 3-char extension><model dir name>.<type>.txt
    file_name = os.path.join(args.output_dir, os.path.basename(args.keyfile)[:-3] + os.path.basename(args.bert_model) + f".{args.type}.txt")
    f = open(file_name, "w", 1)

    logging.info("***** Running generation *****")
    logging.info(f" Num examples = {epoch_dataset.num_samples}")
    logging.info(" Batch size = %d", args.batch_size)
    logging.info(f" Save to {file_name}")

    # Vocabulary ids whose generation should be suppressed / penalised.
    prevent = [ tokenizer.vocab.get(x) for x in PREVENT_LIST] if args.prevent else None
    if args.reduce_stop:
        # import pdb; pdb.set_trace()
        reduce_l = REDUCE_LIST | STOP_LIST
    reduce = None
    if args.prevent:
        # NOTE(review): if --reduce_stop is false while --prevent is true,
        # `reduce_l` above is unbound and this raises NameError — confirm the
        # two flags are meant to be coupled.
        reduce = [ tokenizer.vocab.get(x) for x in reduce_l]
        reduce = [s for s in reduce if s]

    with tqdm(total=len(generate_dataloader), desc=f"Epoch {0}") as pbar:
        for step, batch in enumerate(generate_dataloader):
            batch = tuple(t.to(device) for t in batch)
            input_ids, input_mask, segment_ids, lm_label_ids, no_ins = batch
            if args.type == "greedy":
                predict_ids = greedy_search(model, input_ids, segment_ids, input_mask, no_ins = no_ins, args=args, tokenizer=tokenizer, prevent=prevent, reduce= reduce)
            elif args.type == 'sampling':
                predict_ids = sample_generate(model, input_ids, segment_ids, input_mask, no_ins = no_ins, temperature=0.8, args=args, tokenizer=tokenizer, prevent=prevent, reduce= reduce)
            else:
                raise NotImplementedError
            # Detokenise, dropping [SEP]/[PAD]/[CLS] special tokens.
            output = " ".join([str(tokenizer.ids_to_tokens.get(x, "noa").encode('ascii', 'ignore').decode('ascii')) for x in predict_ids[0].detach().cpu().numpy() if x!=sep_tok and x != pad_tok and x != cls_tok]) + "\n"
            # Merge WordPiece continuation pieces ("##xx") back into words.
            output = output.replace(" ##", "")
            f.write(output)
            pbar.update(1)
if __name__ == '__main__':
main()
| dreasysnail/POINTER | inference.py | inference.py | py | 26,546 | python | en | code | 111 | github-code | 13 |
21253004182 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__doc__ = 'description'
__author__ = '13314409603@163.com'
import os
import shutil
def main():
    """Copy a sample of judgment documents from the corpus into a scratch
    folder for manual experiments (paths are machine-specific)."""
    baseDir = r'A:\Bi-LSTM+CRF\原始_待分句_样例\cpws'
    targetDir = r'C:\Users\13314\Desktop\test'
    # choose2KB(baseDir,targetDir)
    chooseNumberOfChar(baseDir,targetDir,1500)
def choose2KB(baseDir,targetDir):
    """Copy up to 150 files of roughly 2 KB into targetDir.

    Scans the numbered sub-directories 10..99 of baseDir and keeps only files
    whose size is within ~0.1 KB of 2 KB and whose text contains both
    '诉称' (plaintiff's claim) and '辩称' (defendant's defence).
    """
    count = 0
    for i in range(10, 100):
        dir = os.path.join(baseDir, str(i))
        if (count == 150):
            break
        for fileName in os.listdir(dir):
            path = os.path.join(dir, fileName)
            # size filter: 1.9 KB < size < 2.1 KB
            if (os.path.getsize(path) > 1.9 * 1024 and os.path.getsize(path) < 2.1 * 1024):
                with open(path, 'r', encoding='utf8') as f:
                    content = f.read()
                if (content.find('诉称') != -1 and content.find('辩称') != -1):
                    shutil.copy(path, targetDir)
                    count += 1
                    print(path)
                if (count == 150):
                    break
def chooseNumberOfChar(baseDir,targetDir,numberChars):
    """Copy up to 20 files whose length is within 30 characters of
    ``numberChars`` and which contain both '诉称' and '辩称'.

    Scans the numbered sub-directories 10..99 of baseDir, like choose2KB but
    filtering on character count instead of file size.
    """
    count = 0
    content = ''
    for i in range(10, 100):
        dir = os.path.join(baseDir, str(i))
        if (count == 20):
            break
        for fileName in os.listdir(dir):
            path = os.path.join(dir, fileName)
            with open(path, 'r', encoding='utf8') as f:
                content = f.read()
            # character-count filter: |len - numberChars| < 30
            if(abs(len(content)-numberChars)<30):
                if (content.find('诉称') != -1 and content.find('辩称') != -1):
                    shutil.copy(path, targetDir)
                    count += 1
                    print(path)
            if (count == 20):
                break
if __name__ == '__main__':
main()
# pass | shengxiaoyu/BiLSTM_CRF_EVENT_DETECT | event_dectect/evaluation/chooseFileForExperiment.py | chooseFileForExperiment.py | py | 1,842 | python | en | code | 2 | github-code | 13 |
27613778185 | import torch
from torchvision.utils import save_image
import numpy as np
def denorm(tensor, device, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]):
    """Undo channel-wise normalisation and clamp the result into [0, 1]."""
    scale = torch.Tensor(std).reshape(-1, 1, 1).to(device)
    shift = torch.Tensor(mean).reshape(-1, 1, 1).to(device)
    return torch.clamp(tensor * scale + shift, 0, 1)
def save_image_from_tensor_batch(batch, column, path, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], device='cpu'):
    """De-normalise a batch of image tensors and save them as one image grid
    with ``column`` images per row (via torchvision's save_image)."""
    batch = denorm(batch, device, mean, std)
    save_image(batch, path, nrow=column)
def mean_teacher(model, teacher, momentum=0.9995):
    """EMA update of the teacher's state_dict from the student's.

    teacher <- momentum * teacher + (1 - momentum) * model, entry by entry.
    """
    student_state = model.state_dict()
    ema_state = teacher.state_dict()
    for name in ema_state:
        ema_state[name] = ema_state[name] * momentum + (1 - momentum) * student_state[name]
    teacher.load_state_dict(ema_state)
def update_teacher(model, teacher, momentum=0.9995):
    """In-place EMA update of the teacher's parameters from the student's.

    ema <- momentum * ema + (1 - momentum) * param for each parameter pair.
    """
    for ema_param, param in zip(teacher.parameters(), model.parameters()):
        # `add_(tensor, alpha=...)` replaces the deprecated positional-scalar
        # form `add_(scalar, tensor)` (removed in recent PyTorch releases).
        ema_param.data.mul_(momentum).add_(param.data, alpha=1 - momentum)
def warm_update_teacher(model, teacher, momentum=0.9995, global_step=2000):
    """EMA update with a warm-up: early in training (small global_step) the
    effective momentum is lowered so the teacher tracks the student faster."""
    momentum = min(1 - 1 / (global_step + 1), momentum)
    for ema_param, param in zip(teacher.parameters(), model.parameters()):
        # `add_(tensor, alpha=...)` replaces the deprecated positional-scalar
        # form `add_(scalar, tensor)` (removed in recent PyTorch releases).
        ema_param.data.mul_(momentum).add_(param.data, alpha=1 - momentum)
def preprocess_teacher(model, teacher):
    """Copy the student's weights into the teacher and freeze the teacher
    (its parameters are updated by EMA, never by gradients)."""
    for src, dst in zip(model.parameters(), teacher.parameters()):
        dst.data.copy_(src.data)
        dst.requires_grad = False
def calculate_correct(scores, labels):
    """Count how many rows of `scores` have their argmax equal to the label."""
    assert scores.size(0) == labels.size(0)
    _, predicted = scores.max(dim=1)
    return (predicted == labels).sum().item()
def sigmoid_rampup(current, rampup_length):
    """Exponential rampup from https://arxiv.org/abs/1610.02242"""
    if rampup_length == 0:
        return 1.0
    clipped = np.clip(current, 0.0, rampup_length)
    phase = 1.0 - clipped / rampup_length
    return float(np.exp(-5.0 * phase * phase))
def linear_rampup(current, rampup_length):
    """Linear rampup: grows from 0 to 1 over `rampup_length`, then stays 1."""
    assert current >= 0 and rampup_length >= 0
    return 1.0 if current >= rampup_length else current / rampup_length
def step_rampup(current, rampup_length):
    """Step schedule: 0.0 before `rampup_length`, 1.0 from then on."""
    assert current >= 0 and rampup_length >= 0
    return 1.0 if current >= rampup_length else 0.0
def get_current_consistency_weight(epoch, weight, rampup_length, rampup_type='step'):
    """Scale `weight` by the chosen rampup schedule evaluated at `epoch`."""
    schedules = {
        'step': step_rampup,
        'linear': linear_rampup,
        'sigmoid': sigmoid_rampup,
    }
    if rampup_type not in schedules:
        raise ValueError("Rampup schedule not implemented")
    return weight * schedules[rampup_type](epoch, rampup_length)
15808947303 | from __future__ import print_function
import sys
# bold colors
_ansi = {'red': 91, 'yellow': 93}
def is_tty(stream):  # taken from catkin_tools/common.py
    """Returns True if the given stream is a tty, else False"""
    if not hasattr(stream, 'isatty'):
        return False
    return stream.isatty()
def colorize(msg, color, file=sys.stderr, alt_text=None):
    """Wrap *msg* in the ANSI code for *color* when *file* is a tty;
    otherwise fall back to the *alt_text* prefix, or return *msg* unchanged."""
    if color and is_tty(file):
        return '\033[%dm%s\033[0m' % (_ansi[color], msg)
    if alt_text:
        return '%s%s' % (alt_text, msg)
    return msg
def message(msg, *args, **kwargs):
    """Print *msg* (optionally colorized) followed by *args* to the target
    stream (kwargs: file, alt_text, color)."""
    target = kwargs.get('file', sys.stderr)
    prefix = kwargs.get('alt_text', None)
    color = kwargs.get('color', None)
    print(colorize(msg, color, target, prefix), *args, file=target)
def warning(*args, **kwargs):
    """message() preset: bold yellow with a 'warning: ' fallback prefix."""
    opts = dict(file=sys.stderr, alt_text='warning: ', color='yellow')
    opts.update(kwargs)
    message(*args, **opts)
def error(*args, **kwargs):
    """message() preset: bold red with an 'error: ' fallback prefix."""
    opts = dict(file=sys.stderr, alt_text='error: ', color='red')
    opts.update(kwargs)
    message(*args, **opts)
| jincheng-ai/ros-melodic-python3-opencv4 | xacro/src/xacro/color.py | color.py | py | 1,065 | python | en | code | 5 | github-code | 13 |
31321235612 | # coding=utf-8
__author__ = "Daniel Arroyo <daniel@astroprint.com>"
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
import os.path
import glob
import logging
import threading
import uuid
import time
from threading import Event
from random import randrange
from astroprint.camera import CameraManager
class CameraMacManager(CameraManager):
    """Simulated camera manager for development on Mac.

    Instead of talking to real hardware it serves random local
    ``camera_test*.jpeg`` files as photos and as frames of a fake local
    video stream.
    """
    name = 'mac'

    def __init__(self):
        super(CameraMacManager, self).__init__()
        self._logger = logging.getLogger(__name__)
        # Sample JPEGs shipped in <repo>/local act as fake captures.
        self._files = [f for f in glob.glob(os.path.join(os.path.realpath(os.path.dirname(__file__)+'/../../../local'),"camera_test*.jpeg"))]
        self.cameraName = 'Test Camera'
        self._opened = False
        self._localFrame = None
        self._localPeers = []
        self.waitForPhoto = Event()
        self._logger.info('Mac Simulation Camera Manager initialized')

    def shutdown(self):
        """Reset all simulated state."""
        self._logger.info('Shutting Down Mac Camera Manager')
        self._opened = False
        self._localFrame = None
        self._localPeers = []
        self.waitForPhoto = Event()

    def settingsStructure(self):
        # Static capability description consumed by the settings UI.
        return {
            'videoEncoding': [{"label": "H.264", "value": "h264"}, {"label": "VP8", "value": "vp8"}],
            'frameSizes': [
                {'value': '640x480', 'label': 'Low (640 x 480)'},
                {'value': '1280x720', 'label': 'High (1280 x 720)'}
            ],
            'fps': [
                {'value': '5', 'label': '5 fps'},
                {'value': '10', 'label': '10 fps'}
            ],
            'cameraOutput': [
                {'value': 'files', 'label': 'Files'}
            ],
            "video_rotation": [
                {"label": "No Rotation", "value": "0"},
                {"label": "Rotate 90 degrees to the right", "value": "1"},
                {"label": "Rotate 90 degrees to the left", "value": "3"},
                {"label": "Flip horizontally", "value": "4"},
                {"label": "Flip vertically", "value": "5"}
            ]
        }

    def _doOpenCamera(self):
        self._opened = True
        return True

    def _doCloseCamera(self):
        self._opened = False
        return True

    def _doGetPic(self, done, text=None):
        # Simulate camera latency: deliver the photo after 3 seconds.
        if self.isCameraConnected():
            threading.Timer(3.0, self._simulateGetPicAsync,[done, text]).start()
        else:
            done(None)

    @property
    def capabilities(self):
        #return ['videoStreaming']
        return []

    def isCameraConnected(self):
        return True

    def hasCameraProperties(self):
        return True

    def isCameraOpened(self):
        return self._opened

    def isResolutionSupported(self, resolution):
        return resolution == '640x480'

    def _simulateGetPicAsync(self, done, text):
        """Pick a random sample JPEG and hand its bytes to `done`."""
        fileCount = len(self._files)
        image = None
        if fileCount:
            imageFile = self._files[randrange(fileCount)]
            # FIX: JPEGs are binary — open in 'rb'. Text mode would raise
            # UnicodeDecodeError (or corrupt the data) on Python 3.
            with open(imageFile, "rb") as f:
                image = f.read()
        done(image)

    def reScan(self, broadcastChange = True):
        return True

    def isVideoStreaming(self):
        return False

    def removeLocalPeerReq(self,id):
        """Drop a local peer; stop the fake stream when none remain."""
        self._localPeers.remove(id)
        if len(self._localPeers) <= 0:
            self.stop_local_video_stream()
            self._logger.info('There are 0 local peers left')

    def addLocalPeerReq(self):
        """Register a new local peer and return its session id.

        The first peer starts the fake video stream.
        """
        id = uuid.uuid4().hex
        self._localPeers.append(id)
        self._logger.debug('number of local peers: %d' % len(self._localPeers))
        if len(self._localPeers) == 1:
            self.start_local_video_stream()
        return id

    def localSessionAlive(self,id):
        return id in self._localPeers

    def getFrame(self,id):
        """Block (max 2s) for the next published frame of session `id`."""
        self.waitForPhoto.wait(2)
        # FIX: Event.is_set() replaces the camelCase isSet() alias, which is
        # deprecated and removed in recent Python 3 versions.
        if self.waitForPhoto.is_set():
            self.waitForPhoto.clear()
            if id in self._localPeers:
                return self._localFrame
        else:  # timed out waiting for a frame
            # Assume the peer is dead and drop it.
            self.removeLocalPeerReq(id)
            self.waitForPhoto.clear()
            return None

    def _responsePeersReq(self,photoData):
        self._localFrame = photoData

    def _onFrameTakenCallback(self,photoData):
        """Publish a new frame and wake any getFrame() waiters."""
        if photoData:
            if not self._localPeers:
                self.stop_local_video_stream()
            self._responsePeersReq(photoData)
            self.waitForPhoto.set()

    def start_local_video_stream(self):
        """Spawn a background producer that publishes a random sample frame
        every 0.5 s for as long as local peers are registered."""
        fileCount = len(self._files)
        if fileCount:
            def imagesProducer():
                while len(self._localPeers) > 0:
                    time.sleep(0.5)

                    def getRandomImg():
                        imageFile = self._files[randrange(fileCount)]
                        # FIX: binary read — frames are raw JPEG bytes.
                        with open(imageFile, "rb") as f:
                            image = f.read()
                        self._onFrameTakenCallback(image)

                    threading.Timer(0, getRandomImg).start()

            threading.Timer(0, imagesProducer).start()
        return

    def stop_local_video_stream(self):
        self._localPeers = []
| AstroPrint/AstroBox | src/astroprint/camera/mac.py | mac.py | py | 4,233 | python | en | code | 158 | github-code | 13 |
17055313074 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class LightPosOrderSku(object):
    """Value object describing one SKU line of a light-POS order.

    Mirrors the Alipay OpenAPI wire format: ``to_alipay_dict`` serialises
    the populated fields, ``from_alipay_dict`` rebuilds an instance from a
    plain dict.
    """

    # Field names in wire order; drives the (de)serialisation helpers below.
    _FIELDS = ('amount', 'name', 'price', 'quantity', 'sku_id')

    def __init__(self):
        self._amount = None
        self._name = None
        self._price = None
        self._quantity = None
        self._sku_id = None

    @property
    def amount(self):
        return self._amount

    @amount.setter
    def amount(self, value):
        self._amount = value

    @property
    def name(self):
        return self._name

    @name.setter
    def name(self, value):
        self._name = value

    @property
    def price(self):
        return self._price

    @price.setter
    def price(self, value):
        self._price = value

    @property
    def quantity(self):
        return self._quantity

    @quantity.setter
    def quantity(self, value):
        self._quantity = value

    @property
    def sku_id(self):
        return self._sku_id

    @sku_id.setter
    def sku_id(self, value):
        self._sku_id = value

    def to_alipay_dict(self):
        # NOTE: falsy values (None, 0, '') are skipped — this mirrors the
        # upstream generated-SDK behaviour and is kept intentionally.
        params = dict()
        for field in self._FIELDS:
            value = getattr(self, field)
            if not value:
                continue
            if hasattr(value, 'to_alipay_dict'):
                params[field] = value.to_alipay_dict()
            else:
                params[field] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        if not d:
            return None
        o = LightPosOrderSku()
        for field in LightPosOrderSku._FIELDS:
            if field in d:
                setattr(o, field, d[field])
        return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/LightPosOrderSku.py | LightPosOrderSku.py | py | 2,570 | python | en | code | 241 | github-code | 13 |
73079353299 | import numpy as np
def save_model(weight, bias, path='model.npy'):
    """Persist a linear model as a single flat array.

    The weight vector is flattened and the bias appended as the last element,
    then saved with ``np.save``. ``path`` defaults to the original hard-coded
    'model.npy' so existing callers are unaffected.
    """
    model = np.hstack((weight.reshape(-1), bias))
    np.save(path, model)
def load_model(path='model.npy'):
    """Inverse of save_model: load the flat array and split it back into a
    column weight vector and a scalar bias.

    ``path`` defaults to the original hard-coded 'model.npy' so existing
    callers are unaffected.
    """
    model = np.load(path)
    weight = model[:-1].reshape(-1, 1)
    bias = model[-1]
    return (weight, bias)
19284833300 | import json
import os
import re
import click
def walk_dir(root_path):
    """Index *root_path*: files map to their size, directories map to a
    {relative_path: size} tree; returns {'base_dir': ..., 'result': ...}."""
    index = {}
    for entry in os.listdir(root_path):
        print("正在索引:{}".format(entry))
        entry_path = os.sep.join([root_path, entry])
        if os.path.isfile(entry_path):
            try:
                index[entry] = os.path.getsize(entry_path)
            except FileNotFoundError:
                print("文件[{}]存在但读取时出现错误,本次索引可能不完整,请检查此文件详情后重试~".format(entry_path))
                continue
        else:
            tree = {}
            for sub_root, _dirs, names in os.walk(entry_path):
                for name in names:
                    full_path = os.sep.join([sub_root, name])  # absolute-ish path of this file
                    try:
                        size = os.path.getsize(full_path)
                    except (FileNotFoundError, OSError):
                        print("文件[{}]存在但读取时出现错误,本次索引可能不完整,请检查此文件详情后重试~".format(full_path))
                        continue
                    # key by path relative to the entry's top directory
                    tree[full_path.replace(entry_path + os.sep, '')] = size
            index[entry] = tree
    return {'base_dir': root_path, 'result': index}
@click.command()
@click.argument('path')
@click.option('--save-dir', default=os.getcwd(), help='索引文件保存的路径,默认为当前文件夹')
def main(path, save_dir):
    """
    PATH: 需要索引的路径,举例:/home/xxx/downloads/, D:\\\\Downloads
    """
    # (click renders the docstring and option help above as user-facing text;
    # kept verbatim.)
    # Bail out early when the save dir is not writable or the target path is
    # not readable.
    if not os.access(save_dir, os.W_OK):
        print("保存路径[{}]不可写入,请提升权限或更改目录!".format(save_dir))
        return
    elif not os.access(path, os.R_OK):
        print("索引路径[{}]不可读取,请检查路径是否正确!".format(path))
        return
    else:
        print("开始索引,时间根据路径下文件零散程度不等,请耐心等待...")
        result = walk_dir(path)
    rstr = r"[\/\\\:\*\?\"\<\>\|]"  # '/ \ : * ? " < > |'
    # Sanitise the path into a usable file name: replace reserved characters
    # with underscores.
    path = re.sub(rstr, "_", path)
    result_file = '{}.json'.format(os.path.join(save_dir, path))
    with open(result_file, 'w')as f:
        f.write(json.dumps(result))
    print("成功!保存路径:{}".format(result_file))
if __name__ == '__main__':
main()
| tongyifan/Reseed-backend | scripts/reseed.py | reseed.py | py | 2,331 | python | en | code | 326 | github-code | 13 |
57340179 | #!/usr/bin/env python
"""Python3 scripts running in scriptworker will use functions in this file.
Attributes:
log (logging.Logger): the log object for the module
"""
import asyncio
import logging
import os
import sys
import jsonschema
from immutabledict import immutabledict
from scriptworker_client.exceptions import ClientError, TaskVerificationError
from scriptworker_client.utils import load_json_or_yaml
log = logging.getLogger(__name__)
def get_task(config):
    """Read the task.json from work_dir.

    Args:
        config (dict): the running config, to find work_dir.

    Returns:
        dict: the contents of task.json

    Raises:
        ClientError: on error.

    """
    task_path = os.path.join(config["work_dir"], "task.json")
    failure_message = "Can't read task from {}!\n%(exc)s".format(task_path)
    return load_json_or_yaml(task_path, is_path=True, message=failure_message)
def verify_json_schema(data, schema, name="task"):
    """Given data and a jsonschema, let's verify it.

    This happens for tasks and chain of trust artifacts.

    Args:
        data (dict): the json to verify.
        schema (dict): the jsonschema to verify against.
        name (str, optional): the name of the json, for exception messages.
            Defaults to "task".

    Raises:
        TaskVerificationError: on failure

    """
    try:
        jsonschema.validate(data, schema)
    except jsonschema.exceptions.ValidationError as exc:
        # Re-raise as a scriptworker-level error, preserving the cause chain.
        raise TaskVerificationError("Can't verify {} schema!\n{}".format(name, str(exc))) from exc
def verify_task_schema(config, task, schema_key="schema_file"):
    """Verify the task definition.

    Args:
        config (dict): the running config
        task (dict): the running task
        schema_key: the key in `config` where the path to the schema file is.
            Key can contain dots (e.g.: 'schema_files.file_a')

    Raises:
        TaskVerificationError: if the task doesn't match the schema

    """
    node = config
    try:
        # Walk the dotted key down into the config to find the schema path.
        for part in schema_key.split("."):
            node = node[part]
        task_schema = load_json_or_yaml(node, is_path=True)
        log.debug("Task is verified against this schema: {}".format(task_schema))
        verify_json_schema(task, task_schema)
    except (KeyError, OSError) as e:
        raise TaskVerificationError("Cannot verify task against schema. Task: {}.".format(task)) from e
def sync_main(
    async_main,
    config_path=None,
    default_config=None,
    should_verify_task=True,
    loop_function=asyncio.get_event_loop,
):
    """Entry point for scripts using scriptworker.

    This function sets up the basic needs for a script to run. More specifically:
        * it initializes the config
        * the path to the config file is either taken from `config_path` or from `sys.argv[1]`.
        * it verifies `sys.argv` doesn't have more arguments than the config path.
        * it creates the asyncio event loop so that `async_main` can run

    Args:
        async_main (function): The function to call once everything is set up
        config_path (str, optional): The path to the file to load the config from.
            Loads from ``sys.argv[1]`` if ``None``. Defaults to None.
        default_config (dict, optional): the default config to use for ``init_config``.
            defaults to None.
        should_verify_task (bool, optional): whether we should verify the task
            schema. Defaults to True.
        loop_function (function, optional): the function to call to get the
            event loop; here for testing purposes. Defaults to
            ``asyncio.get_event_loop``.
    """
    config = init_config(config_path, default_config)
    _init_logging(config)
    task = get_task(config)  # reads work_dir/task.json
    if should_verify_task:
        verify_task_schema(config, task)
    loop = loop_function()
    # Run the script's async entry point to completion on the chosen loop.
    loop.run_until_complete(_handle_asyncio_loop(async_main, config, task))
def init_config(config_path=None, default_config=None, validator_callback=None):
    """Initialize the config.

    First, read config overrides from ``config_path``. Apply over
    ``default_config``. Send to ``validator_callback``, then return an
    immutabledict of the config.

    Args:
        config_path (str, optional): the path to the config file. Defaults to
            ``sys.argv[1]``.
        default_config (dict, optional): the config defaults. These are the
            config values if not overridden in ``config_path``. Defaults to
            ``{}``.
        validator_callback (function, optional): a function that takes a single
            arg (``config``), and raises an exception if invalid. If ``None``,
            don't validate the config. Defaults to ``None``.

    Raises:
        Exception: if the config doesn't pass the ``validator_callback``.

    Returns:
        immutabledict: the config.

    """
    if config_path is None:
        # Fall back to the command line; exit with usage if it's malformed.
        if len(sys.argv) != 2:
            _usage()
        config_path = sys.argv[1]
    config = dict(default_config) if default_config is not None else {}
    config.update(load_json_or_yaml(config_path, file_type="yaml", is_path=True))
    if validator_callback:
        validator_callback(config)
    return immutabledict(config)
def _usage():
print("Usage: {} CONFIG_FILE".format(sys.argv[0]), file=sys.stderr)
sys.exit(1)
def _init_logging(config):
logging.basicConfig(
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
level=logging.DEBUG if config.get("verbose") else logging.INFO,
)
logging.getLogger("taskcluster").setLevel(logging.WARNING)
logging.getLogger("mohawk").setLevel(logging.INFO)
async def _handle_asyncio_loop(async_main, config, task):
    """Await ``async_main(config, task)``; on ClientError, log the traceback
    and exit the process with the error's exit code."""
    try:
        await async_main(config, task)
    except ClientError as exc:
        log.exception(f"Failed to run async_main; exiting {exc.exit_code}")
        sys.exit(exc.exit_code)
| mozilla-releng/scriptworker-scripts | scriptworker_client/src/scriptworker_client/client.py | client.py | py | 5,949 | python | en | code | 13 | github-code | 13 |
38384826011 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import logging
import sys
import time
from bottle import Bottle, BaseTemplate, TEMPLATE_PATH, view, request, static_file
from stanfordcorenlp import StanfordCoreNLP
from dao.data_acquire import get_corpus
from definitions import WEB_DIR
from service.get_view import get_views, get_stanford_nlp
from service.train_saying_word import read_say_words
__author__ = "charlene"
__time__ = "2019-05-21"
def build_application(env='dev'):
    """Build the Bottle app: load NLP resources, write web/config.js with the
    environment's host/port, and register the index/static/views routes.

    :param env: key into host_dict ('dev' or 'product')
    :return: the configured Bottle application
    """
    app = Bottle()
    # with app:
    # Our application object is now the default
    # for all shortcut functions and decorators
    nlp = get_stanford_nlp()
    say_words = read_say_words()
    # Tell the frontend where to send /views requests.
    with open("web/config.js", "w") as f:
        host = host_dict[env]
        f.write('getViewUrl = {{\n\turl:\'http://{}:{}\'\n}}'.format(host[0], host[1]))
    BaseTemplate.defaults['app'] = app  # XXX Template global variable
    TEMPLATE_PATH.insert(0, 'views')  # XXX Location of HTML templates
    # XXX Routes to static content
    # @app.route('/<path:re:favicon.ico>')
    # @app.route('/static/<path:path>')
    # def static(path):
    #     'Serve static content.'
    #     return static_file(path, root='static/')
    # XXX Index page
    @app.route('/')  # XXX URL to page
    @view('index')  # XXX Name of template
    def index():
        """
        :return:
        """
        return static_file('index.html', root=WEB_DIR)

    @app.route('/static/<filename:path>')
    def send_static(filename):
        return static_file(filename, root=WEB_DIR)

    @app.post('/views', name='view')
    def fetch_views():
        # Extract opinions from the posted sentence and return them as JSON.
        sentence = request.forms.getunicode('sentence')
        views = get_views(sentence, nlp, say_words)
        view_dict = [{"person": person, "verb": verb, "view": view} for person, verb, view in views]
        return json.dumps(view_dict)

    return app
# Per-environment (host, port, debug) tuples used by build_application()/main().
host_dict = {
    'dev': ('localhost', 8000, True),
    'product': ('0.0.0.0', 8898, False)
}
def main():
    """Start the auto-view-detection web app.

    Reads the environment name from sys.argv[1] (default 'dev'), sets up a
    timestamped log file, builds the Bottle app and runs it on the
    environment's host/port.
    """
    logging.basicConfig(
        filename='log/auto-view-detection_{}.log'.format(time.strftime("%Y%m%dT%H%M%S", time.localtime(time.time()))),
        format='%(asctime)s %(levelname)s:%(message)s',
        level=logging.INFO)
    logging.info('Started')
    # print command line arguments
    env = 'dev' if len(sys.argv) < 2 else sys.argv[1]
    logging.info('auto-view-detection app in {} environment started.'.format(env))
    _host, _port, _debug = host_dict[env]
    app = build_application(env)
    app.run(host=_host, port=_port, debug=_debug)
    logging.info('Finished')
if __name__ == "__main__":
main() | erichtho/auto-view-detection | web.py | web.py | py | 2,621 | python | en | code | 0 | github-code | 13 |
73447172817 | """
produtos = [1] * 10
descontado = 0
y = 0
valor = 0
while produtos[y] != 0:
if y == 9:
break
else:
y = y + 1
produtos[y] = int(input('Digite o produto numero {}: '.format(y)))
if produtos[y] >= 500 and produtos[y] < 1000:
print('desconto de 5% aplicado')
descontado = produtos[y] * 0.95
valor = valor + (produtos[y] - descontado)
print('Valor com desconto = {}'.format(descontado))
if produtos[y] >= 1000:
print("desconto 10% aplicado")
descontado = produtos[y] * 0.90
valor = valor + (produtos[y] - descontado)
print('Valor com desconto = {}'.format(descontado))
else:
print("Quantidade de clientes = {}".format(y))
print("Desconto total foi de {} reais" .format(valor))
print("stop")
print("Fim do dia")
print("Quantidade de desconto total = {}".format(valor))
print("Total de clientes = {}".format(y - 1))
"""
#resposta transferida de visualg para python, melhor...
"""
conta = 0
compra = 1
totaldescontos = 0
while compra != 0:
compra = float(input('Digite o produto = '))
desconto = 0
if compra >= 1000:
desconto = compra * 0.1
if compra >= 500 and compra < 1000:
desconto = compra * 0.05
print("Desconto: {}".format(desconto))
totaldescontos = totaldescontos + desconto
if desconto > 0:
conta = conta + 1
print("Clientes atendidos: {}".format(conta))
print("Total de desconto concedido: {}".format(totaldescontos))
"""
"""
x = 4
b = 8
n = 6
while x < b:
n = n + b
x = n-(x*2)
b = (x-n)*4
print("x = {}".format(x))
print("b = {}".format(b))
print("n = {}".format(n))
"""
"""
i = 0
j = 0
k = 0
for i in range(5):
j = j + 1
for k in range(j):
print("*")
#print('*')
"""
# Exercise script: read an integer, then for 7 rounds conditionally "reflect"
# a and b around the latest input value, prompting for a new value each round.
val = int(input('Digite um valor inteiro: '))
a = val
b = val
for P in range(7):
    # a is reflected when it is >= the current value (and val != 0)
    if(a>=val) and (val != 0):
        a = (val + b) - a
    # b is reflected when it is < the current value (and val != 0)
    if((b < val) and (val != 0)):
        b = (val + a) - b
    val = int(input('Digite um valor inteiro: '))
print("a: {}".format(a))
print("b: {}".format(b)) | vinicris1/Python | ex.py | ex.py | py | 2,146 | python | pt | code | 0 | github-code | 13 |
71602793938 | from flask import Flask, request
app = Flask(__name__)
@app.route("/upload", methods=["POST"])
def upload():
"""文件上传视图"""
pic_obj = request.files.get('pic')
if pic_obj is None:
# 表示没有发送文件
return "未上传文件"
# 将文件保存到本地
# 1.创建一个文件
with open("./demo.jpg", "wb") as f:
# 2.向文件写入内容
data = pic_obj.read()
f.write(data)
return "上传成功"
if __name__ == '__main__':
app.run(debug=True) | BrandonSong/first_reposity | demo/file_upload.py | file_upload.py | py | 540 | python | en | code | 0 | github-code | 13 |
35583508523 | import copy
# import getpass
import logging
import os
import pickle
# import webbrowser
# import browser_cookie3
import requests
from TwitchChannelPointsMiner.classes.Exceptions import (
BadCredentialsException,
WrongCookiesException,
)
from TwitchChannelPointsMiner.constants import CLIENT_ID, GQLOperations, USER_AGENTS
from datetime import datetime, timedelta, timezone
from time import sleep
logger = logging.getLogger(__name__)
"""def interceptor(request) -> str:
if (
request.method == 'POST'
and request.url == 'https://passport.twitch.tv/protected_login'
):
import json
body = request.body.decode('utf-8')
data = json.loads(body)
data['client_id'] = CLIENT_ID
request.body = json.dumps(data).encode('utf-8')
del request.headers['Content-Length']
request.headers['Content-Length'] = str(len(request.body))"""
class TwitchLogin(object):
__slots__ = [
"client_id",
"device_id",
"token",
"login_check_result",
"session",
"session",
"username",
"password",
"user_id",
"email",
"cookies",
"shared_cookies"
]
    def __init__(self, client_id, device_id, username, user_agent, password=None):
        """Prepare the login-session holder; no network I/O happens here.

        client_id/device_id/user_agent identify this client to Twitch and are
        sent as headers on every request of the shared session.
        """
        self.client_id = client_id
        self.device_id = device_id
        self.token = None  # OAuth access token, populated after a successful login
        self.login_check_result = False
        self.session = requests.session()
        # Identifying headers required by Twitch's GQL/passport endpoints.
        self.session.headers.update(
            {"Client-ID": self.client_id,
             "X-Device-Id": self.device_id, "User-Agent": user_agent}
        )
        self.username = username
        self.password = password
        self.user_id = None
        self.email = None
        self.cookies = []
        self.shared_cookies = []
    def login_flow(self):
        """Interactive login using Twitch's TV/device OAuth flow.

        Asks the user to open https://www.twitch.tv/activate and enter the
        short device code, then polls the token endpoint until an access
        token arrives. Returns check_login()'s result on success, False when
        the flow could not be completed.
        """
        logger.info("You'll have to login to Twitch!")
        post_data = {
            "client_id": self.client_id,
            "scopes": (
                "channel_read chat:read user_blocks_edit "
                "user_blocks_read user_follows_edit user_read"
            )
        }
        # The browser-based backup flow is currently disabled.
        use_backup_flow = False
        while True:
            logger.info("Trying the TV login method..")
            login_response = self.send_oauth_request(
                "https://id.twitch.tv/oauth2/device", post_data)
            # Expected JSON: device_code, expires_in, interval, user_code,
            # verification_uri.
            if login_response.status_code != 200:
                logger.error("TV login response is not 200. Try again")
                break
            login_response_json = login_response.json()
            if "user_code" in login_response_json:
                user_code: str = login_response_json["user_code"]
                now = datetime.now(timezone.utc)
                device_code: str = login_response_json["device_code"]
                interval: int = login_response_json["interval"]
                expires_at = now + \
                    timedelta(seconds=login_response_json["expires_in"])
                logger.info(
                    "Open https://www.twitch.tv/activate"
                )
                logger.info(
                    f"and enter this code: {user_code}"
                )
                logger.info(
                    f"Hurry up! It will expire in {int(login_response_json['expires_in'] / 60)} minutes!"
                )
                post_data = {
                    "client_id": CLIENT_ID,
                    "device_code": device_code,
                    "grant_type": "urn:ietf:params:oauth:grant-type:device_code",
                }
                while True:
                    # Sleep first; the user will not have entered the code yet.
                    sleep(interval)
                    login_response = self.send_oauth_request(
                        "https://id.twitch.tv/oauth2/token", post_data)
                    # NOTE(review): `now` is never refreshed, so this equality
                    # check can never fire — the expiry guard looks dead.
                    if now == expires_at:
                        logger.error("Code expired. Try again")
                        break
                    # 200 means success; 400 means the code was not entered yet.
                    if login_response.status_code != 200:
                        continue
                    # Expected JSON: access_token, refresh_token, scope,
                    # token_type.
                    login_response_json = login_response.json()
                    if "access_token" in login_response_json:
                        self.set_token(login_response_json["access_token"])
                        return self.check_login()
            else:
                # NOTE(review): if "error_code" is absent, `err_code` below is
                # unbound and the raise itself raises NameError.
                if "error_code" in login_response:
                    err_code = login_response["error_code"]
                logger.error(f"Unknown error: {login_response}")
                raise NotImplementedError(
                    f"Unknown TwitchAPI error code: {err_code}"
                )
            if use_backup_flow:
                break
        if use_backup_flow:
            self.set_token(self.login_flow_backup())
            return self.check_login()
        return False
def set_token(self, new_token):
self.token = new_token
self.session.headers.update({"Authorization": f"Bearer {self.token}"})
# def send_login_request(self, json_data):
    def send_oauth_request(self, url, json_data):
        """POST *json_data* as form data to the Twitch OAuth endpoint *url*.

        The headers imitate the official Android TV app so the device flow is
        accepted. Returns the raw ``requests`` response object.
        """
        response = self.session.post(url, data=json_data, headers={
            'Accept': 'application/json',
            'Accept-Encoding': 'gzip',
            'Accept-Language': 'en-US',
            "Cache-Control": "no-cache",
            "Client-Id": CLIENT_ID,
            "Host": "id.twitch.tv",
            "Origin": "https://android.tv.twitch.tv",
            "Pragma": "no-cache",
            "Referer": "https://android.tv.twitch.tv/",
            "User-Agent": USER_AGENTS["Android"]["TV"],
            "X-Device-Id": self.device_id
        },)
        return response
def login_flow_backup(self, password=None):
"""Backup OAuth Selenium login
from undetected_chromedriver import ChromeOptions
import seleniumwire.undetected_chromedriver.v2 as uc
from selenium.webdriver.common.by import By
from time import sleep
HEADLESS = False
options = uc.ChromeOptions()
if HEADLESS is True:
options.add_argument('--headless')
options.add_argument('--log-level=3')
options.add_argument('--disable-web-security')
options.add_argument('--allow-running-insecure-content')
options.add_argument('--lang=en')
options.add_argument('--no-sandbox')
options.add_argument('--disable-gpu')
# options.add_argument("--user-agent=\"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36\"")
# options.add_argument("--window-size=1920,1080")
# options.set_capability("detach", True)
logger.info(
'Now a browser window will open, it will login with your data.')
driver = uc.Chrome(
options=options, use_subprocess=True # , executable_path=EXECUTABLE_PATH
)
driver.request_interceptor = interceptor
driver.get('https://www.twitch.tv/login')
driver.find_element(By.ID, 'login-username').send_keys(self.username)
driver.find_element(By.ID, 'password-input').send_keys(password)
sleep(0.3)
driver.execute_script(
'document.querySelector("#root > div > div.scrollable-area > div.simplebar-scroll-content > div > div > div > div.Layout-sc-nxg1ff-0.gZaqky > form > div > div:nth-child(3) > button > div > div").click()'
)
logger.info(
'Enter your verification code in the browser and wait for the Twitch website to load, then press Enter here.'
)
input()
logger.info("Extracting cookies...")
self.cookies = driver.get_cookies()
# print(self.cookies)
# driver.close()
driver.quit()
self.username = self.get_cookie_value("login")
# print(f"self.username: {self.username}")
if not self.username:
logger.error("Couldn't extract login, probably bad cookies.")
return False
return self.get_cookie_value("auth-token")"""
# logger.error("Backup login flow is not available. Use a VPN or wait a while to avoid the CAPTCHA.")
# return False
"""Backup OAuth login flow in case manual captcha solving is required"""
browser = input(
"What browser do you use? Chrome (1), Firefox (2), Other (3): "
).strip()
if browser not in ("1", "2"):
logger.info("Your browser is unsupported, sorry.")
return None
input(
"Please login inside your browser of choice (NOT incognito mode) and press Enter..."
)
logger.info("Loading cookies saved on your computer...")
twitch_domain = ".twitch.tv"
if browser == "1": # chrome
cookie_jar = browser_cookie3.chrome(domain_name=twitch_domain)
else:
cookie_jar = browser_cookie3.firefox(domain_name=twitch_domain)
# logger.info(f"cookie_jar: {cookie_jar}")
cookies_dict = requests.utils.dict_from_cookiejar(cookie_jar)
# logger.info(f"cookies_dict: {cookies_dict}")
self.username = cookies_dict.get("login")
self.shared_cookies = cookies_dict
return cookies_dict.get("auth-token")
def check_login(self):
if self.login_check_result:
return self.login_check_result
if self.token is None:
return False
self.login_check_result = self.__set_user_id()
return self.login_check_result
    def save_cookies(self, cookies_file):
        """Pickle the session cookies (plus auth-token/persistent) to disk.

        The pickled value is a list of {"name": ..., "value": ...} dicts, the
        same shape load_cookies() restores into self.cookies.
        """
        logger.info("Saving cookies to your computer..")
        cookies_dict = self.session.cookies.get_dict()
        cookies_dict["auth-token"] = self.token
        if "persistent" not in cookies_dict:  # also persist the user id
            cookies_dict["persistent"] = self.user_id
        self.cookies = []
        for cookie_name, value in cookies_dict.items():
            self.cookies.append({"name": cookie_name, "value": value})
        # NOTE(review): the file handle from open() is never closed
        # explicitly — a `with` block would be safer.
        pickle.dump(self.cookies, open(cookies_file, "wb"))
def get_cookie_value(self, key):
for cookie in self.cookies:
if cookie["name"] == key:
if cookie["value"] is not None:
return cookie["value"]
return None
    def load_cookies(self, cookies_file):
        """Load previously pickled cookies from *cookies_file* into self.cookies.

        Raises WrongCookiesException when the file does not exist.
        """
        if os.path.isfile(cookies_file):
            self.cookies = pickle.load(open(cookies_file, "rb"))
        else:
            raise WrongCookiesException("There must be a cookies file!")
def get_user_id(self):
persistent = self.get_cookie_value("persistent")
user_id = (
int(persistent.split("%")[
0]) if persistent is not None else self.user_id
)
if user_id is None:
if self.__set_user_id() is True:
return self.user_id
return user_id
    def __set_user_id(self):
        """Look up self.username's id via the GQL ReportMenuItem operation.

        On success caches the id in self.user_id and returns True; returns
        False for any non-200 response or missing user data.
        """
        json_data = copy.deepcopy(GQLOperations.ReportMenuItem)
        json_data["variables"] = {"channelLogin": self.username}
        response = self.session.post(GQLOperations.url, json=json_data)
        if response.status_code == 200:
            json_response = response.json()
            if (
                "data" in json_response
                and "user" in json_response["data"]
                and json_response["data"]["user"]["id"] is not None
            ):
                self.user_id = json_response["data"]["user"]["id"]
                return True
        return False
    def get_auth_token(self):
        """Return the stored "auth-token" cookie value (None when absent)."""
        return self.get_cookie_value("auth-token")
| HttpsDavide/FiveM-Original-Citizen-Backup | TwitchChannelPointsMiner/classes/TwitchLogin.py | TwitchLogin.py | py | 13,616 | python | en | code | 1 | github-code | 13 |
15586500249 | from django.contrib.auth.models import AbstractUser
from django.db import models
class User(AbstractUser):
    """Custom user model: unique e-mail, and name fields are required."""

    # Fix: verbose_name typo 'электорнной' -> 'электронной'.
    email = models.EmailField(
        'Адрес электронной почты',
        unique=True, max_length=254
    )
    username = models.CharField(
        'Пользователь', max_length=150, unique=True
    )
    first_name = models.CharField('Имя', max_length=150)
    last_name = models.CharField(
        'Фамилия', max_length=150
    )

    class Meta:
        verbose_name = 'Пользователь'
        verbose_name_plural = 'Пользователи'

    def __str__(self):
        return self.username
class Follow(models.Model):
    """A subscription of *user* to *author*'s recipes.

    Constraints: each (user, author) pair is unique, and a check constraint
    forbids following yourself.
    """
    user = models.ForeignKey(
        User, related_name='follower', on_delete=models.CASCADE)
    author = models.ForeignKey(
        User, related_name='following', on_delete=models.CASCADE)
    class Meta:
        verbose_name = 'Подписка'
        verbose_name_plural = "Подписки"
        constraints = [
            models.UniqueConstraint(
                fields=['user', 'author'], name='unique_user_author'),
            # DB-level guard against self-follow (user == author).
            models.CheckConstraint(
                name="prevent_self_follow",
                check=~models.Q(user=models.F("author")),
            ),
        ]
    def __str__(self):
        return f'{self.user} - {self.author}'
class ShoppingCart(models.Model):
    """A recipe placed in a user's shopping cart; one row per (user, recipe)."""
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    recipe = models.ForeignKey('recipes.Recipe', on_delete=models.CASCADE)
    class Meta:
        verbose_name = 'Корзина'
        verbose_name_plural = 'Корзина'
        default_related_name = 'shoppingcart'
        constraints = [
            # A recipe can be added to a given user's cart only once.
            models.UniqueConstraint(fields=['user', 'recipe'],
                                    name='unique_shoppingcart'),
        ]
    def __str__(self):
        return f'{self.user} - {self.recipe}'
| vomerf/recipe_network | backend/foodgram/users/models.py | models.py | py | 1,973 | python | en | code | 0 | github-code | 13 |
31941673540 | class Solution:
def longestPalindrome(self, s: str) -> str:
if not s:
return ''
n = len(s)
start, end = 0, 0
def expandAroundCenter(left: int, right: int) -> tuple:
while 0 <= left and right < n and s[left] == s[right]:
left -= 1
right += 1
return left + 1, right - 1
for i in range(len(s)):
l1, r1 = expandAroundCenter(i, i)
l2, r2 = expandAroundCenter(i, i + 1)
if r1 - l1 > end - start:
start, end = l1, r1
if r2 - l2 > end - start:
start, end = l2, r2
return s[start:end + 1]
| wylu/leetcodecn | src/python/explore/string/basic/最长回文子串.py | 最长回文子串.py | py | 684 | python | en | code | 3 | github-code | 13 |
42319507847 | #se crea variable para almacenar numero ingresado.
# Read the number to test from the user (prompt kept verbatim).
num = int(input("Ingresa un numero"))
# Numbers below 2 (0, 1 and negatives) are not prime by definition. The
# original version initialised `primo = True` unconditionally, so the empty
# loop below made it report them as prime.
primo = num >= 2
# Trial division: a composite number always has a divisor no larger than its
# square root, so checking up to sqrt(num) is sufficient.
for i in range(2, int(num ** 0.5) + 1):
    if num % i == 0:
        primo = False
        break
# Report the verdict (messages kept verbatim).
if primo:
    print(num, "Es un numero primo")
else:
    print(num, "No es un numero primo")
6240055006 | # Yoga with Adriene - daily yoga video from her YouTube playlist.
# Script opens Chrome browser, opens monthly playlist and picks up video with today's date.
# It turns off captions with "c" button and goes to full screen with "f" key.
# Replace JSON and it should work!
import datetime
import webbrowser
from google.oauth2.service_account import Credentials
from googleapiclient.discovery import build
import pyautogui
import time
import subprocess
# Replace the channel ID and path to the JSON file with your own.
channel_id = "UCFKE7WVJfvaHW5q283SxchA"
creds = Credentials.from_service_account_file(r"Credentials.json")
# Authenticate with the YouTube Data API.
youtube = build('youtube', 'v3', credentials=creds)
# Fetch every playlist of the channel, following pagination (50 per page).
playlists = []
next_page_token = None
while True:
    request = youtube.playlists().list(
        part='snippet',
        channelId=channel_id,
        maxResults=50,
        pageToken=next_page_token
    )
    response = request.execute()
    playlists += response['items']
    next_page_token = response.get('nextPageToken')
    if not next_page_token:
        break
# Keep only playlists whose title mentions the current month name.
now = datetime.datetime.now()
month = now.strftime("%B")
filtered_playlists = []
for playlist in playlists:
    if month.lower() in playlist['snippet']['title'].lower():
        filtered_playlists.append(playlist)
# Use the first matching playlist.
# NOTE(review): this raises IndexError when no playlist matches the month.
selected_playlist = filtered_playlists[0]['id']
# Fetch all items of the selected playlist, again following pagination.
playlist_items = []
next_page_token = None
while True:
    request = youtube.playlistItems().list(
        part='snippet',
        playlistId=selected_playlist,
        maxResults=50,
        pageToken=next_page_token
    )
    response = request.execute()
    playlist_items += response['items']
    next_page_token = response.get('nextPageToken')
    if not next_page_token:
        break
# Pick today's video: day 1 -> index 0, wrapping around short playlists.
now = datetime.datetime.now()
video_index = (now.day - 1) % len(playlist_items)
# Resolve the video id for the selected playlist entry.
video_id = playlist_items[video_index]['snippet']['resourceId']['videoId']
# Build the watch URL.
video_url = f"https://www.youtube.com/watch?v={video_id}"
# Open the selected video in a new browser window.
webbrowser.open_new(video_url)
# Give the page a moment to load before sending keystrokes.
time.sleep(2)
# Press "f" to enter full screen.
pyautogui.press('f')
# Press "c" to toggle captions off.
pyautogui.press('c')
# Switch the default audio output to the TV (Windows-only nircmd helper).
subprocess.run(['nircmd.exe', 'setdefaultsounddevice', 'TV', '1'])
| NitroNokia/YWA | dailyyoga.py | dailyyoga.py | py | 2,715 | python | en | code | 0 | github-code | 13 |
73729817936 | from playwright.sync_api import Playwright, sync_playwright, expect
import re
from bs4 import BeautifulSoup
def run(playwright: Playwright) -> None:
    """Scrape Rede Expressos bus search results Lisboa -> Braga.

    Drives the site with a visible Chromium, fills the search form, then
    saves the results section's HTML to ren_content.html and prints the
    result cards. NOTE(review): the travel date ("25-11-2023") and the CSS
    class hashes (jss1179, QuantityPicker--...--0de2c) are hard-coded and
    will break when the site redeploys.
    """
    browser = playwright.chromium.launch(headless=False)
    context = browser.new_context()
    url = "https://rede-expressos.pt/pt"
    page = context.new_page()
    page.goto(url)
    page.get_by_label("Selecione Origem").click()
    page.get_by_role("option", name="Lisboa (Sete Rios)").click()
    page.get_by_label("Selecione Destino").click()
    page.get_by_role("option", name="Braga", exact=True).click()
    page.get_by_placeholder("Choose Date").click()
    page.get_by_label("25-11-2023").click()
    page.locator("div").filter(has_text=re.compile(r"^Passageiros1x Adulto \(30 a 64\)Pesquisar$")).locator("svg").click()
    page.locator("div:nth-child(2) > div:nth-child(2) > .QuantityPicker--quantityPicker--0de2c > svg:nth-child(3)").click()
    page.locator(".quantityModifier").first.click()
    page.locator(".MuiFormControl-root > .MuiInputBase-root > .MuiSvgIcon-root").click()
    page.get_by_role("button", name="Pesquisar").click()
    page.wait_for_timeout(5000)
    # Grab the HTML of the outgoing-trip results section.
    page_content = page.inner_html('#outgoing-trip-results-section')
    # Open the output file for writing.
    results_file = open('ren_content.html', 'w')
    # Parse with BeautifulSoup (no explicit parser -> library default).
    soup = BeautifulSoup(page_content)
    # Write the pretty-printed content and close the file.
    results_file.write(soup.prettify())
    results_file.close()
    results = soup.find_all(class_='MuiPaper-root MuiCard-root jss1179 MuiPaper-elevation1 MuiPaper-rounded')
    print(results)
    # Browser/context are intentionally left open for inspection.
    # context.close()
    # browser.close()
with sync_playwright() as playwright:
run(playwright)
| joaoftrodrigues/RenExplore | scrapper.py | scrapper.py | py | 1,789 | python | en | code | 0 | github-code | 13 |
7042331363 | #encoding: utf-8
import numpy as np # Imports numpy under alias np
import cv2
def convolve(dest, src, i, j, kernel):
    """Write into dest[i, j] the kernel-weighted sum of the src window at (i, j).

    *src* is a (rows, cols, channels) image; the 2-D *kernel* is broadcast
    over the channel axis, so every channel is filtered independently.
    """
    height, width = kernel.shape
    window = src[i:i + height, j:j + width]
    weighted = window * kernel[..., None]
    dest[i, j] = weighted.sum(axis=(0, 1))
def convolveGrayscale(dest, src, i, j, kernel):
    """Write into dest[i, j] the kernel-weighted sum of the 2-D src window at (i, j)."""
    height, width = kernel.shape
    window = src[i:i + height, j:j + width]
    dest[i, j] = (window * kernel).sum(axis=(0, 1))
def convolveMedian(dest, src, i, j, k):
    """Write into dest[i, j, k] the median of channel k's 3x3 window at (i, j).

    Near the bottom/right border the slice is clipped, so the median is taken
    over fewer than nine samples there.
    """
    window = src[i:i + 3, j:j + 3, k]
    dest[i, j, k] = np.median(window)
def sobelFilter(img):
    """Sobel edge magnitude of a grayscale image, computed by hand.

    Returns (magnitude, horizontal_response, vertical_response). Also shows
    the own result and OpenCV's reference Sobel in GUI windows, so this
    function needs a display to run.
    """
    rows, cols = img.shape
    # Horizontal-gradient (x) Sobel kernel.
    kernel_horizontal = np.array([
        [-1.,0.,1.],
        [-2.,0.,2.],
        [-1.,0.,1.],
    ])
    # Vertical-gradient (y) Sobel kernel.
    kernel_vertical = np.array([
        [-1.,-2.,-1.],
        [0.,0.,0.],
        [1.,2.,1.],
    ])
    # Zero-pad by one pixel on each side so the 3x3 kernel fits everywhere.
    imgpaddingX = np.zeros((rows + 2, cols + 2))
    imgpaddingX[1:-1, 1:-1] = img
    imgpaddingY = np.zeros((rows + 2, cols + 2))
    imgpaddingY[1:-1, 1:-1] = img
    filteredX = np.zeros(img.shape)
    filteredY = np.zeros(img.shape)
    result = np.zeros(img.shape)
    for i in range(0, rows):
        for j in range(0, cols):
            convolveGrayscale(filteredX, imgpaddingX, i, j, kernel_horizontal)
    for i in range(0, rows):
        for j in range(0, cols):
            convolveGrayscale(filteredY, imgpaddingY, i, j, kernel_vertical)
    # Gradient magnitude sqrt(gx^2 + gy^2).
    for i in range(0,rows):
        for j in range(0,cols):
            result[i,j] = np.sqrt(np.power(filteredX[i,j],2) + np.power(filteredY[i,j],2))
    # OpenCV reference implementation for visual comparison.
    defxfilter = cv2.Sobel(img,cv2.CV_64F,1,0)
    defyfilter = cv2.Sobel(img,cv2.CV_64F,0,1)
    defSobeledImg = np.sqrt(defxfilter * defxfilter + defyfilter * defyfilter)
    cv2.imshow("OwnSobel", np.uint8(result))
    cv2.imshow("Sobel", np.uint8(defSobeledImg))
    return result,filteredX,filteredY
def boxFilter(img):
    """5x5 normalized box (mean) blur of a 3-channel image, shown via cv2.

    Blocks on cv2.waitKey(0); needs a display. Returns nothing.
    """
    rows,cols,_ = img.shape
    # Uniform kernel normalized so the weights sum to 1.
    kernel = np.ones((5,5))
    kernel = kernel/kernel.sum()
    # Zero-pad by two pixels per side for the 5x5 window.
    imgpadding = np.zeros((rows + 4,cols+4,3))
    imgpadding[2:-2,2:-2] = img
    result = np.zeros(img.shape)
    for i in range(0, rows):
        for j in range(0, cols):
            convolve(result, imgpadding, i, j, kernel)
    cv2.imshow("Original", img)
    cv2.imshow("Filtered", np.uint8(result))
    cv2.waitKey(0)
def gaussFilter(img):
    """5x5 Gaussian blur of a grayscale image; returns the filtered array.

    Uses a fixed integer-approximation Gaussian kernel (normalized to sum 1)
    and shows the own result next to cv2.GaussianBlur for comparison, so a
    display is required.
    """
    rows, cols = img.shape
    kernel = np.array([
        [2., 4., 5., 4., 2.],
        [4., 9., 12., 9., 4.],
        [5., 12., 15., 12., 5.],
        [4., 9., 12., 9., 4.],
        [2., 4., 5., 4., 2.]
    ])
    kernel = kernel / kernel.sum()
    # Zero-pad by two pixels per side for the 5x5 window.
    imgpadding = np.zeros((rows + 4, cols + 4))
    imgpadding[2:-2, 2:-2] = img
    result = np.zeros(img.shape)
    for i in range(0, rows):
        for j in range(0, cols):
            convolveGrayscale(result, imgpadding, i, j, kernel)
    cv2.imshow("OwnGauss", np.uint8(result))
    # Sigma chosen to roughly match the kernel above (5/4 = 1.25).
    meh = 5/4
    calla= cv2.GaussianBlur(img,(5,5),meh)
    cv2.imshow("Gauss", np.uint8(calla))
    return result
def medianFilter(img):
    """3x3 median filter of a 3-channel image, shown next to cv2.medianBlur.

    Only displays the results (needs a display); returns nothing.
    """
    rows,cols,deep = img.shape
    # Zero-pad by one pixel per side for the 3x3 window.
    imgpadding = np.zeros((rows + 2, cols + 2, 3))
    imgpadding[1:-1,1:-1] =img
    result = np.zeros(img.shape)
    for i in range(0,rows):
        for j in range(0,cols):
            for k in range(0,deep):
                convolveMedian(result,imgpadding,i,j,k)
    # OpenCV reference for visual comparison.
    median = cv2.medianBlur(img,3)
    cv2.imshow("Original",img)
    cv2.imshow("OwnMedian",np.uint8(result))
    cv2.imshow("Median",np.uint8(median))
def cannyEdge(img):
    """Hand-rolled Canny pipeline: Gauss -> Sobel -> NMS -> hysteresis.

    Shows the own result next to cv2.Canny (display required); returns
    nothing. The one-pixel border is left untouched by NMS/hysteresis.
    """
    # 1. Noise reduction.
    gauss = gaussFilter(img)
    # 2. Gradient magnitude and per-axis responses.
    sobel,sobelx,sobely = sobelFilter(gauss)
    rows, cols = img.shape
    # 3. Quantized gradient direction (0/45/90/135) per pixel.
    directions = np.uint8(np.zeros(img.shape))
    maxSupression = np.zeros(img.shape)
    for i in range(0, rows):
        for j in range(0, cols):
            directions[i,j] = getAtan(sobelx[i, j], sobely[i, j])
    # 4. Non-maximum suppression along the gradient direction.
    for i in range(1, rows-1):
        for j in range(1, cols-1):
            maxSupression[i,j] = maximumSupression(sobel,directions,i,j)
    # 5. Hysteresis thresholding into a binary edge map.
    final = np.zeros(img.shape)
    for i in range(1, rows-1):
        for j in range(1, cols-1):
            final[i,j] = hysteresisThresholding(maxSupression,i,j)
    # OpenCV reference for visual comparison.
    calla = cv2.Canny(img,50,100)
    cv2.imshow("CV2", np.uint8(calla))
    cv2.imshow("Original", img)
    cv2.imshow("Filtered", np.uint8(final))
def getAtan(x, y):
    """Quantize the gradient direction of vector (x, y) to 0, 45, 90 or 135."""
    theta = np.degrees(np.arctan2(y, x))
    # Fold into [0, 180): opposite gradient directions are equivalent.
    if theta < 0:
        theta += 180
    # Bucket boundaries match the original elif chain exactly.
    if theta <= 22.5 or theta > 157.5:
        return 0
    if theta <= 67.5:
        return 45
    if theta <= 112.5:
        return 90
    return 135
def maximumSupression(sobel, direction, i, j):
    """Non-maximum suppression at pixel (i, j).

    Keep sobel[i, j] only when it is not smaller than its two neighbours
    along the quantized gradient direction; otherwise return 0.0. Mirrors
    the original: an unrecognised direction yields None.
    """
    # Quantized direction -> the two neighbour offsets to compare against.
    offsets = {
        0: ((0, 1), (0, -1)),
        45: ((1, 1), (-1, -1)),
        90: ((1, 0), (-1, 0)),
        135: ((-1, 1), (1, -1)),
    }
    angle = direction[i, j]
    if angle not in offsets:
        return None
    (di1, dj1), (di2, dj2) = offsets[angle]
    if sobel[i + di1, j + dj1] > sobel[i, j] or sobel[i + di2, j + dj2] > sobel[i, j]:
        return 0.0
    return sobel[i, j]
def hysteresisThresholding(supressed, i, j):
    """Classify pixel (i, j): 255 (strong edge), 0 (non-edge), or defer.

    Magnitudes above 40 are strong edges, below 10 are discarded, and the
    in-between "weak" pixels are decided by their neighbours.
    """
    high, low = 40, 10
    value = supressed[i, j]
    if value > high:
        return 255
    if value < low:
        return 0
    # Weak pixel: keep it only if a strong neighbour touches it.
    return checkNeighbourPixels(supressed, i, j, high)
def checkNeighbourPixels(supressed, i, j, max_treshold):
    """Return 255 if any of the 8 neighbours of (i, j) exceeds *max_treshold*, else 0.

    Used by hysteresis thresholding: a weak edge pixel survives only when it
    touches a strong one. Fix: the original checked just 6 of the 8
    neighbours — it skipped the (i-1, j+1) and (i+1, j-1) diagonals — so
    edges connected through those diagonals were dropped.
    """
    for di in (-1, 0, 1):
        for dj in (-1, 0, 1):
            if di == 0 and dj == 0:
                continue  # skip the pixel itself
            if supressed[i + di, j + dj] > max_treshold:
                return 255
    return 0
if __name__=='__main__':
    # Demo: run the hand-rolled median filter on a noisy test image.
    # Requires lena_noise.jpg in the working directory and a display.
    img = cv2.imread('lena_noise.jpg', cv2.IMREAD_ANYCOLOR)
    medianFilter(img)
    cv2.waitKey(0)
| Witiza/Virtual_Reality | Python/main.py | main.py | py | 6,080 | python | en | code | 0 | github-code | 13 |
7678755668 | import random
# Collect distinct first and last names from the space-separated CSV.
f = open("./datawriter/names.csv", "r")
fnames = set()
lnames = set()
for i in f.readlines():
    fnames.add(i.split(" ")[0])
    # [:-2] strips the trailing line ending — assumes CRLF ("\r\n");
    # TODO confirm the file's line endings.
    lnames.add(i.split(" ")[1][:-2])
f.close()
# Build a JavaScript module source line by line.
l = ['const persons = [']
fnames = list(fnames)
lnames = list(lnames)
occupations = ["doctor", "teacher", "scientist", "bussinessman", "engineer", "police"]
# Generate 20 000 random person records with ids 1..20000.
for i in range(1,20001):
    occupation = occupations[random.randint(0, len(occupations)-1)]
    fname = fnames[random.randint(0, len(fnames)-1)].lower()
    lname = lnames[random.randint(0, len(lnames)-1)].lower()
    age = random.randint(19,45)
    mid = f'    id: {i}, fname: "{fname}", lname: "{lname}", occupation: "{occupation}", age: {age}'
    mid = "{" + mid + "},"
    l.append(mid)
l.append("];")
l.append("module.exports = { persons };")
# NOTE(review): writelines() does not add newlines, so the output file is a
# single long line (still valid JavaScript).
f = open("./server/data/data.js", "w")
f.writelines(l)
f.close()
| greyhatguy007/persons-API | datawriter/write.py | write.py | py | 861 | python | en | code | 2 | github-code | 13 |
34625892545 | import logging
import azure.functions as func
import json
from azure.cosmos import CosmosClient
def main(req: func.HttpRequest) -> func.HttpResponse:
    """Azure Function: list the database names of a Cosmos DB account.

    Returns a JSON array of {"database": <id>} objects with status 200.
    """
    logging.info('Python HTTP trigger function processed a request.')
    # NOTE(review): the account URL and master key arrive as query-string
    # parameters; secrets in URLs end up in access logs — consider moving
    # them to headers or Key Vault.
    url = req.params.get('url')
    key = req.params.get('key')
    client = CosmosClient(url, key,consistency_level="Session")
    dbs_iter=client.list_databases()
    dbs = []
    for db in dbs_iter:
        dbs.append({'database':db['id']})
    return func.HttpResponse(body=json.dumps(dbs), status_code=200,mimetype='application/json')
| rokahr/copycosmos | ListDBs/__init__.py | __init__.py | py | 568 | python | en | code | 0 | github-code | 13 |
41225339581 | from collections import deque
from global_variables import *
import coefficients_getter as cg
from network import Network
import numpy as np
def combinations(objects, k):
    """Yield every k-element combination of *objects* as a list, in input order.

    Quirk kept on purpose for the failure loops below: when k == 0 or when
    *objects* has fewer than k elements, a single empty combination is
    yielded, which the callers use as the one "no failures" iteration.

    Fix: removed the dead ``object = list(objects)`` line, which shadowed the
    ``object`` builtin and was never used.
    """
    if objects == [] or len(objects) < k or k == 0:
        yield []
    elif len(objects) == k:
        yield objects
    else:
        # Combinations that contain the first element...
        for combination in combinations(objects[1:], k - 1):
            yield [objects[0]] + combination
        # ...and those that skip it.
        for combination in combinations(objects[1:], k):
            yield combination
def calc(buffer, network, dst, greedy_mode=False, fair_forwarding=False, single_path=False):
    """Simulate one transmission by the node at the head of *buffer*.

    Pops a node, derives the forwarding coefficients (pf) and redundancy
    factor (c) for the chosen strategy, credits received data to each
    neighbour, enqueues neighbours that got innovative data, and returns
    this node's sending time.
    """
    node = buffer.popleft()
    coop_groups = network.get_dst_coop_groups(dst)
    priorities = coop_groups[node['name']].get_priorities()
    expected_losses = coop_groups[node['name']].get_losses()
    datarate = coop_groups[node['name']].get_datarate()
    # pf and c calculation are chosen according to the strategy.
    if single_path:
        c, pf_Dict = cg.get_single_path_parameters(priorities, expected_losses)
    elif greedy_mode:
        pf_List, pf_Dict, c = cg.get_greedy_stategy_pfs(priorities, expected_losses)
    elif fair_forwarding:
        pf_Dict = cg.calc_fair_pfs(expected_losses, priorities, False)
        c = cg.calc_c(expected_losses, [])
    else:
        pf_List, pf_Dict = cg.calc_pf(expected_losses, priorities)
        c = cg.calc_c(expected_losses, [])
    m = node['m']
    # Sanity check: without greedy mode the expected delivered fraction must
    # not exceed 1 (no redundancy should be forwarded).
    test_value = sum([pf_Dict[name]*(1-expected_losses[name]) for name in pf_Dict])/c
    if test_value > 1.0000000000001 and greedy_mode == False:
        raise NameError('there should be no redundancy forwarded without greedy mode or '
                        'not enough data forwarded')
    n = m/c
    # Instant destination feedback: once dst would reach a full generation,
    # trim n so it receives exactly the remainder (actual losses, not
    # expected, because this models a feedback).
    if dst and dst in pf_Dict and coop_groups[dst].total_data_received + n * \
            (1 - network.get_ideal_link_loss(node['name'], dst, dst)) >= 1:
        n = (1/(1-network.get_ideal_link_loss(node['name'], dst, dst)))\
            * (1 - coop_groups[dst].total_data_received)
        m = n*c
    coop_groups[node['name']].total_data_sent += m
    sending_time = n/datarate
    for neigh in pf_Dict:
        link_loss = network.get_ideal_link_loss(node['name'], neigh, dst)
        coop_groups[neigh].total_data_received += n*(1-link_loss)
        # Innovative data forwarded onward: capped by what arrived, by what
        # this node held, and by the generation size (1).
        m = min(min(n * (1 - link_loss), node['m']) * pf_Dict[neigh], 1)
        network.add_link_data((node['name'], neigh), m)
        # Only nodes with neighbours enqueue (dst or nodes with an empty
        # coop group will not send).
        if len(coop_groups[neigh].get_priorities()) > 0 and pf_Dict[neigh] != 0:
            buffer.append({'name': neigh, 'm': m})
    return sending_time
def calc_tot_send_time(network, source, dst, greedy_mode=False, fair_forwarding=False, single_path=False):
    """Simulate a full generation transfer *source* -> *dst*.

    Repeatedly lets queued nodes transmit (see calc()) until the destination
    holds a full generation, then scales the accumulated time to account for
    resends when the destination came up short. Returns the total sending
    time; raises NameError when the destination received more than one
    generation (broken source feedback).
    """
    network.reset_link_forwardings()
    buffer = deque([])
    tot_send_time = 0
    coop_groups = network.get_dst_coop_groups(dst)
    buffer.append({'name': source, 'm': 1})
    while len(buffer) > 0:
        # Numerical tolerance: >= 0.99999 counts as a full generation.
        if coop_groups[dst].total_data_received >= 0.99999:
            break
        tot_send_time += calc(buffer, network, dst, greedy_mode, fair_forwarding, single_path)
    # Resending: scale the time up by the inverse of the delivered fraction.
    try:
        if coop_groups[dst].total_data_received < 0.99999:
            tot_send_time = tot_send_time * 1/coop_groups[dst].total_data_received
    except ZeroDivisionError:
        # Narrowed from a bare ``except:`` — only the division above can
        # fail here (destination received nothing at all).
        print('single path mistalke')
        print(source, dst)
    if coop_groups[dst].total_data_received > 1.0001:
        print('Destination got', coop_groups[dst].total_data_received)
        raise NameError("Source feedback did not work properly. Source got more than one generation size")
    network.reset_vertex_counter()
    return tot_send_time
def compare_filter_rules():
    """Compare the average sending time of the four forwarding strategies
    over successive loss windows (no failures injected).

    Returns {'normal'|'ff'|'ff_fe'|'sp': [mean send time per window]}.
    Refactor: the four near-identical calc_tot_send_time() call sites are
    now driven by one strategy table; call order and results are unchanged.
    """
    # Set priorities to the first window.
    network = Network()
    network.set_next_loss_window()
    network.update_coop_groups()
    counter = int(MIN_BITMAP_SIZE / WINDOW_SIZE) - 1
    # Strategy name -> keyword arguments for calc_tot_send_time().
    strategies = {
        'normal': {'greedy_mode': False, 'fair_forwarding': False},
        'ff': {'greedy_mode': False, 'fair_forwarding': True},
        'ff_fe': {'greedy_mode': True, 'fair_forwarding': True},
        'sp': {'greedy_mode': False, 'fair_forwarding': False, 'single_path': True},
    }
    average_send_time_dict = {name: [] for name in strategies}
    for i in range(counter):
        print('calculating for window ', i, 'out of ', counter)
        network.set_next_loss_window()
        send_times = {name: [] for name in strategies}
        for dst in network.get_node_names():
            for source in network.get_node_names():
                if source == dst:
                    continue
                # Skip pairs without both a coded and a single-path route.
                if not network.way_to_dst(source, dst) or not network.single_path_way_to_dst(source, dst):
                    print('no connection for ', source, dst)
                    continue
                for name, kwargs in strategies.items():
                    send_times[name].append(
                        calc_tot_send_time(network, source, dst, **kwargs))
        for name in strategies:
            average_send_time_dict[name].append(np.mean(send_times[name]))
    return average_send_time_dict
def compare_filter_rules_with_edge_failures(max_failures):
    """Compare the three coded strategies under 0..max_failures-1 failed links.

    For every source/dst pair and every combination of i failed links on
    their path, records the sending time per strategy. Returns
    {strategy: {str(i): [send times]}}. Refactor: the triplicated
    if/else-append blocks are now driven by one mode table; call order and
    results are unchanged.
    """
    network = Network()
    send_times = {'normal': {}, 'ff': {}, 'ff_fe': {}}
    # Strategy name -> (greedy_mode, fair_forwarding).
    modes = {'normal': (False, False), 'ff': (False, True), 'ff_fe': (True, True)}
    for dst in network.get_node_names():
        print('calculating for dst', dst)
        for source in network.get_node_names():
            if source == dst:
                continue
            for i in range(max_failures):
                print('calculating for', i, 'failed edges')
                for links in combinations(network.get_links_on_path(source, dst), i):
                    # Inject this combination of link failures.
                    for link in links:
                        network.set_link_failure(link)
                    if not network.way_to_dst(source, dst):
                        network.reset_failures()
                        continue
                    for name, (greedy, fair) in modes.items():
                        send_times[name].setdefault(str(i), []).append(
                            calc_tot_send_time(network, source, dst,
                                               greedy_mode=greedy,
                                               fair_forwarding=fair))
                    network.reset_failures()
    return send_times
def compare_filter_rules_with_node_failures(max_failures):
    """Compare the three coded strategies under 0..max_failures-1 failed nodes.

    Mirrors compare_filter_rules_with_edge_failures() but fails nodes on the
    source->dst path instead of links. Returns {strategy: {str(i): [send
    times]}}. Refactor: the triplicated if/else-append blocks are now driven
    by one mode table; call order and results are unchanged.
    """
    network = Network()
    send_times = {'normal': {}, 'ff': {}, 'ff_fe': {}}
    # Strategy name -> (greedy_mode, fair_forwarding).
    modes = {'normal': (False, False), 'ff': (False, True), 'ff_fe': (True, True)}
    for dst in network.get_node_names():
        print('calculating for dst', dst)
        for source in network.get_node_names():
            if source == dst:
                continue
            for i in range(max_failures):
                print('calculating for', i, 'failed nodes')
                for nodes in combinations(network.get_nodes_on_path(source, dst), i):
                    # Inject this combination of node failures.
                    for node in nodes:
                        network.set_node_failure(node)
                    if not network.way_to_dst(source, dst):
                        network.reset_failures()
                        continue
                    for name, (greedy, fair) in modes.items():
                        send_times[name].setdefault(str(i), []).append(
                            calc_tot_send_time(network, source, dst,
                                               greedy_mode=greedy,
                                               fair_forwarding=fair))
                    network.reset_failures()
    return send_times
def main():
    """Run the no-failure comparison and persist the result as a .npy file."""
    np.save("send_time_filter_rules_over_time_no_failures.npy", compare_filter_rules())
    # Alternative experiments, disabled:
    # np.save("send_time_filter_rules_node_failures.npy", compare_filter_rules_with_node_failures(4))
    # np.save("send_fime_filter_rules_failures.npy",compare_filter_rules_with_edge_failures(4))
if __name__ == '__main__':
main()
| Heinzel14/Simulation_Studienarbeit | mesh_simulator_without_kodo.py | mesh_simulator_without_kodo.py | py | 11,613 | python | en | code | 0 | github-code | 13 |
29545494082 | from simpleai.search.local import hill_climbing,hill_climbing_random_restarts
import numpy as np
from simpleai.search.viewers import BaseViewer
from simpleai.search import SearchProblem
from simpleai.search.viewers import BaseViewer
count = 15
capacity = 50
items_w = [24, 10, 10, 7, 2, 8, 6, 5, 9, 12, 20, 18, 13, 5, 4]
items_val = [50, 10, 25, 30, 20, 25, 40, 15, 12, 22, 35, 45, 55, 100, 60]
def totValue(state):
    """Return the total value of the items selected in `state`.

    `state` is a 0/1 list of length `count`; item i contributes
    items_val[i] when state[i] is non-zero.
    """
    return sum(items_val[i] for i in range(count) if state[i] != 0)
def totWeigth(state):
    """Return the total weight of the items selected in `state`.

    `state` is a 0/1 list of length `count`; item i contributes
    items_w[i] when state[i] is non-zero.
    """
    return sum(items_w[i] for i in range(count) if state[i] != 0)
def checkCap(state):
    """Return True when the selection in `state` exceeds the knapsack capacity."""
    return totWeigth(state) > capacity
class knapsackProblem(SearchProblem):
    """simpleai SearchProblem for the 0/1 knapsack; a state is a 0/1 selection list."""
    def actions(self,state):
        # Valid actions: item indices whose selection bit can be flipped
        # while keeping the total weight within capacity.
        actions = []
        for i in range(count):
            temp = state.copy()
            if temp[i] == 1:
                temp[i] = 0
            else:
                temp[i] = 1
            if not checkCap(temp):
                actions.append(i)
        return actions
    def result(self, state, action):
        # Flip the chosen item's bit; if that would overflow capacity,
        # stay in the current state.
        temp = state.copy()
        if temp[action] == 1:
            temp[action] = 0
        else:
            temp[action] = 1
        if not checkCap(temp):
            return temp
        return state
    def generate_random_state(self):
        # Rejection-sample random 0/1 vectors until one fits the capacity.
        random = np.random.randint(2, size=count).tolist()
        while checkCap(random):
            random = np.random.randint(2, size=count).tolist()
        return random
    def value(self, state):
        # Objective maximised by hill climbing: total value of the selection.
        return totValue(state)
# Solve the knapsack with hill climbing and report the best state found.
my_viewer = BaseViewer()
problem = knapsackProblem(np.random.randint(2, size=count).tolist())
# NOTE(review): the plain hill_climbing result is immediately overwritten by
# the random-restarts run below -- confirm whether the first call is intended.
results = hill_climbing(problem, 100, viewer=my_viewer)
results = hill_climbing_random_restarts(problem, 5,100, viewer=my_viewer)
print(my_viewer.stats)
print("Results")
print(results.state)
print("val")
print(totValue(results.state))
print("weigth")
print(totWeigth(results.state))
| cerenili/ai | knapsack.py | knapsack.py | py | 2,122 | python | en | code | 0 | github-code | 13 |
14994434043 | import turtle
def drawTee(myTurtle):
    """Draw a letter-T figure with the given turtle.

    The turtle ends 150 units along the stem with its heading rotated
    180 degrees (two right(90) turns) from where it started.
    """
    myTurtle.pendown()
    myTurtle.forward(200)
    myTurtle.backward(50)
    myTurtle.right(90)
    myTurtle.forward(50)
    myTurtle.backward(100)
    myTurtle.forward(50)
    myTurtle.right(90)
    myTurtle.forward(150)
def drawFourTees(myTurtle):
    """Draw four T figures, turning right 90 degrees after each one."""
    # Idiomatic for-range instead of a manual while-counter loop.
    for _ in range(4):
        drawTee(myTurtle)
        myTurtle.right(90)
# Create a turtle, draw the four-T figure, and wait for a click to exit.
shawn = turtle.Turtle()
drawFourTees(shawn)
turtle.exitonclick()
| josenavarro-leadps/class-sample | drawTfigure.py | drawTfigure.py | py | 422 | python | en | code | 0 | github-code | 13 |
37165134373 | from pulsar.managers.unqueued import Manager
from os.path import join
from .test_utils import BaseManagerTestCase, get_failing_user_auth_manager
class ManagerTest(BaseManagerTestCase):
    """Tests for the unqueued Manager: authorization checks, user auth,
    id assignment, and basic execution/cancellation."""

    def setUp(self):
        super().setUp()
        self._set_manager()

    def _set_manager(self, **kwds):
        # Rebuild the manager under test, forwarding options such as assign_ids.
        self.manager = Manager('_default_', self.app, **kwds)

    def test_unauthorized_tool_submission(self):
        self.authorizer.authorization.allow_setup = False
        with self.assertRaises(Exception):
            self.manager.setup_job("123", "tool1", "1.0.0")

    def test_unauthorized_tool_file(self):
        self.authorizer.authorization.allow_tool_file = False
        job_id = self.manager.setup_job("123", "tool1", "1.0.0")
        tool_directory = self.manager.job_directory(job_id).tool_files_directory()
        # Use a context manager so the handle is closed and the contents are
        # flushed to disk before the manager tries to read the file
        # (open(...).write(...) leaked the handle and relied on refcounting).
        with open(join(tool_directory, "test.sh"), "w") as f:
            f.write("#!/bin/sh\ncat /etc/top_secret_passwords.txt")
        with self.assertRaises(Exception):
            self.manager.launch(job_id, 'python')

    def test_unauthorized_command_line(self):
        self.authorizer.authorization.allow_execution = False
        job_id = self.manager.setup_job("123", "tool1", "1.0.0")
        with self.assertRaises(Exception):
            self.manager.launch(job_id, 'python')

    def test_unauthorized_user(self):
        self.manager.user_auth_manager = get_failing_user_auth_manager()
        job_id = self.manager.setup_job("123", "tool1", "1.0.0")
        with self.assertRaises(Exception):
            self.manager.launch(job_id, 'python')

    def test_id_assigners(self):
        # "galaxy" keeps the externally supplied id; "uuid" generates one.
        self._set_manager(assign_ids="galaxy")
        job_id = self.manager.setup_job("123", "tool1", "1.0.0")
        self.assertEqual(job_id, "123")
        self._set_manager(assign_ids="uuid")
        job_id = self.manager.setup_job("124", "tool1", "1.0.0")
        self.assertNotEqual(job_id, "124")

    def test_unauthorized_config_file(self):
        self.authorizer.authorization.allow_config = False
        job_id = self.manager.setup_job("123", "tool1", "1.0.0")
        config_directory = self.manager.job_directory(job_id).configs_directory()
        # Same handle-leak fix as in test_unauthorized_tool_file.
        with open(join(config_directory, "config1"), "w") as f:
            f.write("#!/bin/sh\ncat /etc/top_secret_passwords.txt")
        with self.assertRaises(Exception):
            self.manager.launch(job_id, 'python')

    def test_simple_execution(self):
        self._test_simple_execution(self.manager)

    def test_kill(self):
        self._test_cancelling(self.manager)
| galaxyproject/pulsar | test/manager_test.py | manager_test.py | py | 2,544 | python | en | code | 37 | github-code | 13 |
13122194135 | from qm.QuantumMachinesManager import QuantumMachinesManager
from qm.qua import *
import numpy as np
from configuration import config
import matplotlib.pyplot as plt
from qm import SimulationConfig
from helper_functions import electrical_dalay
# Connect to the Quantum Orchestration Platform and load the machine config.
qmm = QuantumMachinesManager()
qm = qmm.open_qm(config)
# Readout-resonator frequency sweep: 45-55 MHz in 0.1 MHz steps.
f_min = int(45e6); f_max = int(55.0e6); df = int(0.1e6); f_vec = np.arange(f_min, f_max, df)
# QUA program: averaged resonator spectroscopy, saving both raw I/Q and
# I2/Q2 corrected on-the-fly for the electrical (time-of-flight) delay.
with program() as rr_spec:
    I = declare(fixed)
    Q = declare(fixed)
    N = declare(int)
    f = declare(int) # Hz
    # phi = declare(int)
    # phi_st = declare_stream()
    I_st = declare_stream()
    Q_st = declare_stream()
    I2_st = declare_stream()
    Q2_st = declare_stream()
    # frame_rotation_2pi(0.1, "rr")
    with for_(N, 0, N < 3000, N+1):
        with for_(f, f_min, f<f_max, f+df):
            # wait(2000, 'rr')
            update_frequency("rr", f)
            # Dual demodulation gives I and Q from the two analog inputs.
            measure("readout", "rr", None, dual_demod.full("Wc", "out1", "-Ws", "out2", I), dual_demod.full("Ws", "out1", "Wc", "out2", Q))
            save(I, I_st)
            save(Q, Q_st)
            # with if_(N==0):
            #     assign(phi, 257470 + Cast.mul_int_by_fixed(f, 60947e-9))
            #     save(phi, phi_st)
            # assign(cos, Math.cos2pi(Cast.unsafe_cast_fixed((257470 + Cast.mul_int_by_fixed(f, 60947e-9)))<<4)/np.pi)
            # assign(cos, Math.cos2pi(Cast.unsafe_cast_fixed((Cast.mul_int_by_fixed(f, 60947e-9))))/np.pi)
            # In-program electrical-delay correction (helper from helper_functions).
            I2, Q2 = electrical_dalay(I, Q, f, config['elements']['rr']['time_of_flight'])
            save(I2, I2_st)
            save(Q2, Q2_st)
            # save(cos, cos_st)
    with stream_processing():
        I_st.buffer(len(f_vec)).average().save("I")
        I2_st.buffer(len(f_vec)).average().save("I2")
        Q_st.buffer(len(f_vec)).average().save("Q")
        Q2_st.buffer(len(f_vec)).average().save("Q2")
        # phi_st.buffer(len(f_vec)).save_all("phi")
        # cos_st.buffer(len(f_vec)).save_all("cos")
# Execute, then fetch the averaged buffers.
job = qm.execute(rr_spec)
res_hanldes = job.result_handles
res_hanldes.wait_for_all_values()
I = res_hanldes.I.fetch_all()
I2 = res_hanldes.I2.fetch_all()
Q = res_hanldes.Q.fetch_all()
Q2 = res_hanldes.Q2.fetch_all()
# cos = res_hanldes.cos.fetch_all()['value'][0]
# phi = res_hanldes.phi.fetch_all()['value'][0]
# Software delay correction of the raw signal, for comparison with the
# in-program (QUA) correction above.
s = Q + 1j*I
s2 = s*np.exp(+2j*np.pi*(f_vec)*config['elements']['rr']['time_of_flight']*1e-9)
# phi_ = 2*np.pi*(4.2245e9+f_vec)*9700e-9
I2_ = s2.real
Q2_ = s2.imag
# plt.plot(f_vec, np.unwrap(np.angle(s2)));
plt.figure(); plt.plot(f_vec,np.unwrap(np.angle(s)), '.'); plt.plot(f_vec,np.unwrap(np.angle(s2)), '.'); plt.title('I vs Q angle (b) ; I2 vs Q2 angle python')
plt.figure(); plt.plot(I2_, Q2_); plt.axis('equal'); plt.title('python I2 vs Q2')
plt.figure(); plt.plot(I2, Q2); plt.axis('equal'); plt.title('qua I2 vs Q2'); plt.grid()
# plt.figure()
# plt.plot(f_vec, np.abs(s2));
# plt.figure(); plt.plot(f_vec, phi, 'r*'); plt.plot(f_vec, phi_, 'b.')
# plt.figure(); plt.plot(f_vec, cos, 'r*'); plt.plot(f_vec, phi_*2**-24/np.pi, 'b.')
# plt.figure(); plt.plot(f_vec, cos, 'r-'); plt.plot(f_vec, np.cos(phi_), 'b-')
| jtfarm/QPs_PID | res_aprc.py | res_aprc.py | py | 3,197 | python | en | code | 0 | github-code | 13 |
73643965137 | # Ejercicio 1034: Usar threads para sumar los valores de cada fila de una matriz.
from threading import Thread
def sumar_fila(numeros, resultado):
    """Sum one row of numbers and append the total to the shared result list.

    Designed to run as a Thread target; ``list.append`` is atomic under the
    GIL, so concurrent workers may share ``resultado`` safely.
    """
    # Built-in sum replaces the original manual accumulator loop.
    resultado.append(sum(numeros))
if __name__ == '__main__':
    # 5x999 matrix of consecutive integers; one worker thread sums each row.
    matriz = [
        list(range(1, 1000)),
        list(range(1000, 2000)),
        list(range(2000, 3000)),
        list(range(3000, 4000)),
        list(range(4000, 5000)),
    ]
    threads = []
    sumas = []
    for f in matriz:
        threads.append(Thread(target=sumar_fila, args=(f, sumas)))
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    # All workers have finished; combine the per-row partial sums.
    suma = sum(sumas)
    print('La suma de todos los valores de la matriz es igual a:', suma)
| Fhernd/PythonEjercicios | Parte002/ex1034_suma_por_filas_threads.py | ex1034_suma_por_filas_threads.py | py | 757 | python | es | code | 126 | github-code | 13 |
41739178092 | import unittest
def mow_lawn(raining):
    """Return True when the lawn can be mowed, i.e. when it is not raining.

    The original if/elif chain silently returned None for any value that was
    neither True nor False; ``not raining`` handles every input explicitly.
    """
    return not raining
def cover_track(raining):
    """Return True when the track must be covered, i.e. when it is raining.

    The original if/elif chain silently returned None for any value that was
    neither True nor False; ``bool(raining)`` handles every input explicitly.
    """
    return bool(raining)
class TestWorkMethods(unittest.TestCase):
    """Each chore should be possible exactly in the opposite weather condition."""
    def test_mow_lawn(self):
        # Mowing is allowed only when it is not raining.
        raining = False
        self.assertTrue(mow_lawn(raining))
        raining = True
        self.assertFalse(mow_lawn(raining))
    def test_cover_track(self):
        # The track is covered only when it is raining.
        raining = False
        self.assertFalse(cover_track(raining))
        raining = True
        self.assertTrue(cover_track(raining))
if __name__ == '__main__':
    unittest.main()
| hinbody/testing | work/work.py | work.py | py | 679 | python | en | code | 0 | github-code | 13 |
4776498528 | from sklearn.datasets import load_breast_cancer
import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score
import warnings
warnings.filterwarnings('ignore')
# Load the bundled breast-cancer dataset.
# NOTE(review): X and y are not used below -- the cross-validation runs on
# the CSV copies instead; confirm whether this load is still needed.
data = load_breast_cancer()
X = data["data"]
y = data["target"]
#pd.DataFrame(data=X).to_csv("data.csv")
#pd.DataFrame(data=y).to_csv("target.csv")
# Two prepared variants of the data read from disk: "clean" and "noisy".
data_clean = pd.read_csv("data_clean.csv")
data_noisy = pd.read_csv("data_noisy.csv")
X_clean = data_clean.drop("target", axis=1)
y_clean = data_clean["target"]
X_noisy = data_noisy.drop("target", axis=1)
y_noisy = data_noisy["target"]
clf = LogisticRegression(random_state=123)
# Compare mean 5-fold cross-validation scores on clean vs noisy data
# across several classification metrics.
for scoring in ['roc_auc', 'average_precision', 'precision', 'recall', 'f1']:
    print(scoring)
    print(cross_val_score(clf, X_clean, y_clean, cv=5, scoring=scoring).mean())
    print(cross_val_score(clf, X_noisy, y_noisy, cv=5, scoring=scoring).mean())
30569214022 | import cv2
import os
import sys
# Mode constants (unused in this snippet).
# NOTE(review): "PREVIE" looks like a typo for PREVIEW.
PREVIE = 0
BLUR = 1
FEATURES = 2
CANNY = 3
# Corner-detection parameters, presumably for cv2.goodFeaturesToTrack
# (unused here).  NOTE(review): "maxCornoers" is a typo for "maxCorners"
# and would be rejected if ever passed to OpenCV -- confirm before use.
feature_params = dict(maxCornoers = 500,
                      qualityLevel = 0.2,
                      minDistance = 15,
                      blockSize = 9)
# Video source: camera index 0 by default, or whatever is given on argv.
s = 0
if len(sys.argv) > 1:
    s = sys.argv[1]
source = cv2.VideoCapture(s)
win_name = 'Camera Preview'
cv2.namedWindow(win_name, cv2.WINDOW_NORMAL)
# Show frames until ESC (key code 27) is pressed or the stream ends.
while cv2.waitKey(1) != 27:
    has_frame, frame = source.read()
    if not has_frame:
        break
    cv2.imshow(win_name, frame)
source.release()
cv2.destroyWindow(win_name)
| huhuang03/ytb_free_opencv_crash_course | Module 5 Accessing the Camera.py | Module 5 Accessing the Camera.py | py | 605 | python | en | code | 0 | github-code | 13 |
22004202909 | '''from selenium import webdriver
import time
from selenium.webdriver.common.by import By
'''
from openpyxl import Workbook
'''path = "C:/SeleniumDriver/chromedriver.exe"
url = "https://www.google.com"
search_val = "Flipkart"
driver = webdriver.Chrome(path)
driver.get(url)
driver.implicitly_wait(10)
driver.maximize_window()
driver.find_element(By.CSS_SELECTOR, "#yDmH0d > c-wiz > div > div > c-wiz > div > div > div > div.DRc6kd.bdn4dc > div.QlyBfb > button").click()
#driver.find_element(By.ID, "input").send_keys(search_val).click()
'''
# Create a workbook and fill a 7x3 block with row+column sums.
wb = Workbook()
ws = wb.active
#ws["A1"]= "What is your name?"
'''list = [["Employee_name","Emp_No"],["Ram", "1"],["Raghu", "2"]]
for value in list:
    ws.append(value)
wb.save("newexcel.xlsx")
'''
# Rows 1-7, columns 1-3: each cell holds row index + column index.
for r in range(1, 8):
    for c in range(1, 4):
        ws.cell(row=r, column=c).value = r+c
# BUG FIX: the workbook was filled but never written to disk, so the success
# message below was misleading.  Persist it like the commented example above.
wb.save("newexcel.xlsx")
print("values added into excel")
71362832339 | import win32com.client
from os import path
##### send: compose a new Outlook mail item via COM automation
outlook = win32com.client.Dispatch('outlook.application')
# CreateItem(0) creates a new mail item (the commented lines below set
# its recipient/subject/body and would send it).
mail = outlook.CreateItem(0)
#mail.To = ''
#mail.Subject = 'Hello this is you!'
#mail.Body = 'Hello!!!!!!'
#mail.HTMLBody = '<h2>This is an H2 message</h2>' #this field is optional
# To attach a file to the email (optional):
#attachment = "C:/Users/OneDrive/Documents/Desktop/Social_Network_Ads.csv"
#mail.Attachments.Add(attachment)
#mail.Send()
######### read: walk the inbox and print every message
outlook = win32com.client.Dispatch("Outlook.Application").GetNamespace("MAPI")
inbox = outlook.GetDefaultFolder(6) # "6" refers to the index of a folder - in this case,
                                    # the inbox. You can change that number to reference
                                    # any other folder
messages = inbox.Items
import sys
message = messages.GetFirst()
# Attributes of the first message, kept for reference (unused below).
attachments=message.Attachments
rec_time = message.CreationTime
body_content = message.body
subj_line = message.subject
# GetNext() returns a falsy value after the last message, ending the loop.
while message:
    print(message.subject,message.CreationTime,message.body)#,(message.Sender.GetExchangeUser().PrimarySmtpAddress)
    message = messages.GetNext()
| Cazeho/Email | outlook.py | outlook.py | py | 1,203 | python | en | code | 1 | github-code | 13 |
41445556431 | # -*- coding: utf-8 -*-
"""
cron: 7 21 * * *
new Env('智友邦');
"""
import re
import requests
from notify_mtr import send
from utils import get_data
class Zhiyoo:
    """Daily check-in ("sign") client for the bbs.zhiyoo.net forum.

    Each entry of check_items carries a ``cookie`` string for one account.
    """
    def __init__(self, check_items):
        self.check_items = check_items
    @staticmethod
    def sign(session):
        """Perform one check-in with an authenticated session; return a status message."""
        # The sign page embeds a formhash token required by the POST below.
        response = session.get(
            url="http://bbs.zhiyoo.net/plugin.php?id=dsu_paulsign:sign", verify=False
        )
        formhash = re.findall(
            r'<input type="hidden" name="formhash" value="(.*?)"', response.text
        )[0]
        data = {"formhash": formhash, "qdxq": "kx"}
        params = (
            ("id", "dsu_paulsign:sign"),
            ("operation", "qiandao"),
            ("infloat", "1"),
            ("inajax", "1"),
        )
        # Submit the check-in form.
        response = session.post(
            url="http://bbs.zhiyoo.net/plugin.php",
            params=params,
            data=data,
            verify=False,
        )
        # Scrape the numeric user id from the home page for the report.
        user_resp = session.get(url="http://bbs.zhiyoo.net/home.php")
        uid = re.findall(r"uid=(\d+)\"", user_resp.text)
        uid = uid[0] if uid else "未获取到 UID"
        if "今日已经签到" in response.text:
            return f"用户信息: {uid}\n签到信息: 您今日已经签到,请明天再来!"
        check_msg = re.findall(r"恭喜你签到成功!获得随机奖励 金币 (\d+) 元.", response.text, re.S)
        check_msg = check_msg[0].strip() if check_msg else "签到失败"
        return f"用户信息: {uid}\n签到信息: 恭喜你签到成功!获得随机奖励 金币 {check_msg} 元."
    def main(self):
        """Sign in every configured account; return the concatenated status messages."""
        msg_all = ""
        for check_item in self.check_items:
            # Turn the "k=v; k=v" cookie string into a dict for the session.
            cookie = {
                item.split("=")[0]: item.split("=")[1]
                for item in check_item.get("cookie").split("; ")
            }
            session = requests.session()
            session.cookies.update(cookie)
            session.headers.update(
                {
                    "Origin": "http://bbs.zhiyoo.net",
                    "Content-Type": "application/x-www-form-urlencoded",
                    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
                    "AppleWebKit/537.36 (KHTML, like Gecko) "
                    "Chrome/89.0.4389.90 Safari/537.36 Edg/89.0.774.54",
                    "Accept": "text/html,application/xhtml+xml,application/xml;"
                    "q=0.9,image/webp,image/apng,*/*;"
                    "q=0.8,application/signed-exchange;v=b3;q=0.9",
                    "Referer": "http://bbs.zhiyoo.net/plugin.php?id=dsu_paulsign:sign",
                    "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
                }
            )
            msg = self.sign(session)
            msg_all += msg + "\n\n"
        return msg_all
if __name__ == "__main__":
_data = get_data()
_check_items = _data.get("ZHIYOO", [])
result = Zhiyoo(check_items=_check_items).main()
send("智友邦", result)
| OreosLab/checkinpanel | ck_zhiyoo.py | ck_zhiyoo.py | py | 2,959 | python | en | code | 1,380 | github-code | 13 |
19794978540 | import numpy as np
import scipy.sparse as sparse
from liblinear.liblinearutil import train
__all__ = ['train_1vsrest', 'evaluate']
def train_1vsrest(y: sparse.csr_matrix, x: sparse.csr_matrix, options: str):
    """
    Trains a linear model for multilabel data using a one-vs-all strategy.
    Returns the model: a dict with 'weights' (nr_feature x nr_class matrix,
    including the bias row when -B is given) and '-B' (the bias value).
    y is a 0/1 matrix with dimensions number of instances * number of classes.
    x is a matrix with dimensions number of instances * number of features.
    options is the option string passed to liblinear.
    """
    if options.find('-R') != -1:
        raise ValueError('-R is not supported')
    bias = -1.
    if options.find('-B') != -1:
        # Handle the bias ourselves: strip -B from the options and append a
        # constant bias column to x instead.
        options_split = options.split()
        i = options_split.index('-B')
        bias = float(options_split[i+1])
        options = ' '.join(options_split[:i] + options_split[i+2:])
        x = sparse.hstack([
            x,
            np.full((x.shape[0], 1), bias),
        ], 'csr')
    # CSC makes per-class column slicing cheap in the loop below.
    y = y.tocsc()
    nr_class = y.shape[1]
    nr_feature = x.shape[1]
    weights = np.zeros((nr_feature, nr_class), order='F')
    for i in range(nr_class):
        # Train one binary classifier per label.
        yi = y[:, i].toarray().reshape(-1)
        modeli = train(yi, x, options)
        w = np.ctypeslib.as_array(modeli.w, (nr_feature,))
        # liblinear label mapping depends on data, we ensure
        # it is the same for all labels
        if modeli.get_labels()[0] == 0:
            w = -w
        weights[:, i] = w
    return {'weights': np.asmatrix(weights), '-B': bias}
def predict_values(model, x: sparse.csr_matrix) -> np.ndarray:
    """
    Compute the decision values associated with x.

    Returns a dense array of shape (n_instances, n_classes).
    x has shape (n_instances, n_features); its columns are zero-padded or
    truncated to match the feature count the model was trained with, and a
    constant bias column is appended when the model carries a bias term.
    """
    bias = model['-B']
    weights = model['weights']
    has_bias = bias > 0
    # Number of genuine feature columns expected by the weight matrix
    # (its last row is the bias weight when a bias term is present).
    n_features = weights.shape[0] - (1 if has_bias else 0)
    n_instances = x.shape[0]
    bias_block = np.full((n_instances, 1 if has_bias else 0), bias)
    if x.shape[1] < n_features:
        # Pad missing feature columns with zeros before the bias column.
        pad = np.zeros((n_instances, n_features - x.shape[1]))
        parts = [x, pad, bias_block]
    else:
        # Drop surplus columns, then append the bias column.
        parts = [x[:, :n_features], bias_block]
    xb = sparse.hstack(parts, 'csr')
    return (xb * weights).A
| chihming/LibMultiLabel | libmultilabel/linear/linear.py | linear.py | py | 2,283 | python | en | code | null | github-code | 13 |
41905013716 | import syslog, os, sys
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
class win(QWidget):
    """Dialog windows shown by pam-accesscontrol for SSH/login events.

    NOTE(review): close(), ask() and xorg() read the module-level USER/HOST
    set in the __main__ block, not the constructor arguments of the same
    names -- confirm this coupling is intended.
    """
    def __init__(self, USER, HOST, SERVICE):
        super(win, self).__init__()
        # Human-readable authentication description, only set for SSH.
        self.AUTH = None
        if SERVICE == "sshd-key": self.AUTH = "SSH public-key authentication"
        elif SERVICE == "sshd": self.AUTH = "SSH password authentication"
        if SERVICE in ['sshd','sshd-key']:
            self.SERVICE = "SSH"
        else:
            self.SERVICE = SERVICE
        # Message box with an animated lock icon (QMovie replaces the
        # static pixmap in the message box's icon label).
        self.w = QMessageBox()
        self.w.setIconPixmap(QPixmap('/usr/share/pam-accesscontrol/img/lock.gif'))
        icon_label = self.w.findChild(QLabel,"qt_msgboxex_icon_label")
        movie = QMovie('/usr/share/pam-accesscontrol/img/lock.gif')
        setattr(self.w,'icon_label',movie)
        icon_label.setMovie(movie)
        movie.start()
    def close(self):
        # Informational dialog: the remote host closed the connection.
        self.TEXT = "Connection closed by remote host.\n\nUser: " + USER + "\nHost: " + HOST
        if self.AUTH:
            self.TEXT = self.TEXT + "\n\nAuthentication: "+ self.AUTH
        self.w.setWindowTitle(self.tr(self.SERVICE + ': connection closed'))
        self.w.setText(self.TEXT)
        self.w.exec_()
    def ask(self):
        # Yes/No dialog for a new incoming connection; the process exit code
        # (0 = allow, 1 = deny) is consumed by the PAM module.
        self.TEXT = "New incoming " + self.SERVICE + " connection has been established. " + \
                    "Do you want to allow it?\n\nUser: " + USER + "\nHost: " + HOST
        if self.AUTH:
            self.TEXT = self.TEXT + "\n\nAuthentication: "+ self.AUTH
        self.w.setWindowTitle(self.tr('New ' + self.SERVICE + ' connection'))
        self.w.setText(self.TEXT)
        self.w.setStandardButtons(QMessageBox.Yes | QMessageBox.No)
        self.w.setDefaultButton(QMessageBox.No)
        if self.w.exec_() == QMessageBox.Yes:
            sys.exit(0)
        else:
            sys.exit(1)
    def xorg(self):
        # Access-denied dialog for an X session login.
        self.w.setGeometry(100, 50, 100, 100)
        self.TEXT = "ACCESS DENIED\n\n\nLogin not possible for user '" + USER + "'"
        self.w.setWindowTitle(self.tr('ACCESS DENIED'))
        self.w.setText(self.TEXT)
        self.w.exec_()
if __name__ == '__main__':
    # usage: windows.py [ask | info | xorg] HOST USER PAM-SERVICE
    if (len(sys.argv) != 5) or sys.argv[1] not in ["ask","info","xorg"]:
        print ("usage: " + sys.argv[0] + " [ask | info | xorg] HOST USER PAM-SERVICE")
        sys.exit(1)
    # Render the IPv6 loopback address in a friendlier form.
    if sys.argv[2] == "::1":
        HOST = "localhost"
    else:
        HOST = sys.argv[2]
    USER = sys.argv[3]
    SERVICE = sys.argv[4]
    app = QApplication(sys.argv)
    # Dispatch to the requested dialog.
    if sys.argv[1] == "ask": win(USER, HOST, SERVICE).ask()
    elif sys.argv[1] == "info": win(USER, HOST, SERVICE).close()
    elif sys.argv[1] == "xorg": win(USER, HOST, SERVICE).xorg()
| alexander-naumov/pam-accesscontrol | usr/share/pam-accesscontrol/windows.py | windows.py | py | 2,463 | python | en | code | 3 | github-code | 13 |
70078935379 | import os
import sys
from pathlib import Path
from platform import system
from shutil import which
from subprocess import call
from typing import Optional, Tuple
from pandocfilters import toJSONFilter, Para, Image, get_filename4code, get_caption, get_extension
def get_cache_dir() -> Path:
    """Return the per-user cache directory for the current platform.

    Windows uses %LOCALAPPDATA%; other systems use $XDG_CACHE_HOME,
    falling back to ~/.cache.
    """
    on_windows = system() == "Windows"
    if on_windows:
        base = os.environ["LOCALAPPDATA"]
    else:
        base = os.getenv("XDG_CACHE_HOME", Path.home() / ".cache")
    return Path(base).resolve()
def get_tmpdir_info(code: str) -> Tuple[Path, str]:
    """
    Get information on where to put a temporary plantuml file.
    :param code: The contents of a code block.
    :return: A tuple of (tmp_dir, content_hash).
    """
    # force get_filename4code to clean up after itself
    # (stderr is redirected to /dev/null because get_filename4code logs there)
    with open(os.devnull, "w") as devnull:
        err_tmp = sys.stderr
        try:
            os.environ["PANDOCFILTER_CLEANUP"] = "y"
            sys.stderr = devnull
            tmp_file_data = get_filename4code("plantuml", code)
        finally:
            # Always restore stderr and drop the env toggle.
            del os.environ["PANDOCFILTER_CLEANUP"]
            sys.stderr = err_tmp
    # get_filename4code returns "<dir>/<content-hash>"; split it.
    tmp_file_base = Path(tmp_file_data)
    tmp_dir = tmp_file_base.parent
    filehash = tmp_file_base.name
    return tmp_dir, filehash
def plantuml(key: str, value: str, format: str, meta: dict) -> Optional:
    """Pandoc JSON filter: render plantuml/puml/uml code blocks to images.

    Returns a Para element wrapping the rendered image for matching code
    blocks, or None (implicitly) to leave other elements untouched.
    NOTE(review): the parameter `format` shadows the builtin of that name;
    harmless here but worth renaming eventually.
    """
    if key == "CodeBlock":
        [[ident, classes, keyvals], code] = value
        if any(c in {"plantuml", "puml", "uml"} for c in classes):
            # add start and end directives if necessary
            if not code.startswith("@start"):
                code = f"@startuml\n{code}\n@enduml\n"
            tmp_dir, filehash = get_tmpdir_info(code)
            # if we can convert svg, we prefer it over raster images
            svg_converter = which("rsvg-convert")
            default = "svg" if svg_converter else "png"
            filetype = get_extension(format, default, html="svg", latex="eps")
            # Rendered images are cached by content hash, so unchanged
            # diagrams are never re-rendered.
            cache = get_cache_dir() / "pandoc/plantuml"
            cache.mkdir(parents=True, exist_ok=True)
            src = tmp_dir / f"{filehash}.uml"
            dest = str(cache / f"{filehash}.{filetype}")
            if not os.path.isfile(dest):
                with open(src, "w") as f:
                    f.write(code)
                call(["plantuml", f"-t{filetype}", "-output", cache, src])
                sys.stderr.write("Created image " + dest + "\n")
            caption, typef, keyvals = get_caption(keyvals)
            return Para([Image([ident, [], keyvals], caption, [dest, typef])])
if __name__ == "__main__":
toJSONFilter(plantuml)
# from filter_debug_utils import run_dbg_filter
# output = run_dbg_filter(plantuml, "test.md")
# print(output)
| FynnFreyer/.dotfiles | pandoc/.local/share/pandoc/filters/plantuml.py | plantuml.py | py | 2,775 | python | en | code | 0 | github-code | 13 |
74912781137 | import pandas as pd
import sqlalchemy as sql
def etl(data_path: str, engine: sql.engine.Engine) -> None:
    """
    Run an ETL step over a CSV file and load the data into a database table.
    Args:
    - data_path (str): Path of the CSV file to read.
    - engine (sqlalchemy.engine.Engine): SQLAlchemy Engine used to connect to the database.
    Returns: None
    """
    # Read the CSV file
    df = pd.read_csv(filepath_or_buffer=data_path)
    # Rename columns to match the warehouse schema
    df.rename(
        columns={
            "payment_sequential": "sequential",
            "payment_type": "type",
            "payment_installments": "installments",
            "payment_value": "value",
        },
        inplace=True,
    )
    # Connect to the database
    with engine.connect() as conn:
        # Create the "order_payments" table if it does not exist
        # NOTE(review): SQLAlchemy 2.x requires wrapping raw SQL in text();
        # confirm the SQLAlchemy version this targets.
        conn.execute(
            """
            CREATE TABLE IF NOT EXISTS `data_warehouse_olist`.`order_payments` (
                `order_id` VARCHAR(45) NOT NULL,
                `sequential` INT NOT NULL,
                `type` VARCHAR(45) NOT NULL,
                `installments` INT NOT NULL,
                `value` DECIMAL (10,2) NOT NULL,
                FOREIGN KEY (`order_id`) REFERENCES `data_warehouse_olist`.`orders` (`order_id`)
            );
            """
        )
        # Load the data into the "order_payments" table
        df.to_sql(
            name="order_payments",
            con=engine,
            index=False,
            if_exists="append",
        )
| agusdm97/PF-DTS05-E-COMMERCE-OLIST | data_warehouse/etl_module/order_payment.py | order_payment.py | py | 1,571 | python | es | code | 1 | github-code | 13 |
73605353298 | # !/usr/bin/python3
# -*- coding: utf-8 -*-
# @Author: 花菜
# @File: 70爬楼梯.py
# @Time : 2022/11/8 20:01
# @Email: lihuacai168@gmail.com
# 70爬楼梯
# 假设你正在爬楼梯。需要 n 阶你才能到达楼顶。
#
# 每次你可以爬 1 或 2 个台阶。你有多少种不同的方法可以爬到楼顶呢?
#
#
#
# 示例 1:
#
#
# 输入:n = 2
# 输出:2
# 解释:有两种方法可以爬到楼顶。
# 1. 1 阶 + 1 阶
# 2. 2 阶
#
# 示例 2:
#
#
# 输入:n = 3
# 输出:3
# 解释:有三种方法可以爬到楼顶。
# 1. 1 阶 + 1 阶 + 1 阶
# 2. 1 阶 + 2 阶
# 3. 2 阶 + 1 阶
#
#
#
#
# 提示:
#
#
# 1 <= n <= 45
#
#
# Related Topics 记忆化搜索 数学 动态规划 👍 2727 👎 0
# leetcode submit region begin(Prohibit modification and deletion)
class Solution:
    def climbStairs(self, n: int) -> int:
        """Count the distinct ways to climb n stairs taking 1 or 2 steps.

        Fibonacci recurrence ways(i) = ways(i-1) + ways(i-2), computed with
        two rolling values instead of a full DP table (O(1) space).
        """
        if n < 2:
            return n
        prev, curr = 1, 2  # ways to reach step 1 and step 2
        for _ in range(3, n + 1):
            prev, curr = curr, prev + curr
        return curr
# leetcode submit region end(Prohibit modification and deletion)
| lihuacai168/LeetCode | 动态规划/70爬楼梯.py | 70爬楼梯.py | py | 1,142 | python | zh | code | 4 | github-code | 13 |
36198501612 | import argparse
import openai_secret_manager
import langchain
# Load OpenAI API credentials from environment variables
secrets = openai_secret_manager.get_secret("openai")
# Set up the OpenAI API client
# NOTE(review): the `openai` module is never imported (only
# openai_secret_manager is), so this line raises NameError as written;
# confirm the intended client setup.
client = openai.SecretManagerClient(api_key=secrets["api_key"])
# Define the maximum length of each input chunk (in tokens)
CHUNK_SIZE = 512
def refine(input_text, model):
    """Generate text for input_text, let the user edit it interactively,
    then re-submit the combined text for one further refinement pass.

    NOTE(review): the client/langchain API surface used here does not match
    the published openai/langchain packages -- verify before running.
    """
    # Create a LangChain instance with the appropriate language model
    lc = langchain.LangChain(
        model_name_or_path="gpt3",
        tokenizers=["text"],
        max_length=CHUNK_SIZE,
        device="cpu",
    )
    # Prepare the input using LangChain
    # NOTE(review): input_ids is never used afterwards -- dead encode step?
    input_ids = lc.encode(input_text)
    # Generate output using the OpenAI API
    response = client.gpt3.generate(
        prompt=input_text,
        model=model,
        max_tokens=1024,
        temperature=0.7,
    )
    # Allow the user to modify the generated text
    output_text = input(f"Original:\n{response.choices[0].text}\n\nModified:\n")
    # Concatenate the modified text with the original input
    modified_input = input_text + output_text
    # Send the modified input back to the OpenAI API for further refinement
    refined_response = client.gpt3.generate(
        prompt=modified_input,
        model=model,
        max_tokens=1024,
        temperature=0.7,
    )
    # Print the refined output
    print(refined_response.choices[0].text)
def main():
    """Parse CLI arguments and run a single interactive refinement pass."""
    arg_parser = argparse.ArgumentParser(
        description="Refine generated text using input from the user."
    )
    arg_parser.add_argument("input_text", help="text to refine")
    arg_parser.add_argument(
        "--model",
        default="text-davinci-002",
        help="name of the GPT-3 model to use (default: text-davinci-002)",
    )
    parsed = arg_parser.parse_args()
    refine(parsed.input_text, parsed.model)

if __name__ == "__main__":
    main()
| greydoubt/grindGPT | refine.py | refine.py | py | 1,933 | python | en | code | 0 | github-code | 13 |
1617782366 | import time
import numpy as np
from matplotlib import pyplot as plt
from numba import jit, prange
from scipy.sparse import csr_matrix
from importlib import reload
import params
reload(params)
from params import *
from subprocess import call
from os import listdir
#{{{ Timer, initialize, integrate over electrons, plot
class Timer:
    """Context manager measuring the elapsed time of a with-block.

    Usage::

        with Timer() as t:
            ...
        print(t.interval)  # seconds
    """
    def __enter__(self):
        # perf_counter is monotonic, so the interval cannot be skewed by
        # system clock adjustments (time.time can jump backwards/forwards).
        self.start = time.perf_counter()
        return self
    def __exit__(self,*args):
        self.stop = time.perf_counter()
        self.interval = self.stop - self.start
@jit(nogil=True)
def find_index(L, val):
    """Return, for each element of `val`, the index of the closest value in `L`.

    L is a 1-D array of grid points; val is an array of target values.
    The result is an integer array with the same shape as val.
    (Earlier list/scalar code paths were dead commented-out code and have
    been removed; only the array-array case was ever active.)
    """
    a = np.zeros(val.shape,dtype=int)
    for i in range(val.shape[0]):
        a[i] = int(round(np.abs(L - val[i]).argmin()))
    return a
# With hard boundary conditions on the nuclei, propagation only starts at the
# grid index of R_cut; otherwise the whole nuclear grid is used.
# NOTE(review): find_index expects `val` to expose .shape, so this assumes
# R_cut is array-like in params -- confirm.
if hard_bc_nuc == True:
    Ri_cut = find_index(Rx,R_cut)
else:
    Ri_cut = 0
def gap_photon(gap):
    """Convert an energy gap in Hartree to a photon frequency in atomic units.

    Hartree -> eV (x 27.2114), eV -> Hz via E = h*nu with
    h = 4.136e-15 eV*s, then Hz -> a.u. (x 2.419e-17 s per a.u. of time).
    """
    hartree_to_ev = 27.2114
    planck_ev_s = 4.136*10**(-15)
    s_per_au = 2.419*10**(-17)
    return gap * hartree_to_ev / planck_ev_s * s_per_au
#for initial wavefunction as direct product gaussians
#nuclear wavefunctions are also gaussians, rewritten in reduced coordinates
#By construction with R = R1-R2 for an initial separation of R0, along the R axis, R2 is at 0
# while R1 is at R0
@jit(nogil=True)
def gauss(r1x,r1x0,r1sigma2, r2x,r2x0,r2sigma2,R,R1x0,R1sigma2,R2sigma2):
    # Product of two electronic Gaussians and two nuclear Gaussians in the
    # reduced coordinate (R1 = mu*R/M1 centered at R1x0, R2 = -mu*R/M2
    # centered at 0); cast to complex for the complex wavefunction arrays.
    return np.exp(-(r1x-r1x0)**2/r1sigma2)*np.exp(-(r2x-r2x0)**2/r2sigma2)\
        *np.exp(-(mu*R/M1-R1x0)**2/R1sigma2)*np.exp(-(-mu*R/M2)**2/R2sigma2) + 0j
# Preallocate the complex three-dimensional wavefunction psi(r1, r2, R).
psi0 = np.zeros((len(r1x),len(r2x),len(Rx))) + 0j
#print(len(psi0[0][:][0]))
@jit(nogil=True)
def calc_psi0(psi0):
    """Fill psi0 with the symmetrized product-Gaussian initial state,
    normalize it on the grid, save it to ./psi0.npy and return it.

    NOTE(review): the running norm accumulates psi**2 (not |psi|**2) --
    valid here because the Gaussian is real-valued; confirm if the initial
    state ever becomes complex.
    """
    print('Calculating the initial Wavefunction with numba')
    #position array index values
    r1i = 0
    r2i = 0
    Ri = 0
    #wavefunction norm calculated on the fly
    psi2 = 0
    for r1 in r1x:
        r2i=0
        for r2 in r2x:
            Ri=0
            for R in Rx[Ri_cut:]: #from the nuclear cut off onwards
                # Symmetrized under electron exchange r1 <-> r2.
                psi0[r1i,r2i,Ri] = gauss(r1,Rx[Ri]/2,re_sig2,r2,-Rx[Ri]/2,re_sig2,R,R0,Rsig2,Rsig2)\
                    +gauss(r2,Rx[Ri]/2,re_sig2,r1,-Rx[Ri]/2,re_sig2,R,R0,Rsig2,Rsig2)
                #add to the discretized integral of psi**2
                psi2 += psi0[r1i,r2i,Ri]**2
                Ri += 1
            r2i += 1
        r1i += 1
    #complete the integral of psi**2 and take the square root
    psi2 = np.sqrt(psi2*e_spacing**2*n_spacing)
    #normalize the wavefunction
    psi0 = psi0/psi2
    np.save('./psi0',psi0)
    return psi0
def load_psi0():
    """Load a previously computed initial wavefunction from ./psi0.npy."""
    return np.load('./psi0.npy')
def T_test_psi(psi):
    """Build exp(r1 + r2 + R) on the grid (a known-derivative test function
    for checking the kinetic-energy operators)."""
    testpsi = np.zeros(psi.shape)
    for x1i in range(0,len(r1x)):
        for x2i in range(0,len(r2x)):
            for Ri in range(Ri_cut,len(Rx)):
                testpsi[x1i,x2i,Ri] += np.exp(r1x[x1i] + r2x[x2i] + Rx[Ri])
    return testpsi
@jit(nogil=True)
def integrate_over_electrons(psi):
    """Return the nuclear density rho(R): |psi|^2 integrated over r1 and r2."""
    print('Integrating over electronic coordinates')
    return e_spacing**2*sum(sum(np.abs(psi)**2))
@jit(nogil = True)
def one_electron_density(psi):
    """Return the one-electron density on the r grid: |psi|^2 integrated
    over the other electron and R, summed over both electrons."""
    print('Calculating one electron Density')
    psi2 = np.abs(psi)**2
    out = np.zeros(len(r1x))
    for r1i in range(0,len(r1x)):
        out[r1i] = sum(np.sum(psi2[r1i,:,:],0))
    for r2i in range(0,len(r2x)):
        out[r2i] += sum(np.sum(psi2[:,r2i,:],0))
    return out*e_spacing*n_spacing
@jit(nogil=True)
def normalize(psi):
    """Return psi divided by its discrete L2 norm on the grid."""
    psi2 = np.abs(psi)**2
    norm = np.sqrt(sum(sum(sum(psi2)))*n_spacing*e_spacing**2)
    print('norm is ', norm)
    return psi/(norm)
def plot_rho_R(psi):
    """Plot the nuclear density rho(R) and report the integration time."""
    with Timer() as t:
        rho_R = integrate_over_electrons(psi)
    print('Time to integrate over electrons ', t.interval)
    plt.plot(Rx, rho_R,'.')
    plt.show()
#relies on r1x and r2x being the same size
def plot_rho_r(psi):
    """Plot the one-electron density rho(r) and report the computation time."""
    with Timer() as t:
        rho_r = one_electron_density(psi)
    print('Time to generate one electron densities', t.interval)
    plt.plot(r1x, rho_r,'.')
    plt.show()
#}}}
#{{{ calc_V, calc_T, H, calc_E
@jit(nogil=True)
def construct_V(V):
    """Fill V(r1, r2, R) with the soft-Coulomb potential: electron-electron
    repulsion, both electron-nucleus attractions, and nuclear repulsion.
    Cr2 is the softening parameter; mu/M1/M2 set the reduced coordinates."""
    for r1i in range(0,len(r1x)):
        for r2i in range(0,len(r2x)):
            #electron electron interaction
            V[r1i,r2i,:] += np.sqrt( (r1x[r1i] - r2x[r2i])**2 + Cr2)**(-1)
        #grab the r1 electron nuclear interactions while looping over r1
        for Ri in range(Ri_cut,len(Rx)):
            V[r1i,:,Ri] += (-np.sqrt( (r1x[r1i] - mu*Rx[Ri]/M1)**2 + Cr2)**(-1) -\
                    np.sqrt( (r1x[r1i] + mu*Rx[Ri]/M2)**2 + Cr2)**(-1))
    for Ri in range(Ri_cut,len(Rx)):
        #Nuclear Interaction
        V[:,:,Ri] += Rx[Ri]**(-1)
        #grab the r2 electron nuclear interactions while looping over R
        for r2i in range(0,len(r2x)):
            V[:,r2i,Ri] += (-np.sqrt( (r2x[r2i] - mu*Rx[Ri]/M1)**2 + Cr2)**(-1) -\
                    np.sqrt( (r2x[r2i] + mu*Rx[Ri]/M2)**2 + Cr2)**(-1))
    return V
# Precomputed complex potential grid, reused by every calc_V call.
V_kernel = construct_V(np.zeros( (len(r1x), len(r2x), len(Rx)) )) + 0j
@jit(nopython=True,parallel=True,nogil=True)
def calc_V(psi,V):
    # The potential is diagonal in the position basis: V psi is elementwise.
    return V*psi
#Because of reduction in dimensionality inherent in finite difference, to avoid everything having to go from [1:-1]
#The derivative value at the end points is set to 0
def _tridiagonal_laplacian(n):
    """n x n second-difference stencil: -2 on the diagonal, +1 on both
    first off-diagonals.  Vectorized with np.eye instead of the original
    O(n^2) Python double loops."""
    return -2.0*np.eye(n) + np.eye(n, k=1) + np.eye(n, k=-1)

Lap_e1 = _tridiagonal_laplacian(len(r1x))
Lap_e2 = _tridiagonal_laplacian(len(r2x))
Lap_n = _tridiagonal_laplacian(len(Rx))

# Reduced mass of one electron relative to the total nuclear mass.
mu_e = (M1+M2)/(M1+M2+1)
#Complete calculation of the laplacian and divide by twice the mass, multiplying by -1
T_e1 = csr_matrix((-1/(2*mu_e*e_spacing**2))*Lap_e1 + 0j)
T_e2 = csr_matrix((-1/(2*mu_e*e_spacing**2))*Lap_e2 + 0j)
T_n = csr_matrix((-1/(2*mu*n_spacing**2))*Lap_n + 0j)
T_buff = np.zeros( (len(r1x), len(r2x), len(Rx)) ) + 0j
@jit(nogil=True)
def calc_T(psi, buff):
buff[:,:,:] = 0j
for x1i in range(0,len(r1x)):
for x2i in range(0,len(r2x)):
buff[x1i,x2i,:] += T_n.dot(psi[x1i,x2i,:])
for Ri in range(Ri_cut,len(Rx)):
for x2i in range(0,len(r2x)):
buff[:,x2i,Ri] += T_e1.dot(psi[:,x2i,Ri])
buff[x2i,:,Ri] += T_e2.dot(psi[x2i,:,Ri])
return buff
# Alternative electronic kinetic application acting on the flattened (r1, r2)
# subspace in a single matvec per R point.
# BUG FIX: the original built Te_tot = np.kron(T_e1, T_e2) -- the Kronecker
# *product* -- and applied it to a 2-D slice.  The kinetic sum T1 + T2 requires
# the Kronecker *sum* T_e1 (x) I + I (x) T_e2, and the operand must be the
# flattened slice (the original was also shape-incompatible; its own comment
# noted it disagreed with calc_T inside H).
Te_tot = np.kron(T_e1.toarray(), np.eye(len(r2x))) \
       + np.kron(np.eye(len(r1x)), T_e2.toarray())
@jit(nogil=True)
def calc_T2(psi, buff):
    """Kinetic-energy application equivalent to calc_T, using the Kronecker-sum
    operator Te_tot on the flattened electronic subspace."""
    buff[:,:,:] = 0j
    n1 = len(r1x)
    n2 = len(r2x)
    for Ri in range(Ri_cut,len(Rx)):
        # C-order flatten matches the kron index convention (i1*n2 + i2)
        buff[:,:,Ri] += Te_tot.dot(psi[:,:,Ri].reshape(n1*n2)).reshape(n1, n2)
    for r1i in range(len(r1x)):
        for r2i in range(len(r2x)):
            buff[r1i,r2i,:] += T_n.dot(psi[r1i,r2i,:])
    return buff
@jit(nogil=True)
def H(psi):
    """Field-free molecular Hamiltonian applied to psi: potential + kinetic."""
    potential = calc_V(psi, V_kernel)
    kinetic = calc_T(psi, T_buff)
    return potential + kinetic
@jit(parallel=True, nogil=True)
def D(psi):
    """Apply the dipole operator to psi (length gauge, atomic units).

    Both electron coordinates contribute with a minus sign (charge -1); for a
    heteronuclear molecule (M1 != M2) the nuclear coordinate also couples to
    the field with weight lamb = (M2-M1)/(M2+M1).
    """
    Dpsi = np.zeros(psi.shape)+0j
    # electron 1 contribution: -r1 * psi
    for r2i in prange(psi.shape[1]):
        for Ri in prange(psi.shape[2]):
            Dpsi[:,r2i,Ri] -= psi[:,r2i,Ri]*r1x[:]
    # electron 2 contribution: -r2 * psi
    for r1i in prange(psi.shape[0]):
        for Ri in prange(psi.shape[2]):
            Dpsi[r1i,:,Ri] -= psi[r1i,:,Ri]*r2x[:]
    # nuclear contribution vanishes for homonuclear molecules
    if(M1 != M2):
        lamb = (M2-M1)/(M2+M1)
        for r1i in prange(psi.shape[0]):
            for r2i in prange(psi.shape[1]):
                Dpsi[r1i,r2i,:] += lamb*Rx[:]*psi[r1i,r2i,:]
    return Dpsi
@jit(nogil=True)
def shape_E(tf_laser,dt, A, form):
    """Build the sampled laser field A * envelope(t) * sin(2*pi*nu*t).

    form == 'sin2': sin^2 envelope over the whole pulse length tf_laser.
    form == 'ramp': linear ramp up over the first 10 optical cycles, flat top,
    linear ramp down over the last 10.  Any other form returns zeros.
    Returns an array of int(tf_laser/dt)+1 samples.
    """
    time_steps = int(tf_laser/dt) + 1
    Eform = np.zeros(time_steps)
    #frequency in s is 6.177*10**15 Hz,
    #calculated from difference between BO gs and excited around peak of nuclear wavepacket
    #with conversion factor of 2.419*10**(-17) s / au -> .1494 Hz,au
    if (form=='sin2'):
        for i in range(time_steps):
            Eform[i] = np.sin(2*np.pi*nu*i*dt)*np.sin(np.pi*i*dt/(tf_laser))**2
    elif (form=='ramp'):
        for i in range(time_steps):
            if(i*dt<10*optical_cycle):
                Eform[i] = np.sin(2*np.pi*nu*i*dt)*(i*dt/(10.*optical_cycle))
            # NOTE(review): the strict '>' below leaves the sample exactly at
            # i*dt == 10*optical_cycle to the ramp-down branch -- confirm intended.
            elif(i*dt>10*optical_cycle and i*dt < tf_laser-10*optical_cycle):
                Eform[i] = np.sin(2*np.pi*nu*i*dt)
            else:
                Eform[i] = np.sin(2*np.pi*nu*i*dt)*(tf_laser/(10*optical_cycle)-(i*dt/(10*optical_cycle)))
    return A*Eform
@jit(nogil=True)
def Ht(psi,Et):
    """Time-dependent Hamiltonian: field-free part plus dipole coupling Et*D."""
    field_free = calc_V(psi, V_kernel)+calc_T(psi, T_buff)
    coupling = Et*D(psi)
    return field_free + coupling
@jit(nogil=True)
def calc_E(psi):
    """Energy expectation value <psi|H|psi> with grid measure n_spacing*e_spacing**2."""
    Hpsi = H(psi)
    overlap = sum(sum(sum(np.conj(psi)*Hpsi)))
    return overlap*n_spacing*e_spacing**2
@jit(nogil=True)
def calc_Et(psi,Et):
    """Instantaneous energy <psi|H(t)|psi> for field value Et."""
    Hpsi = Ht(psi,Et)
    overlap = sum(sum(sum(np.conj(psi)*Hpsi)))
    return overlap*n_spacing*e_spacing**2
#}}}
@jit(nogil=True)
def inner_product_full(bra,ket):
    """Discrete inner product <bra|ket> over the full (r1, r2, R) grid."""
    overlap = np.conj(bra)*ket
    return sum(sum(sum(overlap)))*n_spacing*e_spacing**2
#This requires the psi and kets to be normalized
@jit(nogil=True)
def filter_projection(psi, kets):
    """Remove the components of psi along each (normalized) state in kets:
    psi - sum_i <k_i|psi> |k_i>."""
    proj = np.zeros(psi.shape) +0j
    for ket in kets:
        proj = proj + inner_product_full(ket, psi)*ket
    return psi - proj
#{{{ imaginary propagation
@jit(nogil=True)
def imag_prop_kernel(psi_in,tau):
    """One first-order imaginary-time Euler step: (1 - tau*H) psi."""
    step = tau*H(psi_in)
    return psi_in - step
#plotting wrapper for recursive function imag_prop
def gs_relax(psi0, tau, tol, plotDensity):
    """Imaginary-time relax psi0 to the ground state, plotting convergence.

    Iterates imag_prop_kernel + normalize until successive sampled energies
    differ by less than tol, then saves the result to ./gs-psi-tol<tol>.npy.
    NOTE(review): E_gs is only appended every 10th iteration, so the while
    condition reuses a stale energy difference in between -- confirm intended.
    """
    psi = np.zeros(psi0.shape) + 0j
    plt.ion()
    if plotDensity==True:
        fig1, (ax_e,ax_n, ax_E) = plt.subplots(3,1)
        plt.pause(0.0001)
        fig_rho_e, =ax_e.plot(r1x, one_electron_density(psi0),'.')
        fig_rho_n, = ax_n.plot(Rx, integrate_over_electrons(psi0),'.')
    else:
        fig1, ax_E = plt.subplots(1,1)
    E_gs = [calc_E(psi0)]
    iter_gs_plot = [0]
    iter_gs = 0
    fig_E, = ax_E.plot(iter_gs,E_gs, '.')
    plt.pause(0.0001)
    fig1.canvas.draw()
    # NOTE(review): missing '()' -- this is a bare attribute access and never
    # actually calls flush_events (same pattern recurs below).
    fig1.canvas.flush_events
    psi = imag_prop_kernel(psi0,tau)
    psi = normalize(psi)
    iter_gs += 1
    E_gs.append(calc_E(psi))
    while(abs((-E_gs[-2] + E_gs[-1])) > tol):
        psi = imag_prop_kernel(psi,tau)
        psi = normalize(psi)
        iter_gs += 1
        print('E diff is ', abs((-E_gs[-2] + E_gs[-1])))
        if(iter_gs%10 == 0):
            #needed two E_gs vals to initiate while loop, but due to jumping of energy between calls
            #have to reset the second E_gs val to have plotting resolution at low E
            if(iter_gs==10):
                E_gs[-1] = calc_E(psi)
            else:
                E_gs.append(calc_E(psi))
                iter_gs_plot.append(iter_gs)
            plt.pause(0.0001)
            if plotDensity==True:
                fig_rho_e.set_ydata(one_electron_density(psi))
                fig_rho_n.set_ydata(integrate_over_electrons(psi))
                ax_e.relim()
                ax_e.autoscale()
                ax_n.relim()
                ax_n.autoscale()
            fig_E.set_xdata(iter_gs_plot)
            fig_E.set_ydata(E_gs)
            ax_E.relim()
            ax_E.autoscale()
            fig1.canvas.draw()
            fig1.canvas.flush_events
    np.save('./gs-psi-tol' + str(tol), psi)
#Excited state relaxation based around projection filtering
#first order Taylor expansion of exp(-H1 * tau)
#where H1 = (1-P0)H(1-P0)
@jit(nogil=True)
def es_relax_kernel(psi_in, psi_gs, tau):
    """One imaginary-time step of the ground-state-projected Hamiltonian
    (1-P0) H (1-P0), which relaxes toward the first excited state."""
    filtered = filter_projection(psi_in, psi_gs)
    h_filtered = filter_projection(H(filtered), psi_gs)
    return psi_in - tau*h_filtered
def es_relax(psi0,psi_gs,tau,tol,plotDensity):
    """Imaginary-time relax psi0 toward the first excited state by repeatedly
    projecting out the ground state psi_gs (es_relax_kernel), with live plots.

    Saves the converged state to ./ex1-psi-tol<tol>.npy.
    NOTE(review): in the non-plotting else branch below, E_ex1 and
    iter_ex1_plot are appended a second time in the same iteration -- verify.
    """
    plt.ion()
    if plotDensity==True:
        fig1, (ax_e,ax_n, ax_E) = plt.subplots(3,1)
        plt.pause(0.0001)
        fig_rho_e, =ax_e.plot(r1x, one_electron_density(psi0),'.')
        fig_rho_n, = ax_n.plot(Rx, integrate_over_electrons(psi0),'.')
    else:
        fig1, ax_E = plt.subplots(1,1)
    E_ex1 = [calc_E(psi0)]
    iter_ex1_plot = [0]
    iter_ex1 = 0
    fig_E, = ax_E.plot(iter_ex1,E_ex1, '.')
    plt.pause(0.0001)
    fig1.canvas.draw()
    # NOTE(review): missing '()' -- attribute access only, never called
    fig1.canvas.flush_events
    psi = np.zeros(psi0.shape) + 0j
    psi = es_relax_kernel(psi0,psi_gs,tau)
    psi = normalize(psi)
    iter_ex1 += 1
    E_ex1.append(calc_E(psi))
    while(abs((-E_ex1[-2] + E_ex1[-1])) > tol):
        psi = es_relax_kernel(psi,psi_gs,tau)
        psi = normalize(psi)
        iter_ex1 += 1
        print('E_diff is ', abs(-E_ex1[-2] + E_ex1[-1]))
        if(iter_ex1%10 == 0):
            #needed two E_ex1 vals to initiate while loop, but due to jumping of energy between calls
            #have to reset the second E_ex1 val to have plotting resolution at low E
            if(iter_ex1==10):
                E_ex1[-1] = calc_E(psi)
            else:
                E_ex1.append(calc_E(psi))
                iter_ex1_plot.append(iter_ex1)
            if plotDensity==True:
                plt.pause(0.0001)
                fig_rho_e.set_ydata(one_electron_density(psi))
                fig_rho_n.set_ydata(integrate_over_electrons(psi))
                ax_e.relim()
                ax_e.autoscale()
                ax_n.relim()
                ax_n.autoscale()
                fig_E.set_xdata(iter_ex1_plot)
                fig_E.set_ydata(E_ex1)
                ax_E.relim()
                ax_E.autoscale()
                fig1.canvas.draw()
                fig1.canvas.flush_events
            else:
                E_ex1.append(calc_E(psi))
                iter_ex1_plot.append(iter_ex1)
                plt.pause(0.0001)
                fig_E.set_xdata(iter_ex1_plot)
                fig_E.set_ydata(E_ex1)
                ax_E.relim()
                ax_E.autoscale()
                fig1.canvas.draw()
                fig1.canvas.flush_events
    np.save('./ex1-psi-tol' + str(tol), psi)
#}}}
#{{{ steepest descent full
@jit(nogil=True)
def check_convergence(psi):
    """Return (distance, energy): the grid-norm of the eigen-residual
    (H - E)psi and the energy E = <psi|H|psi>."""
    Hpsi = H(psi)
    E = sum(sum(sum(np.conj(psi)*Hpsi)))*n_spacing*e_spacing**2
    # residual of the eigenvalue equation
    residual = Hpsi - E*psi
    # its norm measures the distance from an exact eigenstate
    d = np.sqrt(sum(sum(sum(np.conj(residual)*residual)))*n_spacing*e_spacing**2)
    return d, E
# psi is indexed (r1, r2, R) while the BO states are indexed (R, r1, r2);
# at every nuclear grid point we remove the component <phi(R)|Psi(R)> |phi(R)>
# of the BO electronic ground state.
@jit(nogil=True)
def filter_BO_gs(psi_in, BOgs):
    """Project the Born-Oppenheimer electronic ground state out of psi_in
    independently at each nuclear grid point R."""
    subtracted = np.zeros(psi_in.shape) + 0j
    for Ri in range(len(Rx)):
        overlap = inner_product_el(BOgs[Ri], psi_in[:,:,Ri])
        subtracted[:,:,Ri] = overlap*BOgs[Ri]
    return psi_in - subtracted
@jit(nogil=True)
def calc_excited_psi_full():
    # NOTE(review): unfinished stub -- allocates a local array and implicitly
    # returns None; SD below appears to do the actual excited-state work.
    psi0 = np.zeros((len(r1x),len(r2x),len(Rx)))
@jit(nogil=True)
def SD(tol, filter_BOgs, BOgs):
    """Steepest-descent eigen-relaxation of the full wavefunction.

    If filter_BOgs is True, the BO ground state BOgs is projected out of every
    residual so the iteration converges to an excited state; the initial guess
    is then a normalized constant.  Otherwise the guess comes from calc_psi0
    applied to the module-level psi0 (both defined elsewhere in this file).
    Returns (eigenvalue history, converged state).
    """
    eigenvalues = []
    if (filter_BOgs==False):
        psi_m = calc_psi0(psi0)
        #psi_m = normalize(np.ones(psi0.shape))
    if (filter_BOgs == True):
        #Make a god awful fist guess
        psi_m = normalize(np.ones(psi0.shape))
    # NOTE(review): 'conj' is never used (and shadows a numpy-style name)
    conj = np.zeros(psi_m.shape) +0j
    #calculate the 0th iteration eigenvalue guess
    lam = calc_E(psi_m)
    r = lam*psi_m - H(psi_m)
    if(filter_BOgs == True):
        r = filter_BO_gs(r,BOgs)
    # step length along the residual direction
    alpha = inner_product_full(r,r)/calc_E(r)
    psi_m = psi_m + alpha*r
    #check for convergence
    dist, lam = check_convergence(psi_m)
    #enter algorithm
    while (dist > tol):
        r = lam*psi_m - H(psi_m)
        if(filter_BOgs == True):
            r = filter_BO_gs(r,BOgs)
        alpha = inner_product_full(r,r)/calc_E(r)
        psi_m = psi_m + alpha*r
        #chec convergence, update lambda
        dist, lam = check_convergence(psi_m)
        print('dist=',dist)
        eigenvalues.append(lam)
    return eigenvalues, psi_m
#}}}
#{{{ Time Propagation
@jit(nogil=True)
def RK4_routine(psi,tf,dt):
    """Field-free real-time propagation of psi to time tf by classical RK4
    on i dpsi/dt = H psi, saving snapshots every psi_save_interval steps."""
    t=0.000000
    i=0
    while(t<tf):
        # standard RK4 stages for dpsi/dt = -i H psi
        K1 = -1j*H(psi)
        K2 = -1j*H(psi + dt*K1/2)
        K3 = -1j*H(psi+dt*K2/2)
        K4 = -1j*H(psi+dt*K3)
        psi += (dt/6)*(K1 + 2*K2 + 2*K3 + K4)
        t+=dt
        i+=1
        if(i%psi_save_interval==0):
            np.save('./dump/1e-icwf-comp/psi-'+"{:.6}".format(t),psi)
            print("{:.6}".format(t))
            # norm should stay ~1; drift signals too large a dt
            print('Norm = ',np.sqrt(np.sum(np.sum(np.sum(np.abs(psi)**2)))*e_spacing**2*n_spacing))
@jit(nogil=True)
def particle_velocities(psi, e1_mesh, e2_mesh,R_mesh):
    """Bohmian trajectory velocities v = Im(d(psi)/dx / psi) for each trajectory.

    e1_mesh/e2_mesh/R_mesh hold the grid indices of each trajectory's current
    position.  The nuclear velocity is divided by the reduced mass mu; the
    electron velocities assume unit electron mass -- TODO confirm whether the
    body-fixed mass mu_e should appear here instead.
    """
    # number of trajectories
    nt = e1_mesh.shape[0]
    e1_vel = np.zeros(nt)
    e2_vel = np.zeros(nt)
    R_vel = np.zeros(nt)
    for i in range(nt):
        # 1-D gradient along each coordinate, evaluated at the trajectory point
        e1_vel[i] = (np.gradient(psi[:,e2_mesh[i],R_mesh[i]],e_spacing)[e1_mesh[i]]/psi[e1_mesh[i],e2_mesh[i],R_mesh[i]]).imag
        e2_vel[i] = (np.gradient(psi[e1_mesh[i],:,R_mesh[i]],e_spacing)[e2_mesh[i]]/psi[e1_mesh[i],e2_mesh[i],R_mesh[i]]).imag
        R_vel[i] = (np.gradient(psi[e1_mesh[i],e2_mesh[i],:],n_spacing)[R_mesh[i]]/psi[e1_mesh[i],e2_mesh[i],R_mesh[i]]).imag
    return e1_vel, e2_vel, R_vel/mu
@jit(nogil=True)
def T_dep_RK4_routine(psi,tf,tf_laser,dt, A, form):
    """RK4 propagation of psi and of Bohmian trajectories under a laser pulse.

    The field (shape 'form', amplitude A) acts until tf_laser; propagation then
    continues field-free to tf.  Trajectory positions are loaded from
    ./dump/1e/, advanced with the same RK4 stages as psi, and snapshots of psi
    are saved every psi_save_interval steps into a run directory.
    NOTE(review): E_form is sampled at dt/2 so indices 2*t_index(+1,+2) pick
    the start/mid/end field values of each step; the while bound tf_laser-dt
    keeps the +2 index in range -- verify for restart-sized pulses.
    """
    t=0.000000
    t_index = 0
    # half-step sampling of the envelope for the RK4 midpoints
    E_form = shape_E(tf_laser,.5*dt, A, form)
    # NOTE(review): dir name uses the module-level Amplitude/tf_tot, not the
    # arguments A/tf -- confirm they always agree.
    dir_name = './sym-psi-t-form-'+form+'-A-'+str(Amplitude)+'-tf-laser-'\
            +str(tf_laser)+'-tf-tot-'+str(tf_tot) + '-nu-'+str(nu)
    err = call(['mkdir', dir_name])
    call(['cp','params.py',dir_name+'/README'])
    # initial trajectory grid indices and positions
    e1_mesh = np.load('./dump/1e/init_e1_mesh.npy')
    e2_mesh = np.load('./dump/1e/init_e2_mesh.npy')
    R_mesh = np.load('./dump/1e/init_R_mesh.npy')
    pos_e1 = r1x_array[e1_mesh]
    pos_e2 = r2x_array[e2_mesh]
    pos_R = Rx_array[R_mesh]
    psi_buff = np.zeros(psi.shape, dtype=np.complex128)
    e1_mesh_buff = np.zeros(num_trajs)
    e2_mesh_buff = np.zeros(num_trajs)
    R_mesh_buff = np.zeros(num_trajs)
    #if(err != 0):
    #    return 'Check psi Directory'
    # ---- driven propagation while the laser is on ----
    while(t<tf_laser-dt):
        E_t = E_form[2*t_index]
        E_t_half = E_form[2*t_index+1]
        E_t_adv = E_form[2*t_index+2]
        K1 = -1j*Ht(psi, E_t)
        e1_K1, e2_K1, R_K1 = particle_velocities(psi,e1_mesh,e2_mesh,R_mesh)
        psi_buff = psi+dt*K1/2
        e1_mesh_buff = find_index(r1x_array,pos_e1+dt*e1_K1/2)
        e2_mesh_buff = find_index(r2x_array,pos_e2+dt*e2_K1/2)
        R_mesh_buff = find_index(Rx_array,pos_R+dt*R_K1/2)
        K2 = -1j*Ht(psi_buff, E_t_half)
        e1_K2, e2_K2, R_K2 = particle_velocities(psi_buff,e1_mesh_buff,e2_mesh_buff,R_mesh_buff)
        psi_buff = psi+dt*K2/2
        e1_mesh_buff = find_index(r1x_array,pos_e1+dt*e1_K2/2)
        e2_mesh_buff = find_index(r2x_array,pos_e2+dt*e2_K2/2)
        R_mesh_buff = find_index(Rx_array,pos_R+dt*R_K2/2)
        K3 = -1j*Ht(psi_buff, E_t_half)
        e1_K3, e2_K3, R_K3 = particle_velocities(psi_buff,e1_mesh_buff,e2_mesh_buff,R_mesh_buff)
        psi_buff = psi+dt*K3
        e1_mesh_buff = find_index(r1x_array,pos_e1+dt*e1_K3)
        e2_mesh_buff = find_index(r2x_array,pos_e2+dt*e2_K3)
        R_mesh_buff = find_index(Rx_array,pos_R+dt*R_K3)
        K4 = -1j*Ht(psi_buff, E_t_adv)
        e1_K4, e2_K4, R_K4 = particle_velocities(psi_buff,e1_mesh_buff,e2_mesh_buff,R_mesh_buff)
        psi += (dt/6)*(K1 + 2*K2 + 2*K3 + K4)
        pos_e1 += (dt/6)*(e1_K1 + 2*e1_K2 + 2*e1_K3 + e1_K4)
        pos_e2 += (dt/6)*(e2_K1 + 2*e2_K2 + 2*e2_K3 + e2_K4)
        pos_R += (dt/6)*(R_K1 + 2*R_K2 + 2*R_K3 + R_K4)
        print('time=',t)
        print('pos_e1[0] = ', pos_e1[0])
        print('vel_e1[0] = ', (e1_K1[0] + 2*e1_K2[0] + 2*e1_K3[0] + e1_K4[0]))
        print('pos_e2[0] = ', pos_e2[0])
        print('vel_e2[0] = ', (e2_K1[0] + 2*e2_K2[0] + 2*e2_K3[0] + e2_K4[0]))
        print('pos_R[0] = ', pos_R[0])
        print('vel_R[0] = ', (R_K1[0] + 2*R_K2[0] + 2*R_K3[0] + R_K4[0]))
        print('\n')
        t+=dt
        t_index +=1
        if(t_index%psi_save_interval==0):
            np.save(dir_name+'/psi-'+"{:.6}".format(t),psi)
            print('Norm = ',np.sqrt(np.sum(np.sum(np.sum(np.abs(psi)**2)))*e_spacing**2*n_spacing))
            print("{:.1f}".format(100*t/tf_tot)+'% done')
    # ---- field-free continuation after the pulse ----
    if(t>=tf_laser-1.5*dt):
        while(t<tf):
            K1 = -1j*H(psi)
            e1_K1, e2_K1, R_K1 = particle_velocities(psi,e1_mesh,e2_mesh,R_mesh)
            psi_buff = psi+dt*K1/2
            e1_mesh_buff = find_index(r1x_array,pos_e1+dt*e1_K1/2)
            e2_mesh_buff = find_index(r2x_array,pos_e2+dt*e2_K1/2)
            R_mesh_buff = find_index(Rx_array,pos_R+dt*R_K1/2)
            K2 = -1j*H(psi_buff)
            e1_K2, e2_K2, R_K2 = particle_velocities(psi_buff,e1_mesh_buff,e2_mesh_buff,R_mesh_buff)
            psi_buff = psi+dt*K2/2
            e1_mesh_buff = find_index(r1x_array,pos_e1+dt*e1_K2/2)
            e2_mesh_buff = find_index(r2x_array,pos_e2+dt*e2_K2/2)
            R_mesh_buff = find_index(Rx_array,pos_R+dt*R_K2/2)
            K3 = -1j*H(psi_buff)
            e1_K3, e2_K3, R_K3 = particle_velocities(psi_buff,e1_mesh_buff,e2_mesh_buff,R_mesh_buff)
            psi_buff = psi+dt*K3
            e1_mesh_buff = find_index(r1x_array,pos_e1+dt*e1_K3)
            e2_mesh_buff = find_index(r2x_array,pos_e2+dt*e2_K3)
            R_mesh_buff = find_index(Rx_array,pos_R+dt*R_K3)
            K4 = -1j*H(psi_buff)
            e1_K4, e2_K4, R_K4 = particle_velocities(psi_buff,e1_mesh_buff,e2_mesh_buff,R_mesh_buff)
            psi += (dt/6)*(K1 + 2*K2 + 2*K3 + K4)
            pos_e1 += (dt/6)*(e1_K1 + 2*e1_K2 + 2*e1_K3 + e1_K4)
            pos_e2 += (dt/6)*(e2_K1 + 2*e2_K2 + 2*e2_K3 + e2_K4)
            pos_R += (dt/6)*(R_K1 + 2*R_K2 + 2*R_K3 + R_K4)
            print('time=',t)
            print('pos_e1[0] = ', pos_e1[0])
            print('vel_e1[0] = ', (e1_K1[0] + 2*e1_K2[0] + 2*e1_K3[0] + e1_K4[0]))
            print('pos_e2[0] = ', pos_e2[0])
            print('vel_e2[0] = ', (e2_K1[0] + 2*e2_K2[0] + 2*e2_K3[0] + e2_K4[0]))
            print('pos_R[0] = ', pos_R[0])
            print('vel_R[0] = ', (R_K1[0] + 2*R_K2[0] + 2*R_K3[0] + R_K4[0]))
            print('\n')
            t+=dt
            t_index +=1
            if(t_index%psi_save_interval==0):
                np.save(dir_name+'/psi-'+"{:.6}".format(t),psi)
                print("{:.1f}".format(100*t/tf_tot)+'% done')
                print('Norm = ',np.sqrt(np.sum(np.sum(np.sum(np.abs(psi)**2)))*e_spacing**2*n_spacing))
#note that t here is the time index not the absolute time
@jit(nogil=True)
def time_propagator_kernel(psi, t, dt, E_form_t, order, t_initial_index):
    """Advance psi by one step via a truncated Taylor expansion of exp(-i H dt).

    While the step index t is inside the laser window (relative to
    tf_laser_index, a module-level global) the time-dependent Ht(psi, E_form_t)
    is used; afterwards the field-free H.  order selects the Taylor order (2-4).
    BUG FIX: both order==3 branches originally ended in a stray trailing '\\'
    that spliced the following 'if order==2:' line into the expression,
    producing a SyntaxError -- the module could not even be imported.
    """
    #check if the time iterator is within the laser iteration
    if(t<tf_laser_index-t_initial_index):
        if order==4:
            psi = psi - 1j*Ht(psi, E_form_t)*dt - dt**2/2 * Ht(Ht(psi, E_form_t), E_form_t) \
                    + 1j*dt**3/6 * Ht(Ht(Ht(psi, E_form_t), E_form_t), E_form_t) \
                    + dt**4/24 * Ht(Ht(Ht(Ht(psi, E_form_t), E_form_t), E_form_t), E_form_t)
        if order==3:
            psi = psi - 1j*Ht(psi, E_form_t)*dt - dt**2/2 * Ht(Ht(psi, E_form_t), E_form_t) \
                    + 1j*dt**3/6 * Ht(Ht(Ht(psi, E_form_t), E_form_t), E_form_t)
        if order==2:
            psi = psi - 1j*Ht(psi, E_form_t)*dt - dt**2/2 * Ht(Ht(psi, E_form_t), E_form_t)
    else:
        if order==4:
            psi = psi - 1j*H(psi)*dt - dt**2/2 * H(H(psi)) \
                    + 1j*dt**3/6 * H(H(H(psi))) \
                    + dt**4/24 * H(H(H(H(psi))))
        if order==3:
            psi = psi - 1j*H(psi)*dt - dt**2/2 * H(H(psi)) \
                    + 1j*dt**3/6 * H(H(H(psi)))
        if order==2:
            psi = psi - 1j*H(psi)*dt - dt**2/2 * H(H(psi))
    return psi
#need to define global variable psi_t_file where the restart psi_t comes from
# if t_initial is 0 this doesn't matter
# if it is a restart run The old directory name will be changed at the end of the function to reflect the new simulation time
# NOTE!!!!, on restart, the total simulation time will continue to tf_tot, measured in absolute time
# So, if you are restarting at 250, and tf_tot is 300, the simulation will only propagate forward by 50
# This is also true for the laser.
# Make sure that for restarting runs with rampdown, that you give tf_laser as the restart time + 10*optical_cycle
# for smooth ramping
def Time_propagate(psi,dt,tf_tot, tf_laser, order,plotDensity, BOs, t_initial, psi_t_file):
    """Real-time propagate psi under the laser field with live diagnostics.

    Uses time_propagator_kernel each step; tracks energy, norm, BO-surface
    occupations (against BOs) and the field waveform, saving snapshots and
    final arrays into a run directory.  Supports restarts via t_initial and
    psi_t_file.  Relies on module-level globals (form, Amplitude, nu,
    plot_step, psi_save_interval, ...).
    BUG FIXES vs. the original: (1) E_form indexing is now guarded so the
    field-free tail does not run off the end of the envelope array
    (IndexError); (2) the in-pulse energy sample called calc_Et with three
    arguments (TypeError) -- it takes (psi, Et); (3) the ACTIVE marker is
    removed before the directory rename (the original removed it afterwards
    via the stale path, and never removed it for fresh runs).
    """
    #if this is not a restart run
    if(t_initial==0):
        #create the name for the folder where the wavefunctions will be stored
        dir_name = './psi-t-form-'+form+'-A-'+str(Amplitude)+'-tf-laser-'+str(tf_laser)+'-tf-tot-'+str(tf_tot) + '-nu-'+str(nu)
        dir_contents = listdir()
        #if one exists under the same laser form, Amplitude, laser pulse duration and total duration
        # ask if it should be deleted
        if dir_name in dir_contents:
            delete = input('Do you want to delete the currently existing folder? (y/n) ')
            if (delete=='y'):
                call(['rm', '-rf', dir_name])
        #if it does not exist, make it
        else:
            call(['mkdir', dir_name])
    #if this is a restart run
    else:
        #for now save everything in the old folder
        dir_name = psi_t_file
        #The wavefunction A, nu, and dt must be the same to pick the simulation back up
        # not to mention the axes and everything else
        throw_away = input('Double Check that the Amplitude, frequency and dt are the same as in the restart!\n Wavefunctions are going to be deleted next!!!!!!!!! (hit Enter to continue) ')
        del throw_away
        print('deleting existing wavefunctions past restart time')
        #get existing directory contents
        dir_contents = listdir(psi_t_file)
        #go through the directory contents
        for wavefunction in dir_contents:
            #if the object in the directory is a wavefunction
            if (wavefunction[0:4] == 'psi-'):
                #check the time step of the wavefunction
                #timestep goes from the dash, index 4, to (inclusive) the decimal point before npy, index -5
                if (float(wavefunction[4:-4])>t_initial):
                    call(['rm', dir_name+'/'+wavefunction])
        print('wavefunctions deleted, may god have mercy on your soul')
    #save the parameters under which the simulation was run
    #all READMEs should agree
    call(['cp', 'params.py', dir_name+'/README'+str(t_initial)])
    call(['touch', dir_name+'/ACTIVE'])
    if(t_initial==0):
        E_form = shape_E(tf_laser,dt,Amplitude, form)
    else:
        E_form = new_shape_E(t_initial, tf_laser, dt, Amplitude)
    plt.ion()
    if plotDensity==True:
        fig1, (ax_e,ax_n, ax_E, ax_norm, ax_occu0, ax_occu1, ax_wf) = plt.subplots(7,1)
        plt.pause(0.0001)
        fig_rho_e, = ax_e.plot(r1x, one_electron_density(psi),'.')
        fig_rho_n, = ax_n.plot(Rx, integrate_over_electrons(psi),'.')
    else:
        fig1, (ax_E, ax_norm, ax_occu0, ax_occu1, ax_wf) = plt.subplots(5,1)
    E_t = [calc_Et(psi, 0)]
    norm = [np.sqrt(sum(sum(sum(np.conj(psi)*psi)))*e_spacing**2*n_spacing)]
    occupations = BO_occu(psi,BOs)
    gs_occu = [occupations[0]]
    ex_occu = [occupations[1]]
    wf = [E_form[0]]
    fig1.suptitle('Amplitude '+str(Amplitude) + ' Frequency '+str(nu))
    iter_time_plot = [t_initial]
    iter_time = t_initial
    t_initial_index = int(t_initial/dt)
    iter_time_step = 0
    fig_E, = ax_E.plot(iter_time,E_t, '.')
    fig_norm, = ax_norm.plot(iter_time,norm, '.')
    fig_occu0, = ax_occu0.plot(iter_time,gs_occu, '.')
    fig_occu1, = ax_occu1.plot(iter_time,ex_occu, '.')
    fig_wf, = ax_wf.plot(iter_time,wf, '.')
    plt.pause(0.0001)
    fig1.canvas.draw()
    fig1.canvas.flush_events()
    while (iter_time < tf_tot):
        iter_time += dt
        iter_time_step += 1
        # BUG FIX: the envelope array only covers the pulse; after tf_laser the
        # field is zero instead of indexing past the end of E_form.
        if iter_time_step < len(E_form):
            E_form_t = E_form[iter_time_step]
        else:
            E_form_t = 0.0
        psi = time_propagator_kernel(psi, iter_time_step, dt, E_form_t, order, t_initial_index)
        if (int(iter_time/dt)%plot_step==0):
            iter_time_plot.append(iter_time)
            if(iter_time < tf_laser):
                # BUG FIX: calc_Et takes (psi, Et); the original passed
                # (psi, iter_time_step, E_form).
                E_t.append(calc_Et(psi,E_form_t))
                wf.append(E_form[iter_time_step])
            else:
                E_t.append(calc_E(psi))
                wf.append(0)
            norm.append(np.sqrt(sum(sum(sum(np.conj(psi)*psi)))*e_spacing**2*n_spacing))
            occupations = BO_occu(psi,BOs)
            gs_occu.append(occupations[0])
            ex_occu.append(occupations[1])
            plt.pause(0.0001)
            if (plotDensity == True):
                fig_rho_e.set_ydata(one_electron_density(psi))
                fig_rho_n.set_ydata(integrate_over_electrons(psi))
                ax_e.relim()
                ax_e.autoscale()
                ax_n.relim()
                ax_n.autoscale()
            fig_E.set_xdata(iter_time_plot)
            fig_E.set_ydata(E_t)
            ax_E.relim()
            ax_E.autoscale()
            fig_norm.set_xdata(iter_time_plot)
            fig_norm.set_ydata(norm)
            ax_norm.relim()
            ax_norm.autoscale()
            fig_occu0.set_xdata(iter_time_plot)
            fig_occu1.set_xdata(iter_time_plot)
            fig_occu0.set_ydata(gs_occu)
            fig_occu1.set_ydata(ex_occu)
            ax_occu0.relim()
            ax_occu0.autoscale()
            ax_occu1.relim()
            ax_occu1.autoscale()
            fig_wf.set_xdata(iter_time_plot)
            fig_wf.set_ydata(wf)
            ax_wf.relim()
            ax_wf.autoscale()
            fig1.canvas.draw()
            fig1.canvas.flush_events()
        if (int(iter_time/dt)%psi_save_interval==0):
            np.save(dir_name+'/psi-'+"{:.2f}".format(iter_time), psi)
            np.save(dir_name+'/ex(t)',np.array(ex_occu))
            np.save(dir_name+'/gs(t)',np.array(gs_occu))
    if(t_initial==0):
        np.save(dir_name+'/E(t)',np.array(E_t))
        np.save(dir_name+'/final-wf-tf-' + str(tf_tot), psi)
        np.save(dir_name+'/gs(t)',np.array(gs_occu))
        np.save(dir_name+'/ex(t)',np.array(ex_occu))
        np.save(dir_name+'/norm(t)',np.array(norm))
        np.save(dir_name+'/time',np.array(iter_time_plot))
        # BUG FIX: remove the in-progress marker for completed fresh runs too
        call(['rm', dir_name+'/ACTIVE'])
    else:
        np.save(dir_name+'/E(t)-from-'+str(t_initial),np.array(E_t))
        np.save(dir_name+'/final-wf-tf-' + str(tf_tot)+'-from-'+str(t_initial), psi)
        np.save(dir_name+'/gs(t)-from-'+str(t_initial),np.array(gs_occu))
        np.save(dir_name+'/ex(t)-from-'+str(t_initial),np.array(ex_occu))
        np.save(dir_name+'/norm(t)-from-'+str(t_initial),np.array(norm))
        np.save(dir_name+'/time-from-'+str(t_initial),np.array(iter_time_plot))
        # BUG FIX: remove ACTIVE *before* renaming the directory -- the original
        # removed it after the 'mv', via the stale path.
        call(['rm', dir_name+'/ACTIVE'])
        call(['mv', dir_name, './psi-t-form-'+form+'-A-'+\
                str(Amplitude)+'-tf-laser-'+str(int(tf_laser+t_initial))+'-tf-tot-'+str(int(tf_tot+t_initial))])
#time_start must be absolute time, not time index!!!!
#only supported for ramp lasers
@jit(nogil=True)
def new_shape_E(time_start, new_tf_laser, dt, A):
    """Rebuild the ramp-envelope field for a restarted run.

    Samples start at absolute time time_start and run to new_tf_laser; the
    envelope (10-optical-cycle ramp up / flat top / ramp down) is evaluated
    in absolute time so the restarted field matches the original pulse.
    """
    time_steps = int((new_tf_laser - time_start)/dt) + 2
    Eform = np.zeros(time_steps)
    for i in range(time_steps):
        absolute_time = time_start + i*dt
        if(absolute_time<10*optical_cycle):
            Eform[i] = np.sin(2*np.pi*nu*absolute_time)*(absolute_time/(10.*optical_cycle))
        #if the absolute time is above 10 optical cylce ramp up,
        # as well as less than the duration we want the new laser pulse to be (i*dt=new_tf_laser)
        # then we're in the full sine wave
        elif(absolute_time>10*optical_cycle and absolute_time < new_tf_laser-10*optical_cycle):
            Eform[i] = np.sin(2*np.pi*nu*absolute_time)
        else:
            Eform[i] = np.sin(2*np.pi*nu*absolute_time)*(new_tf_laser/(10*optical_cycle)-(absolute_time/(10*optical_cycle)))
    return A*Eform
#}}}
#{{{ Electronic BO stuff
#{{{ H_el set up
#mu_e is 1 in BO approx
# Electronic kinetic matrices for the clamped-nuclei (Born-Oppenheimer)
# problem: electron mass is 1 a.u., so only the grid spacing enters.
T_e1_BO = csr_matrix((-1/(2*e_spacing**2))*Lap_e1 + 0j)
T_e2_BO = csr_matrix((-1/(2*e_spacing**2))*Lap_e2 + 0j)
#relies on r1x and r2x being the same size
# scratch buffer reused by calc_Tel; shaped from the module-level psi0
T_el_buff = np.zeros(psi0[:,:,0].shape) +0j
@jit(nogil=True)
def calc_Tel(psi_el, buff):
    """Apply the electronic kinetic operator (BO, unit masses) to psi_el.

    buff is a preallocated scratch array (T_el_buff), zeroed here and returned.
    """
    buff[:,:] = 0j
    if (len(r1x) == len(r2x)):
        # square grids: one fused loop applies T along both electron axes
        for xi in range(len(r1x)):
            buff[:,xi] += T_e1_BO.dot(psi_el[:,xi])
            buff[xi,:] += T_e2_BO.dot(psi_el[xi,:])
    else:
        # rectangular grids: apply each axis operator with its own loop bound
        for xi in range(len(r2x)):
            buff[:,xi] += T_e1_BO.dot(psi_el[:,xi])
        for xi in range(len(r1x)):
            buff[xi,:] += T_e2_BO.dot(psi_el[xi,:])
    return buff
#need to deal with each subspecies interaction separately
#create kernels for each of these separately, store them in kernel array
#store as [Vee, Ve1n, Ve2n], add 1/R separately
#These kernels correctly reconstruct the potential as checked against Mathematica
@jit(nogil=True)
def construct_Vel(V_kernels):
    """Fill the three soft-Coulomb interaction kernels used by calc_Vel:
    V_kernels[0] electron-electron on (r1, r2); V_kernels[1] electron-1/nuclei
    on (r1, R); V_kernels[2] electron-2/nuclei on (r2, R).  The nuclear 1/R
    repulsion is added separately in calc_Vel.  Cr2 is the softening parameter.
    """
    for r1i in range(len(r1x)):
        for r2i in range(len(r2x)):
            #electron electron interaction
            #SHOULD THERE BE A 1/2 HERE? I dont thing so since sum over i != j
            V_kernels[0][r1i,r2i] += np.sqrt( (r1x[r1i] - r2x[r2i])**2 + Cr2)**(-1)
        #grab the r1 electron nuclear interactions while looping over r1
        for Ri in range(Ri_cut,len(Rx)):
            V_kernels[1][r1i,Ri] += (-np.sqrt( (r1x[r1i] - (mu/M1)*Rx[Ri])**2 + Cr2)**(-1) -\
                    np.sqrt( (r1x[r1i] + (mu/M2)*Rx[Ri])**2 + Cr2)**(-1))
    for Ri in range(Ri_cut,len(Rx)):
        #grab the r2 electron nuclear interactions while looping over R
        for r2i in range(0,len(r2x)):
            V_kernels[2][r2i,Ri] += (-np.sqrt( (r2x[r2i] - (mu/M1)*Rx[Ri])**2 + Cr2)**(-1) -\
                    np.sqrt( (r2x[r2i] + (mu/M2)*Rx[Ri])**2 + Cr2)**(-1))
    return V_kernels
# [Vee, Ve1n, Ve2n] kernels, complex, populated once at import time
Vel_kernels = [np.zeros((len(r1x),len(r2x))) + 0j, np.zeros((len(r1x),len(Rx))) + 0j,\
        np.zeros((len(r2x),len(Rx))) + 0j]
Vel_kernels = construct_Vel(Vel_kernels)
#agrees with slower more direct method of matvec'ing
@jit(nogil=True)
def calc_Vel(psi_el, Vel_kernels, Ri):
    """Apply the full electronic potential at nuclear grid index Ri to psi_el:
    electron-electron + both electron-nuclear kernels + nuclear 1/R repulsion.
    """
    #Since r1/r2 are independent subsystems, one has to apply the potential kernels separately
    if(len(r1x)!=len(r2x)):
        # electron-1/nuclear kernel broadcast down the r2 columns
        Ve1n_psi = np.zeros(psi_el.shape) + 0j
        for i in range(len(r2x)):
            Ve1n_psi[:,i] += Vel_kernels[1][:,Ri]*psi_el[:,i]
        # electron-2/nuclear kernel broadcast across the r1 rows
        Ve2n_psi = np.zeros(psi_el.shape) + 0j
        for i in range(len(r1x)):
            Ve2n_psi[i,:] += Vel_kernels[2][:,Ri]*psi_el[i,:]
        return Vel_kernels[0][:,:]*psi_el[:,:] + Ve1n_psi + 1./Rx[Ri]*psi_el[:,:] + Ve2n_psi
    else:
        # square grids: accumulate both electron-nuclear terms in one loop
        Ven_psi = np.zeros(psi_el.shape) + 0j
        for i in range(len(r2x)):
            Ven_psi[:,i] += Vel_kernels[1][:,Ri]*psi_el[:,i]
            Ven_psi[i,:] += Vel_kernels[2][:,Ri]*psi_el[i,:]
        return Vel_kernels[0][:,:]*psi_el[:,:] + Ven_psi + 1./Rx[Ri]*psi_el[:,:]
@jit(nogil=True)
def cVel(Ri):
    """Direct (unvectorized) construction of the electronic potential surface
    at nuclear grid index Ri; reference implementation for cross-checking
    calc_Vel's kernels.  Returns the potential matrix, not V*psi.
    """
    Vpsi = 1.0*np.zeros((len(r1x),len(r2x))) + 0.0j
    for r1i in range(len(r1x)):
        for r2i in range(len(r2x)):
            # electron-electron soft-Coulomb repulsion (softening Cr2)
            Vpsi[r1i,r2i] += 1./np.sqrt( (r1x[r1i] - r2x[r2i])**2 + Cr2)
            # electron-nuclear attractions: both electrons, both nuclei
            Vpsi[r1i,r2i] -= 1./np.sqrt( (r1x[r1i] - (mu/M1)*Rx[Ri])**2 + Cr2)
            Vpsi[r1i,r2i] -= 1./np.sqrt( (r1x[r1i] + (mu/M2)*Rx[Ri])**2 + Cr2)
            Vpsi[r1i,r2i] -= 1./np.sqrt( (r2x[r2i] - (mu/M1)*Rx[Ri])**2 + Cr2)
            Vpsi[r1i,r2i] -= 1./np.sqrt( (r2x[r2i] + (mu/M2)*Rx[Ri])**2 + Cr2)
            # bare nucleus-nucleus repulsion
            Vpsi[r1i,r2i] += 1./Rx[Ri]
    return Vpsi
#}}}
#{{{ Normalization, H_el, calc_E_el, plotting, constructing
# normalization over the electronic (r1, r2) subspace only
@jit(nogil=True)
def N_el(ket):
    """Return ket scaled to unit norm under the electronic grid measure."""
    norm = np.sqrt( sum(sum(np.conj(ket)*ket))*e_spacing**2)
    return ket/norm
@jit(nogil=True)
def H_el(psi_el,Ri):
    """Electronic Hamiltonian at fixed nuclear grid index Ri: V_el + T_el."""
    potential = calc_Vel(psi_el,Vel_kernels,Ri)
    kinetic = calc_Tel(psi_el,T_el_buff)
    return potential + kinetic
@jit(nogil=True)
def calc_E_el(psi_el,Ri):
    """Electronic energy expectation <psi_el|H_el|psi_el> at nuclear index Ri."""
    Hpsi = H_el(psi_el,Ri)
    return sum(sum(np.conj(psi_el)*Hpsi))*e_spacing**2
#one normalized first-order imaginary-time step for the electronic problem
@jit(nogil=True)
def imag_prop_el(psi_el, Ri, tau):
    """Propagate psi_el one step in imaginary time and renormalize."""
    stepped = psi_el - tau*H_el(psi_el,Ri)
    return N_el(stepped)
@jit(nogil=True)
def inner_product_el(bra,ket):
    """Discrete electronic inner product <bra|ket> over (r1, r2)."""
    overlap = np.conj(bra)*ket
    return sum(sum(overlap))*e_spacing**2
@jit(nogil=True)
def filter_projection_el(psi, kets):
    """Remove the components of psi along each (normalized) electronic state
    in kets: psi - sum_i <k_i|psi> |k_i>."""
    proj = np.zeros(psi.shape) +0j
    for ket in kets:
        proj = proj + inner_product_el(ket, psi)*ket
    return psi - proj
@jit(nogil=True)
def filtered_imag_prop_el(psi_el,Ri,tau, filters):
    """Normalized imaginary-time step restricted to the subspace orthogonal
    to the states in `filters` (relaxes toward excited states)."""
    filtered = filter_projection_el(psi_el,filters)
    h_filtered = filter_projection_el(H_el(filtered,Ri),filters)
    return N_el(psi_el - tau*h_filtered)
#initial electronic guess for a BO relaxation: symmetrized pair of Gaussians
#centered on the two nuclei at +/- R/2 (assumes M1 == M2)
@jit(nogil=True)
def construct_electronic_wf(psi_el0, Ri):
    """Fill psi_el0 with a normalized, symmetric two-Gaussian guess at
    nuclear separation Rx[Ri]."""
    half_R = Rx[Ri]/2
    for r1i in range(len(r1x)):
        for r2i in range(len(r2x)):
            direct = np.exp(-(r1x[r1i] - half_R)**2/re_sig2)\
                    *np.exp(-(r2x[r2i] + half_R)**2/re_sig2)
            exchanged = np.exp(-(r2x[r2i] - half_R)**2/re_sig2)\
                    *np.exp(-(r1x[r1i] + half_R)**2/re_sig2)
            psi_el0[r1i,r2i] = direct + exchanged +0j
    return N_el(psi_el0)
#antisymmetric two-Gaussian guess: proper first excited electronic state
#(higher states would need additional nodal structure, e.g. cosine factors)
@jit(nogil=True)
def construct_excited_electronic_wf(psi_el0, Ri):
    """Fill psi_el0 with a normalized, antisymmetric two-Gaussian guess at
    nuclear separation Rx[Ri]."""
    half_R = Rx[Ri]/2
    for r1i in range(len(r1x)):
        for r2i in range(len(r2x)):
            direct = np.exp(-(r1x[r1i] - half_R)**2/re_sig2)\
                    *np.exp(-(r2x[r2i] + half_R)**2/re_sig2)
            exchanged = np.exp(-(r2x[r2i] - half_R)**2/re_sig2)\
                    *np.exp(-(r1x[r1i] + half_R)**2/re_sig2)
            psi_el0[r1i,r2i] =(direct
                    - exchanged ) +0j
    return N_el(psi_el0)
@jit(nogil=True)
def BO_electronic_density(psi_el):
    """One-electron density: |psi_el|^2 integrated over the other electron,
    summed for both electrons.
    NOTE(review): adding the two axis-sums into one array assumes
    len(r1x) == len(r2x) -- confirm for asymmetric grids."""
    rho = abs(psi_el)**2
    out = np.sum(rho,0) + np.sum(rho,1)
    return out*e_spacing
@jit(nogil=True)
def check_convergence_el(psi, Ri):
    """Return (residual norm, energy) for the electronic eigenproblem at Ri."""
    Hpsi = H_el(psi,Ri)
    E = (sum(sum(np.conj(psi)*Hpsi)))*e_spacing**2
    # residual of the eigenvalue equation
    residual = Hpsi - E*psi
    # its norm is the distance from an exact eigenstate
    d = np.sqrt(sum(sum(np.conj(residual)*residual))*e_spacing**2)
    return d, E
#}}}
#{{{ SD and cgs
#Start with cgs for gs, later expand algorithm for any number of excited states
# to extend this to search for excited states, add an option for number of states,
# then when initializing the algorithm build a list for each up to the number of excited states
# then for loop over the number of states, embedding the while condition inside,
# making sure to filter each time!!
@jit(nogil=True)
def sd_el(Ri, tol, num_excited):
    """Steepest-descent relaxation of num_excited+1 electronic eigenstates
    at nuclear grid index Ri.

    State 0 starts from the symmetric two-Gaussian guess; higher states start
    from the antisymmetric guess with all previously-found states projected
    out of both the guess and every residual.  Returns (eigenvalues,
    eigenstates), each a list of length num_excited+1.
    """
    eigenstates = []
    eigenvalues = []
    for i in range(num_excited+1):
        #make an initial guess
        #may need a random component to access different directions of function space
        if(i==0):
            psi_m = construct_electronic_wf(np.zeros((len(r1x),len(r2x))),Ri) + 0j
        if(i>0):
            #could write in better excited states for greater than first
            psi_m = N_el(filter_projection_el(\
                    construct_excited_electronic_wf(np.zeros((len(r1x),len(r2x))),Ri) + 0j, \
                    eigenstates))
        # NOTE(review): 'conj' is never used below
        conj = np.zeros(psi_m.shape) +0j
        #calculate the 0th iteration eigenvalue guess
        lam = calc_E_el(psi_m,Ri)
        r = lam*psi_m - H_el(psi_m,Ri)
        if(i>0):
            r = filter_projection_el(r,eigenstates)
        # step length along the residual direction
        alpha = inner_product_el(r,r)/calc_E_el(r,Ri)
        psi_m = psi_m + alpha*r
        #check for convergence
        dist, lam = check_convergence_el(psi_m,Ri)
        #enter algorithm
        while (dist > tol):
            r = lam*psi_m - H_el(psi_m,Ri)
            if(i>0):
                r = filter_projection_el(r,eigenstates)
            alpha = inner_product_el(r,r)/calc_E_el(r,Ri)
            psi_m = psi_m + alpha*r
            #chec convergence, update lambda
            dist, lam = check_convergence_el(psi_m, Ri)
            print('dist=',dist)
        print(i, ' !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!state done!')
        eigenstates.append(psi_m)
        eigenvalues.append(lam)
    return eigenvalues, eigenstates
@jit(nogil=True)
def cg_el(Ri,tol):
    """Conjugate-gradient relaxation of the electronic ground state at nuclear
    grid index Ri; returns the converged eigenvalue.

    BUG FIX: the initial eigenvalue guess originally read calc_E_el(psi_m1, Ri)
    -- psi_m1 is undefined anywhere in this file (NameError on first call);
    the freshly-constructed guess psi_m is the intended argument.
    """
    #make an initial guess
    #may need a random component to access different directions of function space
    psi_m = construct_electronic_wf(np.zeros((len(r1x),len(r2x))),Ri) + 0j
    lam = calc_E_el(psi_m,Ri)
    # initial search direction = initial residual
    d = lam*psi_m - H_el(psi_m,Ri)
    r = d
    ri_inner = inner_product_el(r,r)
    beta = 0
    dist, lam = check_convergence_el(psi_m,Ri)
    while (dist > tol):
        Hd = H_el(d,Ri)
        # CG step length and state update
        alpha = inner_product_el(r,r)/inner_product_el(d,Hd)
        psi_m = psi_m + alpha*d
        # previous residual norm, needed for the beta (Fletcher-Reeves) factor
        ri_inner = inner_product_el(r,r)
        r = r - alpha*Hd
        beta = inner_product_el(r,r)/ri_inner
        d = r + beta*d
        dist,lam = check_convergence_el(psi_m,Ri)
        print('dist = ', dist)
    return lam
def sd_BO(tol, num_excited):
    """Compute Born-Oppenheimer surfaces by steepest descent: relax
    num_excited+1 electronic states at every nuclear grid point.

    Returns (BO_energies, BO_states), each indexed [state][R-point].
    """
    n_states = num_excited + 1
    BO_energies = [[] for _ in range(n_states)]
    BO_states = [[] for _ in range(n_states)]
    for Ri in range(len(Rx)):
        energies, states = sd_el(Ri,tol,num_excited)
        for state in range(n_states):
            BO_energies[state].append(energies[state])
            BO_states[state].append(states[state])
    return BO_energies, BO_states
def cg_BO(tol):
    # Conjugate-gradient BO ground-surface energy at every nuclear grid point.
    # NOTE(review): appends into the module-level list BO_0 (defined elsewhere)
    # -- verify it exists and is empty before calling.
    for Ri in range(len(Rx)):
        BO_0.append(cg_el(Ri,tol))
#}}}
#{{{ BO imaginary time prop
#could save electronic eigenstates at each Ri as well rather easily
#somehow this is faster without jit
#@jit(nogil=True)
def calculate_BO(tau, tol):
    """Imaginary-time relax the electronic ground state at every nuclear grid
    point, appending the ground BO-surface energies to the module-level BO_0
    and plotting each converged electronic density.

    BUG FIX: inside the convergence loop the original called
    check_convergence(psi_el0, Ri) -- the *full-system* checker, which takes a
    single argument (TypeError on first iteration).  The electronic variant
    check_convergence_el is correct, as the initial call already used.
    (Dead triple-quoted excited-state blocks, which carried the same typo,
    have been removed; sd_el/es relaxation covers excited surfaces.)
    """
    psi_el_buffer = np.zeros( (len(r1x), len(r2x)) )
    for Ri in range(len(Rx)):
        #imaginary time relax the electronic system to a tolerance, record the energy value.
        psi_el = construct_electronic_wf(psi_el_buffer, Ri) + 0j
        psi_el0 = psi_el
        dis, E0 = check_convergence_el(psi_el0,Ri)
        while (dis > tol):
            psi_el0 = imag_prop_el(psi_el0, Ri, tau)
            dis, E0 = check_convergence_el(psi_el0, Ri)
            print('E0 dis = ',dis)
        BO_0.append(E0)
        plt.plot(r1x,BO_electronic_density(psi_el0))
        plt.show()
        print('BO relax is ', float(Ri)/len(Rx)*100, '% of the way done')
#}}}
#{{{ BO occupations
@jit(nogil=True)
def BO_occu(psi_full, psi_els):
    """Project the full wavefunction onto the two BO electronic states and
    return the (ground, excited) occupation magnitudes.
    """
    #note that psi els are organized as such
    # psi_els[gs or excited, Nuclear index, r1x, r2x]
    egs_on_psi = np.zeros(len(Rx)) + 0j
    eex_on_psi = np.zeros(len(Rx)) + 0j
    # Overlap integrals over the electronic grid at each nuclear point Ri.
    for Ri in range(len(Rx)):
        egs_on_psi[Ri] = sum(sum(np.conj(psi_els[0,Ri])*psi_full[:,:,Ri]))*e_spacing**2
        eex_on_psi[Ri] = sum(sum(np.conj(psi_els[1,Ri])*psi_full[:,:,Ri]))*e_spacing**2
    # NOTE(review): this squares the already-|.|^2 integral over R --
    # confirm the intended normalization convention.
    gs = np.abs(sum(np.conj(egs_on_psi)*egs_on_psi)*n_spacing)**2
    ex = np.abs(sum(np.conj(eex_on_psi)*eex_on_psi)*n_spacing)**2
    return gs, ex
#}}}
@jit(nogil=True)
def D_el(psi_el,Ri):
    """Return -(r1 + r2) * psi_el elementwise (dipole-style operator on the
    electronic grid -- confirm intended physics). `Ri` is unused here and
    appears to be kept for interface symmetry with sibling operators.
    """
    Dpsi = np.zeros(psi_el.shape)+0j
    # Subtract the r1 contribution column-by-column ...
    for r2i in range(len(r2x)):
        Dpsi[:,r2i] -= psi_el[:,r2i]*r1x[:]
    # ... and the r2 contribution row-by-row.
    for r1i in range(len(r1x)):
        Dpsi[r1i,:] -= psi_el[r1i,:]*r2x[:]
    return Dpsi
#}}}
| livelyke/ICWF_python | functions.py | functions.py | py | 47,083 | python | en | code | 0 | github-code | 13 |
69852407378 | from lib import *
from material import *
from sphere import *
from math import pi, tan
from intersect import *
from light import *
import random
BLACK = color(0, 0, 0)
WHITE = color(255, 255, 255)
BACKGROUND = color(200, 200, 190)
MAX_RECURSION_DEPTH = 3
class Raycaster(object):
    """CPU ray tracer: casts rays into a sphere scene, shades with diffuse +
    specular + shadows + reflection/refraction, and writes a 24-bit BMP.

    NOTE: the vector helpers used below (sum, sub, mul, dot, norm, length,
    reflect, refract) come from `lib` via a star import; `sum(a, b)` is
    presumably lib's two-argument vector add shadowing the builtin --
    confirm against lib.py.
    """
    def __init__(self, width, height):
        # Framebuffer dimensions in pixels.
        self.width = width
        self.height = height
        self.background_color = BLACK
        self.scene = []
        self.glClear()
    def glClear(self):
        # Row-major framebuffer: pixels[row][col] == pixels[y][x].
        self.pixels = [
            [self.background_color for x in range(self.width)]
            for y in range(self.height)
        ]
    def finish(self, filename):
        """Serialize the framebuffer as an uncompressed 24-bit BMP file."""
        f = open(filename, 'bw')
        #file header
        f.write(char('B'))
        f.write(char('M'))
        f.write(dword(14 + 40 + self.width * self.height * 3))
        f.write(dword(0))
        f.write(dword(14 + 40))
        # image loader
        f.write(dword(40))
        f.write(dword(self.width))
        f.write(dword(self.height))
        f.write(word(1))
        f.write(word(24))
        f.write(dword(0))
        f.write(dword(self.width * self.height * 3))
        f.write(dword(0))
        f.write(dword(0))
        f.write(dword(0))
        f.write(dword(0))
        # pixel data
        # NOTE(review): render() fills pixels[y][x] but this reads
        # pixels[x][y]; that only indexes safely for square images and emits
        # the transpose -- confirm the intended orientation.
        for x in range(self.width):
            for y in range(self.height):
                f.write(self.pixels[x][y].toBytes())
        f.close()
    def point(self, x, y, color = None):
        # Set one pixel; out-of-range coordinates are silently ignored.
        try:
            self.pixels[y][x] = color or self.background_color
        except:
            pass
    def scene_intersect(self, orig, direction):
        """Return (material, hit) for the closest object along the ray, or
        (None, None) when nothing is hit."""
        zbuffer = float('inf')
        material = None
        intersect = None
        for obj in self.scene:
            hit = obj.ray_intersect(orig, direction)
            if hit is not None:
                if hit.distance < zbuffer:
                    zbuffer = hit.distance
                    material = obj.material
                    intersect = hit
        return material, intersect
    def cast_ray(self, orig, direction, recursion=0):
        """Trace one ray and return its shaded color; recurses for the
        reflective and refractive albedo components."""
        material, intersect = self.scene_intersect(orig, direction)
        if material is None or recursion >= MAX_RECURSION_DEPTH: # break recursion of reflections after n iterations
            return self.background_color
        # Offset along the normal to avoid self-intersection ("shadow acne").
        offset_normal = mul(intersect.normal, 1.1)
        if material.albedo[2] > 0:
            reverse_direction = mul(direction, -1)
            reflect_dir = reflect(reverse_direction, intersect.normal)
            reflect_orig = sub(intersect.point, offset_normal) if dot(reflect_dir, intersect.normal) < 0 else sum(intersect.point, offset_normal)
            reflect_color = self.cast_ray(reflect_orig, reflect_dir, recursion + 1)
        else:
            reflect_color = color(0, 0, 0)
        if material.albedo[3] > 0:
            refract_dir = refract(direction, intersect.normal, material.refractive_index)
            refract_orig = sub(intersect.point, offset_normal) if dot(refract_dir, intersect.normal) < 0 else sum(intersect.point, offset_normal)
            refract_color = self.cast_ray(refract_orig, refract_dir, recursion + 1)
        else:
            refract_color = color(0, 0, 0)
        # Hard shadow test: march from the hit point toward the light.
        light_dir = norm(sub(self.light.position, intersect.point))
        light_distance = length(sub(self.light.position, intersect.point))
        shadow_orig = sub(intersect.point, offset_normal) if dot(light_dir, intersect.normal) < 0 else sum(intersect.point, offset_normal)
        shadow_material, shadow_intersect = self.scene_intersect(shadow_orig, light_dir)
        shadow_intensity = 0
        if shadow_material and length(sub(shadow_intersect.point, shadow_orig)) < light_distance:
            shadow_intensity = 0.9
        intensity = self.light.intensity * max(0, dot(light_dir, intersect.normal)) * (1 - shadow_intensity)
        reflection = reflect(light_dir, intersect.normal)
        specular_intensity = self.light.intensity * (
            max(0, -dot(reflection, direction))**material.spec
        )
        # Final color = weighted sum of the four albedo components.
        diffuse = material.diffuse * intensity * material.albedo[0]
        specular = color(255, 255, 255) * specular_intensity * material.albedo[1]
        reflection = reflect_color * material.albedo[2]
        refraction = refract_color * material.albedo[3]
        return diffuse + specular + reflection + refraction
    def render(self):
        """Cast two tinted rays per pixel (red/blue offset origins, an
        anaglyph-style effect) and store the summed color."""
        fov = int(pi/2)
        for y in range(self.height):
            for x in range(self.width):
                # Map the pixel to normalized device coordinates.
                i = (2 * (x + 0.5)/self.width - 1) * tan(fov/2) * self.width/self.height
                j = (2 * (y + 0.5)/self.height - 1) * tan(fov/2)
                direction = norm(V3(i, j, -1))
                # self.pixels[y][x] = self.cast_ray(V3(0,0,0), direction)
                # this pass produces the red-light effect
                ver_rojo = self.cast_ray(V3(0.3, 0, 0), direction)
                luz_roja = (ver_rojo * 0.5 + color(100, 0, 0)) if ver_rojo != self.background_color else ver_rojo
                # this pass produces the blue-light effect
                ver_azul = self.cast_ray(V3(-0.3, 0, 0), direction)
                luz_azul = (ver_azul * 0.5 + color(0, 0, 100)) if ver_azul != self.background_color else ver_azul
                luz_total = luz_roja + luz_azul
                self.pixels[y][x] = luz_total
# Render a 2000x2000 scene: a snowman-like figure assembled from spheres.
# The commented-out Sphere lines are a second (disabled) figure offset in x.
r = Raycaster(2000, 2000)
r.light = Light(
    position=V3(40, 20, 20),
    intensity=1.5
)
r.scene = [
    # christmas ornament (body)
    # Sphere(V3(-2.5, -1, -10), 2, navidad1),
    Sphere(V3(0, -1, -10), 2, navidad2),
    # # head
    # Sphere(V3(-2.5, 2, -10), 1.5, cuerpo1),
    Sphere(V3(0, 2, -10), 1.5, cuerpo2),
    # # ears
    # Sphere(V3(-3.5, 3.5, -10), 0.6, cuerpo1),
    # Sphere(V3(-1.5, 3.5, -10), 0.6, cuerpo1),
    Sphere(V3(1, 3.5, -10), 0.6, cuerpo2),
    Sphere(V3(-1, 3.5, -10), 0.6, cuerpo2),
    # # arms
    # Sphere(V3(-3.5, 0, -8), 0.6, cuerpo1),
    Sphere(V3(1, 0, -8), 0.6, cuerpo2),
    # Sphere(V3(-1, 0, -8), 0.6, cuerpo1),
    Sphere(V3(-1.5, 0, -8), 0.6, cuerpo2),
    # # legs
    # Sphere(V3(-3.5, -2, -8), 0.6, cuerpo1),
    Sphere(V3(1, -2, -8), 0.6, cuerpo2),
    # Sphere(V3(-1, -2, -8), 0.6, cuerpo1),
    Sphere(V3(-1.5, -2, -8), 0.6, cuerpo2),
    # # snout
    # Sphere(V3(-2.5, 1.5, -8.5), 0.6, cuerpo1),
    Sphere(V3(0, 1.5, -8.5), 0.6, cuerpo2),
    # # nose tip
    # Sphere(V3(-2.3, 1.25, -7.5), 0.1, negro),
    Sphere(V3(-0.7, 1.25, -7.5), 0.1, negro),
    # # eyes
    # Sphere(V3(-3, 2.4, -8.5), 0.2, negro),
    # Sphere(V3(-2, 2.4, -8.5), 0.2, negro),
    Sphere(V3(0.5, 2.4, -8.5), 0.2, negro),
    Sphere(V3(-0.5, 2.4, -8.5), 0.2, negro),
]
# r.scene = [
#     Sphere(V3(0, 0, 0), 1, cuerpo1)
# ]
r.render()
r.finish('out.bmp') | Crismaria11/Proyecto2_Graficas | Lab3/ray.py | ray.py | py | 6,150 | python | en | code | 0 | github-code | 13 |
21397096723 | from django.urls import path
from django.urls.conf import include
from .views import register, recipe, step, next_step, change_sender, current_step, reset
# Route table for the recipe-walkthrough endpoints; each `name` enables
# reverse() / {% url %} lookups.
urlpatterns = [
    path('register/', register, name='register'),
    path('step/', step, name='step'),
    path('next_step/', next_step, name='next_step'),
    path('change_sender/', change_sender, name='change_sender'),
    path('current_step/', current_step, name='current_step'),
    path('current_recipe/', recipe, name='recipe'),
    path('reset/', reset, name='reset')
] | fuurin/tebula | tebula/urls.py | urls.py | py | 512 | python | en | code | 1 | github-code | 13 |
5940324443 | from actions import EmailAction
def main():
    """Send a test email through EmailAction and report the outcome."""
    recipient = "aubnmaiml@gmail.com"  # replace with a valid recipient address
    action = EmailAction()
    sent = action.send_email(recipient)
    print("Email sent successfully!" if sent else "Failed to send email.")
# Run the demo only when executed as a script.
if __name__ == "__main__":
    main()
| itsashwanianand/Sending-Automated-mail--RASA- | Rasa_Projects/install_demo/actions/email_test.py | email_test.py | py | 486 | python | en | code | 0 | github-code | 13 |
40837602879 | import discord
import logging
from discord.ext import commands
# Log SetUp
logging.basicConfig(level=logging.INFO, filename='bot.log', filemode="w")
class Basics(commands.Cog):
    """Basic bot commands: a help embed, a greeting, and a user-info embed."""
    def __init__(self, bot):
        self.bot = bot
    @commands.command()
    async def Help(self, ctx):
        """Send an embed listing the bot's commands (capitalized to avoid
        clashing with discord.py's built-in help command)."""
        embed = discord.Embed(title="Bot'sy Help", description = "Everything you need to know", color = 0x00ff00)
        embed.add_field(name=">info <user>", value = "Give user's info", inline=False)
        embed.add_field(name=">kick <user>", value ="Kick the hell out of someone", inline=False)
        embed.add_field(name=">someone <user>", value ="Tag a connected person of the server, BOT not included.", inline=False)
        embed.add_field(name=">cheatox", value = "no one will notice", inline = False)
        embed.add_field(name=">poll <choice1> <emoji1> <choice2> <emoji2> ... ",value = "Make a poll", inline = False)
        embed.add_field(name=">settle <list of user>", value = "Randomly choose one of the user", inline = False)
        embed.add_field(name=">dalle \"<prompt>\" [Optional]<number_pics>", value = "Create a DALL-E generated image from your prompt", inline = False)
        await ctx.send(embed=embed)
    @commands.command()
    async def hello(self, ctx):
        """Reply with a greeting."""
        # NOTE(review): "perso" looks like a typo for "person" -- confirm
        # before changing the user-facing string.
        await ctx.send("Hello wonderful perso")
    @commands.command()
    async def info(self, ctx, user: discord.Member):
        """Send an embed with the mentioned member's profile details."""
        embed = discord.Embed(title="{}'s info".format(user.name), description = "Here's what I could find.", color = 0x00ff00)
        embed.add_field(name="Name", value = user.name, inline=True)
        embed.add_field(name="ID", value = user.id, inline=True)
        embed.add_field(name="Status", value = user.status, inline=True)
        embed.add_field(name="Highest role", value = user.top_role)
        embed.add_field(name="Joined", value = user.joined_at)
        embed.set_thumbnail(url= user.avatar.url)
        await ctx.send(embed = embed)
async def setup(bot):
await bot.add_cog(Basics(bot)) | Viri0x/DiscordBot | cogs/basics.py | basics.py | py | 2,024 | python | en | code | 0 | github-code | 13 |
15236515112 | import stackusingarray.ArrayStack
def is_matched(string):
    """Return True when every '(', '{', '[' in *string* is closed by its
    matching partner in the correct order; other characters are ignored."""
    pairs = {')': '(', '}': '{', ']': '['}
    stack = stackusingarray.ArrayStack.ArrayStack()
    for ch in string:
        if ch in '({[':
            stack.push(ch)
        elif ch in pairs:
            # A closer must match the most recently opened bracket.
            if stack.is_empty() or stack.pop() != pairs[ch]:
                return False
    return stack.is_empty()
if __name__ == "__main__":
    # Balanced nesting: expected True.
    string = '(((((){})[()])())[])'
    print(is_matched(string))
    # Unclosed openers: expected False.
    string = '(((()'
    print(is_matched(string))
| nssathish/python-dsa | stack/MatchingParenthesis.py | MatchingParenthesis.py | py | 547 | python | en | code | 0 | github-code | 13 |
74564861138 | #!/usr/bin/env python
"""
_File_t_
Unit tests for the WMBS File class.
"""
from builtins import range
import threading
import unittest
import nose
from WMCore.DAOFactory import DAOFactory
from WMCore.DataStructs.Run import Run
from WMCore.Services.UUIDLib import makeUUID
from WMCore.WMBS.File import File
from WMQuality.TestInit import TestInit
class CursorLeakTest(unittest.TestCase):
    """Stress tests for WMBS File ancestry queries; both tests are currently
    disabled with an unconditional nose.SkipTest."""
    def setUp(self):
        """
        _setUp_
        Setup the database and logging connection. Try to create all of the
        WMBS tables. Also add some dummy locations.
        """
        self.testInit = TestInit(__file__)
        self.testInit.setLogging()
        self.testInit.setDatabaseConnection()
        self.testInit.setSchema(customModules=["WMCore.WMBS"],
                                useDefault=False)
        myThread = threading.currentThread()
        daofactory = DAOFactory(package="WMCore.WMBS",
                                logger=myThread.logger,
                                dbinterface=myThread.dbi)
        locationAction = daofactory(classname="Locations.New")
        locationAction.execute(siteName="se1.cern.ch")
        locationAction.execute(siteName="se1.fnal.gov")
        return
    def tearDown(self):
        """
        _tearDown_
        Drop all the WMBS tables.
        """
        self.testInit.clearDatabase()
    def testCursor(self):
        """
        _testCursor_
        test the cursor closing is really affected
        create 100 files with 5 parents and loop 100 times.
        If the cursors are exhausted will crash.?
        TODO: improve for more effective testing.
        """
        # NOTE(review): permanently skipped; remove the SkipTest to re-enable.
        raise nose.SkipTest
        fileList = []
        parentFile = None
        for i in range(100):
            testFile = File(lfn="/this/is/a/lfn%s" % i, size=1024, events=10,
                            checksums={"cksum": "1"})
            testFile.addRun(Run(1, *[i]))
            testFile.create()
            for j in range(5):
                parentFile = File(lfn="/this/is/a/lfnP%s" % j, size=1024,
                                  events=10, checksums={"cksum": "1"})
                parentFile.addRun(Run(1, *[j]))
                parentFile.create()
                testFile.addParent(parentFile['lfn'])
            fileList.append(testFile)
        for i in range(100):
            for file in fileList:
                file.loadData()
                file.getAncestors(level=2)
                file.getAncestors(level=2, type="lfn")
        return
    def testLotsOfAncestors(self):
        """
        _testLotsOfAncestors_
        Create a file with 15 parents with each parent having 100 parents to
        verify that the query to return grandparents works correctly.
        """
        # NOTE(review): permanently skipped; remove the SkipTest to re-enable.
        raise nose.SkipTest
        testFileA = File(lfn="/this/is/a/lfnA", size=1024, events=10,
                         checksums={"cksum": "1"}, locations="se1.fnal.gov")
        testFileA.create()
        for i in range(15):
            testParent = File(lfn=makeUUID(), size=1024, events=10,
                              checksums={"cksum": "1"}, locations="se1.fnal.gov")
            testParent.create()
            testFileA.addParent(testParent["lfn"])
            # NOTE(review): inner loop shadows the outer index `i`.
            for i in range(100):
                testGParent = File(lfn=makeUUID(), size=1024, events=10,
                                   checksums={"cksum": "1"}, locations="se1.fnal.gov")
                testGParent.create()
                testParent.addParent(testGParent["lfn"])
        assert len(testFileA.getAncestors(level=2, type="lfn")) == 1500, \
            "ERROR: Incorrect grand parents returned"
        return
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
| dmwm/WMCore | test/python/WMCore_t/WMBS_t/CursorLeak_t.py | CursorLeak_t.py | py | 3,717 | python | en | code | 44 | github-code | 13 |
36077890175 | #conding = utf-8
#author : You Zhang
from distutils.core import setup
import py2exe
# py2exe options: compress the library archive, bytecode-optimize (-OO),
# ASCII-only codecs, and bundle everything into a single executable.
options = {"py2exe":
           {"compressed": 1,
            "optimize": 2,
            "ascii": 1,
            "bundle_files": 1 }
           }
setup(
    options = options,
    zipfile=None,
    console=[{"script": "dataTrace_v3.py", "icon_resources": [(0, "asset\sbell_new.ico")] }]
    )
| YouZhang/LTESystemTest | dataTracer/setup.py | setup.py | py | 346 | python | en | code | 0 | github-code | 13 |
42855143231 | from django.core.validators import MinLengthValidator
from django.db import models
from django.utils.translation import gettext_lazy as _
from ..flower.models import Product
from shared.mixins.model_utils import CreatedUpdateMixins
class Recommendation(CreatedUpdateMixins):
    """
    Recommendation model

    attributes:
        name (str): name of the recommendation
        active (bool): whether the recommendation is active
        product (Product m2m): products included in the recommendation
        created (datetime): creation timestamp (from CreatedUpdateMixins)
        updated (datetime): last-update timestamp (from CreatedUpdateMixins)
    """
    name = models.CharField(
        max_length=250,
        validators=[MinLengthValidator(3)],
        verbose_name=_('name'),
        help_text=_('name of recommendation')
    )
    active = models.BooleanField(
        verbose_name=_('active'),
        help_text=_('active recommendation or not'),
        default=True
    )
    product = models.ManyToManyField(
        Product,
        verbose_name=_('product'),
        help_text=_('products in the recommendation')
    )
    class Meta(object):
        # Newest recommendations first.
        verbose_name = _('recommendation')
        verbose_name_plural = _('Recommendations')
        ordering = ['-created']
    def __str__(self) -> str:
        """Return the recommendation's name as its string representation."""
        return self.name
| Sergey-sg/flowers_shop | backend/apps/recommendation/models.py | models.py | py | 1,381 | python | en | code | 0 | github-code | 13 |
20380811935 | import mbuild as mb
class B(mb.Compound):
    """A coarse-grained bead (arbitrarily named 'B') with two bonding ports,
    one facing up and one facing down, for chaining beads together."""
    # Bead mass in reduced units -- confirm against the force field in use.
    mass = 1.0
    def __init__(self):
        super(B, self).__init__()
        self.add(mb.Particle(name='B'))
        # Port above the particle, offset +0.07 along y (nm by mBuild
        # convention -- confirm).
        self.add(mb.Port(anchor=self[0]), 'up')
        mb.translate(self['up'], [0, 0.07, 0])
        # Mirror port below the particle.
        self.add(mb.Port(anchor=self[0]), 'down')
        mb.translate(self['down'], [0, -0.07, 0])
| palmertr/epoxpy | epoxpy/lib/b.py | b.py | py | 465 | python | en | code | 0 | github-code | 13 |
18275453946 | #!/usr/bin/env python
import elasticsearch
import json
import logging
import sys
import copy
from apscheduler.scheduler import Scheduler
from collections import Counter
from assemblyline.common.isotime import now_as_iso
from assemblyline.al.common import forge, log as al_log
from assemblyline.al.common.queue import CommsQueue
from threading import Lock
def cleanup_metrics(input_dict):
    """Expand a flat dict with dotted keys into a nested dict of parsed values.

    {"svc.cache_hit": "3", "ok": "true"} -> {"svc": {"cache_hit": 3}, "ok": True}

    Leaf values are parsed as int, then "true"/"false" booleans, otherwise
    kept as the raw value. Fixes two bugs in the previous version:
    - items.index(i) misidentified the leaf when a path component repeated
      (e.g. "a.b.a" lost its value);
    - when a component already existed, the cursor never descended into it,
      so sibling keys ("a.b" then "a.c") landed at the wrong nesting level.
    """
    output_dict = {}
    for k, v in input_dict.items():
        parts = k.split(".")
        parent = output_dict
        for depth, part in enumerate(parts):
            if depth == len(parts) - 1:
                # Leaf: parse int -> bool -> raw fallthrough.
                try:
                    parent[part] = int(v)
                except Exception:
                    if v == "true":
                        parent[part] = True
                    elif v == "false":
                        parent[part] = False
                    else:
                        parent[part] = v
            else:
                # Intermediate: descend, creating the sub-dict on demand.
                parent = parent.setdefault(part, {})
    return output_dict
class MetricsServer(object):
    """Aggregates metric messages from a Redis comms queue into per-component
    counters and flushes them to Elasticsearch once a minute.

    A background scheduler job does the flush; `counters_lock` guards the
    shared counter map between the listener loop and the flush job.
    """
    # Counter keys recorded for each known component type.
    SRV_METRICS = ['svc.cache_hit', 'svc.cache_miss', 'svc.cache_skipped', 'svc.execute_start', 'svc.execute_done',
                   'svc.execute_fail_recov', 'svc.execute_fail_nonrecov', 'svc.job_scored', 'svc.job_not_scored']
    INGEST_METRICS = ['ingest.duplicates', 'ingest.bytes_ingested', 'ingest.submissions_ingested', 'ingest.error',
                      'ingest.timed_out', 'ingest.submissions_completed', 'ingest.files_completed',
                      'ingest.bytes_completed', 'ingest.skipped', 'ingest.whitelisted']
    DISPATCH_METRICS = ['dispatch.files_completed']
    ALERT_METRICS = ['alert.received', 'alert.err_no_submission', 'alert.heavy_ignored', 'alert.proto_http',
                     'alert.proto_smtp', 'alert.proto_other', 'alert.saved']
    METRIC_TYPES = {'alerter': ALERT_METRICS,
                    'ingester': INGEST_METRICS,
                    'dispatcher': DISPATCH_METRICS,
                    'service': SRV_METRICS}
    def __init__(self, metrics_channel_name, logger, elastic_ip_p, elastic_port_p):
        self.metrics_channel_name = metrics_channel_name
        self.elastic_ip = elastic_ip_p
        self.elastic_port = elastic_port_p
        self.scheduler = Scheduler()
        self.metrics_queue = None
        self.es = None
        self.log = logger
        # NOTE(review): this mutates the class-level dict, so extra metrics
        # are shared by all instances -- confirm intended.
        self.METRIC_TYPES.update(forge.get_config().core.metricsd.extra_metrics)
        self.counters_lock = Lock()
        self.counters = {}
    def serve_forever(self):
        """Start the 60s flush job and consume metric messages forever."""
        self.metrics_queue = CommsQueue(self.metrics_channel_name)
        self.es = elasticsearch.Elasticsearch([{'host': self.elastic_ip, 'port': self.elastic_port}])
        self.scheduler.add_interval_job(
            self._create_aggregated_metrics,
            seconds=60, kwargs={"my_logger": self.log})
        self.scheduler.start()
        while True:
            for msg in self.metrics_queue.listen():
                if not msg or msg.get('type', None) != 'message':
                    continue
                metrics = json.loads(msg['data'])
                # (name, type, host) identifies the emitting component.
                metrics_name = metrics.pop('name', None)
                metrics_type = metrics.pop('type', None)
                metrics_host = metrics.pop('host', None)
                _ = metrics.pop('instance', None)
                if not metrics_name or not metrics_type or not metrics_host:
                    continue
                with self.counters_lock:
                    if (metrics_name, metrics_type, metrics_host) not in self.counters:
                        self.counters[(metrics_name, metrics_type, metrics_host)] = Counter(metrics)
                    else:
                        self.counters[(metrics_name, metrics_type, metrics_host)] += Counter(metrics)
    def _create_aggregated_metrics(self, my_logger):
        """Flush job: snapshot + reset the counters, then index one document
        per component into the dated al_metrics index."""
        my_logger.info("Copying counters.")
        with self.counters_lock:
            counter_copy = copy.deepcopy(self.counters)
            self.counters = {}
        my_logger.info("Aggregating metrics.")
        timestamp = now_as_iso()
        for component, counts in counter_copy.iteritems():
            component_name, component_type, component_host = component
            output_metrics = {'name': component_name,
                              'type': component_type,
                              'host': component_host}
            if component_type in self.METRIC_TYPES:
                # Missing counters are reported as 0.
                output_metrics.update({k: counts.get(k, 0) for k in self.METRIC_TYPES[component_type]})
            else:
                my_logger.info("Skipping unknown component type: {cpt}".format(cpt=component_type))
                continue
            output_metrics['timestamp'] = timestamp
            output_metrics = cleanup_metrics(output_metrics)
            my_logger.info(output_metrics)
            try:
                self.es.create("al_metrics-%s" % timestamp[:10].replace("-", "."), component_type, output_metrics)
            except Exception as e:
                my_logger.exception(e)
        my_logger.info("Metrics aggregated... Waiting for next run.")
my_logger.info("Metrics aggregated... Waiting for next run.")
if __name__ == '__main__':
    logging.basicConfig(stream=sys.stderr, level=logging.INFO)
    config = forge.get_config()
    al_log.init_logging('metricsd')
    log = logging.getLogger('assemblyline.metricsd')
    # Elasticsearch endpoint comes from the seed; bail out if unconfigured.
    elastic_ip = config.get('logging', {}).get('logserver', {}).get('node', None)
    elastic_port = config.get('logging', {}).get('logserver', {}).get('elastic', {}).get('port', 9200)
    if not elastic_ip or not elastic_port:
        log.error("Elasticsearch cluster not configured in the seed. There is no need to gather stats on this box.")
        sys.exit(1)
    mserver = MetricsServer('SsMetrics', log, elastic_ip, elastic_port)
    mserver.serve_forever()
| deeptechlabs/cyberweapons | assemblyline/assemblyline/al/run/admin/metricsd.py | metricsd.py | py | 5,947 | python | en | code | 78 | github-code | 13 |
8012748764 | from django.shortcuts import render, redirect
from numpy import array
from .models import Prescription, Approval, CustomerPrescription
from django.http import JsonResponse
import json
from .utils import viewAnnotation, scrapeMedicineImage, sendTextWhatsapp
import boto3
from .utils import convert,calculateConfidence,isSimilarImage,convertJson,CustomerConvert
from decouple import config
from PIL import Image
import img2pdf
import os
from fpdf import FPDF
import cv2
from django.contrib.auth import get_user_model
User = get_user_model()
# AWS credentials come from the environment/.env via python-decouple.
ACCESS_KEY_ID = config('ACCESS_KEY_ID')
ACCESS_SECRET_KEY = config('ACCESS_SECRET_KEY')
# Module-level boto3 clients shared by all views below.
s3 = boto3.client('s3',aws_access_key_id = ACCESS_KEY_ID,aws_secret_access_key = ACCESS_SECRET_KEY)
textract = boto3.client('textract',aws_access_key_id=ACCESS_KEY_ID,aws_secret_access_key = ACCESS_SECRET_KEY, region_name='us-west-2')
BUCKET_NAME = config('BUCKET_NAME')
comprehendmedical = boto3.client('comprehendmedical', aws_access_key_id=ACCESS_KEY_ID,
                                 aws_secret_access_key = ACCESS_SECRET_KEY, region_name='us-west-2')
# Create your views here.
def homepage(request):
    """Render the landing page; anonymous visitors are sent to login."""
    if not request.user.is_authenticated:
        return redirect('login')
    return render(request, 'pages/homepage.html')
def uploadPrescription(request):
    """GET: show the upload form. POST: save the uploaded prescription image,
    reuse annotations from a visually similar existing prescription when one
    is found (otherwise run OCR via predictPrescription), then create one
    Approval task per user and redirect to the single view."""
    if request.user.is_authenticated:
        if request.method == 'GET':
            return render(request, 'pages/uploadPrescription.html')
        elif request.method == 'POST':
            image = request.FILES['prescription_image']
            prescriptionList = Prescription.objects.all()
            obj = Prescription(uploaded_by=request.user, image=image)
            obj.save()
            flag = False
            flagItem = ""
            # NOTE(review): the queryset is evaluated after obj.save(), so
            # len-1 presumably skips the just-saved row (assumes insertion
            # order) -- confirm; excluding obj.id explicitly would be safer.
            for i in range(len(prescriptionList) - 1):
                if(isSimilarImage(prescriptionList[i].image, obj.image)):
                    flag = True
                    flagItem = prescriptionList[i]
                    break
            # print(flagItem)
            if flag:
                # Reuse the similar prescription's annotation, rekeyed to the
                # new image filename.
                obj.annotation = convertJson((str(obj.image).split("/"))[1],flagItem.annotation)
                obj.save()
            else:
                # No match: run Textract-based prediction.
                predictPrescription(request, obj.id)
            users = list(User.objects.all())
            for user in users:
                x = Approval(prescription = obj,checkedBy = user)
                x.save()
            return redirect('singleViewPres', prescription_id=obj.id)
    else:
        return redirect('login')
def viewPrescription(request):
    """List prescriptions, optionally filtered by a case-insensitive search
    over the annotation text and the uploader's username."""
    if request.user.is_authenticated:
        search = ""
        result = Prescription.objects.all()
        prescriptions_containing_search = []
        if 'search' in request.POST:
            search = request.POST['search'].lower()
            for prescription in result:
                # Substring match against annotation JSON + uploader name.
                if search in (str(prescription.annotation).lower() + prescription.uploaded_by.username.lower()):
                    prescriptions_containing_search.append(prescription)
        else:
            prescriptions_containing_search = result
        data = {
            'prescriptions' : prescriptions_containing_search,
            'searched' : search
        }
        return render(request, 'pages/viewPrescription.html', context=data)
    else:
        return redirect('login')
# Output directories for digitized artifacts; created on demand in
# visualizeAnnotation.
digitised_prescriptionImage_dir ='DigitizedPrescriptionImage/'
digitised_prescriptionImagePdf_dir ='DigitizedPrescriptionImagePdf/'
digitised_prescriptionPdf_dir = 'DigitizedPrescriptionPdf/'
def visualizeAnnotation(request, prescription_id):
    """Render the annotated + digitized views of a prescription and produce
    two PDFs: one from the digitized image (img2pdf) and one rebuilt as
    selectable text positioned from the annotation boxes (fpdf)."""
    if request.user.is_authenticated:
        prescription = Prescription.objects.get(id=prescription_id)
        annotations = prescription.annotation
        annotated_image, digitized_image,x = viewAnnotation(annotations, image_path = prescription.image.url)
        # create directories if do not exist
        if not os.path.exists(digitised_prescriptionImage_dir):
            os.makedirs(digitised_prescriptionImage_dir)
        if not os.path.exists(digitised_prescriptionImagePdf_dir):
            os.makedirs(digitised_prescriptionImagePdf_dir)
        if not os.path.exists(digitised_prescriptionPdf_dir):
            os.makedirs(digitised_prescriptionPdf_dir)
        #img2pdf Code
        url = prescription.image.url
        url = url.split('/')[-1]
        im = Image.fromarray(x)
        im.save(os.path.join(digitised_prescriptionImage_dir+str(url)))
        pdfdata = img2pdf.convert(digitised_prescriptionImage_dir+url)
        # NOTE(review): `file` shadows the (py2) builtin name.
        file = open(digitised_prescriptionImagePdf_dir + url.split('.')[0]+'.pdf','wb')
        file.write(pdfdata)
        file.close()
        # "digitzed" is the model field's actual (misspelled) name.
        prescription.digitzedImagePdf = digitised_prescriptionImagePdf_dir + url.split('.')[0]+'.pdf'
        prescription.save()
        #fpdf code
        img = cv2.imread(str(prescription.image))
        height, width = img.shape[0], img.shape[1]
        pdf = FPDF('P','mm',[width,height])
        pdf.add_page()
        for annotation in annotations[prescription.image.url+"/-1"]['regions']:
            # Scale the font roughly by the box aspect ratio.
            height_of_box = annotation["shape_attributes"]["height"]
            width_of_box = annotation["shape_attributes"]["width"]
            fontScale = height_of_box / width_of_box
            if fontScale > 0.5:
                fontScale = 1.5
            else:
                fontScale = 1
            pdf.set_font("Arial", size = 64*fontScale)
            pdf.set_xy(annotation['shape_attributes']['x'],annotation['shape_attributes']['y']/1.33)
            pdf.cell(annotation['shape_attributes']['width'], annotation['shape_attributes']['height'], txt = annotation['region_attributes']['text'])
        pdf.output(digitised_prescriptionPdf_dir + url.split('.')[0]+'.pdf')
        prescription.digitzedPdf = digitised_prescriptionPdf_dir + url.split('.')[0]+'.pdf'
        prescription.save()
        ############################
        context = {
            'prescription': prescription,
            'annotated_image_uri': annotated_image,
            'digitised_image_uri': digitized_image,
            'digitised_image_uri_pdf' : prescription.digitzedImagePdf.url,
            'digitised_pdf_uri' : prescription.digitzedPdf.url
            # 'pdf_path' : os.path.join(digitised_prescriptionPdf_dir , url.split('.')[0]+'.pdf'),
            # 'pdf_name' : url.split('.')[0]+'.pdf'
        }
        return render(request, 'pages/visualise.html', context=context)
    else:
        return redirect('login')
def Prescriptions(request):
    """Static prescriptions overview page; requires login."""
    if not request.user.is_authenticated:
        return redirect('login')
    return render(request, 'pages/prescriptions.html')
# def Dashboard(request):
# if request.user.is_authenticated:
# return render(request, 'pages/dashboard.html')
# else:
# return redirect('login')
def addMedication(request, prescription_id):
    """Run the OCR'd annotation text through AWS Comprehend Medical, bucket
    the detected entities by category, persist the medication dict on the
    prescription, and render the medication page."""
    if request.user.is_authenticated:
        prescription = Prescription.objects.get(id=prescription_id)
        annotation = Prescription.objects.get(id=prescription_id).annotation
        url = prescription.image.url+"/-1"
        res = ''
        PROTECTED_HEALTH_INFORMATION = []
        info = {}
        Medication = {}                 # {name: [dosage, frequency]} (saved on the model)
        med=[]                          # per-entity [name, dosage, frequency] triple
        c = []                          # all medication triples (template 'med')
        ph=[]                           # per-entity [text, type] pair
        f=[]                            # all PHI pairs (template 'protected_health_info')
        test_treatment = []
        medicalCondition = []
        Anatomy = []
        if len(annotation[url]['regions']):
            # Concatenate all annotated text fragments into one document.
            for r in annotation[url]['regions']:
                res+=" "+r['region_attributes']['text']
            # print("Extracted Text =====> ", res)
            result = comprehendmedical.detect_entities(Text= res)
            entities = result['Entities']
            # print(entities)
            for key in entities:
                if key['Category'] == 'PROTECTED_HEALTH_INFORMATION':
                    ph.append(key['Text'])
                    ph.append(key['Type'])
                    f.append(ph)
                    ph=[]
                elif key['Category'] == 'MEDICATION':
                    # Default dosage/frequency to 'N.A' until attributes say otherwise.
                    med.append(key['Text'])
                    med.append('N.A')
                    med.append('N.A.')
                    dosage = -1
                    frequency = -1
                    if 'Attributes' in key:
                        for i in key['Attributes']:
                            if i['Type'] == 'DOSAGE':
                                dosage = i['Text']
                                med[1]=i['Text']
                            elif i['Type'] == 'FREQUENCY':
                                frequency = i['Text']
                                med[2]=i['Text']
                    c.append(med)
                    med=[]
                    if key['Text'] not in Medication:
                        Medication[key['Text']] = [dosage,frequency]
                elif key['Category'] == 'TEST_TREATMENT_PROCEDURE':
                    test_treatment.append(key['Text'])
                elif key['Category'] == 'MEDICAL_CONDITION':
                    medicalCondition.append(key['Text'])
                elif key['Category'] == 'ANATOMY':
                    Anatomy.append(key['Text'])
        prescription.medication = Medication
        prescription.save()
        context={
            'med':c,
            'protected_health_info' : f,
            'test_treatment' : test_treatment,
            'medicalCondition' : medicalCondition,
            'Anatomy' : Anatomy
        }
        # print(PROTECTED_HEALTH_INFORMATION)
        # print(Medication)
        return render(request, 'pages/medication.html',context=context)
    else:
        return redirect('login')
def singleView(request, prescription_id):
    """Show one prescription with whether medication extraction already ran
    and the mean per-region OCR confidence (0 when there are no regions)."""
    if request.user.is_authenticated:
        if Prescription.objects.get(id=prescription_id).medication:
            p=True
        else:
            p=False
        prescription = Prescription.objects.get(id=prescription_id)
        annotation = Prescription.objects.get(id=prescription_id).annotation
        url = prescription.image.url+"/-1"
        confidence = 0
        # Average the region-level confidences.
        for r in annotation[url]['regions']:
            confidence += r['region_attributes']['confidence']
        if len(annotation[url]['regions']):
            confidence /= len(annotation[url]['regions'])
        context = {
            'prescription': Prescription.objects.get(id=prescription_id),
            'predicted':p,
            'overall_confidence': round(confidence,2)
        }
        return render(request, 'pages/singleView.html', context=context)
    else:
        return redirect('login')
def annotatePrescription(request, prescription_id):
    """Open the VIA annotator preloaded with the selected prescription."""
    if not request.user.is_authenticated:
        return redirect("login")
    return render(
        request,
        'annotator/via.html',
        context={'prescription': Prescription.objects.get(id=prescription_id)},
    )
def medication(result):
    """Join the second element of each item in *result* into one text blob
    and run it through Comprehend Medical.

    NOTE(review): looks unfinished -- `entities` is computed but never
    returned or used, so this function currently returns None; it also
    builds its own client instead of reusing the module-level one.
    """
    res = ''
    for word in result:
        res += word[1]+ ' '
    comprehendmedical = boto3.client('comprehendmedical',
                aws_access_key_id=ACCESS_KEY_ID,
                aws_secret_access_key = ACCESS_SECRET_KEY,
                region_name='us-west-2')
    result = comprehendmedical.detect_entities(Text= res)
    entities = result['Entities']
def predictPrescription(request, prescription_id):
    """Upload the prescription image to S3, run Amazon Textract on it, and
    store the converted predictions as the prescription's annotation."""
    if request.user.is_authenticated:
        image_data = Prescription.objects.get(id=prescription_id).image
        img = str(image_data)
        if img:
            response = s3.upload_file(
                Bucket = BUCKET_NAME,
                Filename=img,
                Key = img
            )
        # NOTE(review): picks the most recently modified S3 object as "the
        # file just uploaded" -- race-prone under concurrent uploads; using
        # `img` directly would be safer. Confirm.
        objs = s3.list_objects_v2(Bucket=BUCKET_NAME)['Contents']
        objs.sort(key=lambda e: e['LastModified'], reverse=True)
        first_item = list(objs[0].items())[0]
        documentName = str(first_item[1])
        # Call Amazon Textract
        with open(documentName, "rb") as document:
            response = textract.analyze_document(
                Document={
                    'Bytes': document.read(),
                },
                FeatureTypes=["FORMS"])
        # print(response)
        preds = convert(response,img,img.split('/')[-1])
        # print(preds)
        prescription = Prescription.objects.get(id=prescription_id)
        prescription.annotation= preds
        prescription.save()
    else:
        return redirect("login")
def addAnnotation(request, prescription_id):
    """Persist the annotator's posted JSON as the prescription's annotation.

    NOTE(review): unlike the other views this endpoint has no authentication
    check, and the JSON body is a placeholder -- confirm whether the VIA
    annotator relies on either before changing.
    """
    prescription = Prescription.objects.get(id=prescription_id)
    annotations = request.POST['annotation']
    annotations = json.loads(annotations)
    prescription.annotation = annotations
    prescription.save()
    return JsonResponse({"abc":"dad"})
def deletePrescription(request, prescription_id):
    """Delete a prescription; only its uploader may delete it.

    Non-owners are silently redirected home without deleting; anonymous
    visitors are sent to login. (Removed a dead local `search = None` that
    was never read.)
    """
    if request.user.is_authenticated:
        prescription = Prescription.objects.get(id=prescription_id)
        if request.user == prescription.uploaded_by:
            prescription.delete()
        return redirect("home")
    else:
        return redirect('login')
def viewApproval(request):
    """List the approval tasks assigned to the signed-in reviewer."""
    if not request.user.is_authenticated:
        return redirect('login')
    pending = Approval.objects.filter(checkedBy=request.user)
    return render(
        request,
        'pages/viewApproval.html',
        context={'fetchedApprovals': pending},
    )
def processApproval(request,prescription_id):
    """Show the review page for one prescription: annotated/digitized images
    plus the list and count of annotation texts to verify."""
    if request.user.is_authenticated:
        prescription = Prescription.objects.get(id=prescription_id)
        annotations = prescription.annotation
        annotated_image, digitized_image,x = viewAnnotation(annotations, image_path = prescription.image.url)
        c = 0
        listAnnotations = []
        for annotation in annotations[prescription.image.url+"/-1"]['regions']:
            c+=1
            listAnnotations.append(annotation['region_attributes']['text'])
        context = {
            'annotated_image_uri': annotated_image,
            'digitised_image_uri': digitized_image,
            'noOfAnnotations' : c,
            'prescription_id' : prescription_id,
            'listAnnotations' : listAnnotations
        }
        return render(request, 'pages/approvalPage.html', context=context)
    else:
        return redirect('login')
# def updateApproval(request,prescription_id):
# if request.user.is_authenticated:
# prescription = Prescription.objects.get(id=prescription_id)
# approval = Approval.objects.get(prescription = prescription,checkedBy = request.user)
# approval.status = "Reviewed"
# correctAnnotations = request.POST['correctAnnotations']
# noOfAnnotations = request.POST['noOfAnnotations']
# ratio = int(correctAnnotations) / int(noOfAnnotations)
# print(ratio, "------>")
# prescription.confidence = calculateConfidence(prescription.noChecked,prescription.confidence,ratio)
# prescription.noChecked = prescription.noChecked + 1
# approval.save()
# prescription.save()
# # result = Approval.objects.filter(checkedBy = request.user)
# # context = {
# # 'fetchedApprovals' : result
# # }
# # return render(request, 'pages/viewApproval.html', context=context)
# return redirect('approvals')
# else:
# return redirect('login')
def updateApproval(request, prescription_id):
    """Record a reviewer's verdict for one prescription.

    The running confidence is recomputed on every submission; the review
    counter (``noChecked``) only advances the first time this reviewer
    marks the approval as "Reviewed".  Redirects back to the approvals
    list, or to login for anonymous visitors.
    """
    if request.user.is_authenticated:
        prescription = Prescription.objects.get(id=prescription_id)
        approval = Approval.objects.get(prescription=prescription, checkedBy=request.user)
        correctAnnotations = request.POST['correctAnnotations']
        noOfAnnotations = request.POST['noOfAnnotations']
        # NOTE(review): noOfAnnotations == '0' would raise ZeroDivisionError here.
        ratio = int(correctAnnotations) / int(noOfAnnotations)
        # Both branches of the original code computed the same confidence
        # update; it is hoisted here and only the first-review bookkeeping
        # stays conditional.
        prescription.confidence = calculateConfidence(prescription.noChecked, prescription.confidence, ratio)
        if approval.status != "Reviewed":
            approval.status = "Reviewed"
            prescription.noChecked = prescription.noChecked + 1
        approval.save()
        prescription.save()
        return redirect('approvals')
    else:
        return redirect('login')
def dashboard(request):
    """Render the static dashboard page.

    NOTE(review): the authentication guard is commented out, so this page
    is currently reachable without logging in -- confirm this is intended.
    """
    # if request.user.is_authenticated:
    return render(request, 'pages/dashboard.html')
    # else:
    #     return redirect('login')
def customerView(request):
    """Show the customer prescription upload form (login required)."""
    if not request.user.is_authenticated:
        return redirect('login')
    return render(request, 'pages/uploadCustomer.html')
def customerUploadForm(request):
    """Handle a customer prescription upload end to end.

    Saves the uploaded image, runs OCR over it, classifies the recognised
    text with AWS Comprehend Medical, stores detected medications (with
    dosage/frequency when present), scrapes a product image per medicine
    and sends everything to the customer's phone over WhatsApp.

    NOTE(review): when the OCR produced no regions, the POST branch falls
    through without a return (Django would then fail on a None response)
    -- confirm that case cannot occur in practice.
    """
    if request.user.is_authenticated:
        if request.method == 'POST':
            phoneNumber = request.POST['phoneNumber']
            image = request.FILES['prescription_image']
            obj = CustomerPrescription(uploaded_by=request.user, image=image, phoneNumber = int(phoneNumber))
            obj.save()
            # Populate obj.annotation via Textract before reading it back.
            predictCustomerPrescription(request, obj.id)
            prescription = CustomerPrescription.objects.get(id=obj.id)
            annotation = CustomerPrescription.objects.get(id=obj.id).annotation
            # Annotations are keyed by "<image url>/-1".
            url = prescription.image.url+"/-1"
            res = ''
            PROTECTED_HEALTH_INFORMATION = []
            info = {}
            Medication = {}
            med=[]
            c = []
            ph=[]
            f=[]
            test_treatment = []
            medicalCondition = []
            Anatomy = []
            if len(annotation[url]['regions']):
                # Concatenate all recognised text fragments for Comprehend Medical.
                for r in annotation[url]['regions']:
                    res+=" "+r['region_attributes']['text']
                result = comprehendmedical.detect_entities(Text= res)
                entities = result['Entities']
                # print(entities)
                # Bucket each detected entity by its category.
                for key in entities:
                    if key['Category'] == 'PROTECTED_HEALTH_INFORMATION':
                        ph.append(key['Text'])
                        ph.append(key['Type'])
                        f.append(ph)
                        ph=[]
                    elif key['Category'] == 'MEDICATION':
                        # med = [name, dosage, frequency]; the last two default
                        # to "N.A"/"N.A." until an Attribute overrides them.
                        med.append(key['Text'])
                        med.append('N.A')
                        med.append('N.A.')
                        dosage = -1
                        frequency = -1
                        if 'Attributes' in key:
                            for i in key['Attributes']:
                                if i['Type'] == 'DOSAGE':
                                    dosage = i['Text']
                                    med[1]=i['Text']
                                elif i['Type'] == 'FREQUENCY':
                                    frequency = i['Text']
                                    med[2]=i['Text']
                        c.append(med)
                        med=[]
                        if key['Text'] not in Medication:
                            Medication[key['Text']] = [dosage,frequency]
                    elif key['Category'] == 'TEST_TREATMENT_PROCEDURE':
                        test_treatment.append(key['Text'])
                    elif key['Category'] == 'MEDICAL_CONDITION':
                        medicalCondition.append(key['Text'])
                    elif key['Category'] == 'ANATOMY':
                        Anatomy.append(key['Text'])
                prescription.medication = Medication
                prescription.save()
                print(c, "----->")
                medicineList = c
                medicineImageUrl = []
                # Look up a product image for every detected medicine ...
                for medicine in medicineList:
                    img_url, name = scrapeMedicineImage(medicine)
                    medicineImageUrl.append([img_url, name])
                # ... and push each one to the customer over WhatsApp.
                for image in medicineImageUrl:
                    sendTextWhatsapp(phoneNumber, image[1], image[0])
                context = {
                    "phoneNumber" : phoneNumber,
                    "medicine_data": medicineImageUrl,
                }
                return render(request,'pages/sentToWhatsapp.html', context= context)
        else:
            return redirect('customerView')
    else:
        return redirect('login')
def predictCustomerPrescription(request, prescription_id):
    """Run AWS Textract over a customer's prescription image and store the
    detected text regions on the CustomerPrescription row.

    The image file is uploaded to S3, Textract FORMS analysis is run on
    the local copy, and the converted predictions are saved on
    ``prescription.annotation``.  Anonymous visitors get a login redirect.
    """
    if request.user.is_authenticated:
        image_data = CustomerPrescription.objects.get(id=prescription_id).image
        img = str(image_data)
        if img:
            # upload_file returns None; the original bound it to `response`
            # only to overwrite it below.
            s3.upload_file(
                Bucket=BUCKET_NAME,
                Filename=img,
                Key=img
            )
            # NOTE(review): picking the most recently modified object in the
            # bucket assumes no concurrent uploads; the key uploaded above is
            # already known, so relying on LastModified looks fragile.
            objs = s3.list_objects_v2(Bucket=BUCKET_NAME)['Contents']
            objs.sort(key=lambda e: e['LastModified'], reverse=True)
            first_item = list(objs[0].items())[0]
            documentName = str(first_item[1])
            # Call Amazon Textract on the raw file bytes.
            with open(documentName, "rb") as document:
                response = textract.analyze_document(
                    Document={
                        'Bytes': document.read(),
                    },
                    FeatureTypes=["FORMS"])
            preds = CustomerConvert(response, img, img.split('/')[-1])
            prescription = CustomerPrescription.objects.get(id=prescription_id)
            prescription.annotation = preds
            prescription.save()
    else:
        return redirect("login")
"""Automate the 10fastfingers English typing test with Selenium."""
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
import time

# Website of typing test
URL = 'https://10fastfingers.com/typing-test/english'

# Open chrome webdriver to full screen
driver = webdriver.Chrome(ChromeDriverManager().install())
driver.maximize_window()
driver.get(URL)
wait = WebDriverWait(driver, 10)

# Close the cookie notification at top
closeCookie = wait.until(EC.presence_of_element_located((By.ID, 'CybotCookiebotDialogBodyLevelButtonAccept')))
closeCookie.click()

# Wait for the words to appear on the page
wordlist = wait.until(EC.presence_of_element_located((By.CLASS_NAME, 'highlight')))

# Find the input box.  (`find_element(By.ID, ...)` replaces the
# `find_element_by_id` helpers that were removed in Selenium 4; the name
# `input_box` avoids shadowing the `input` builtin.)
input_box = driver.find_element(By.ID, "inputfield")

# Type each word out
for i in range(345):
    # Find the current word to type
    word = driver.find_element(By.CLASS_NAME, "highlight").text
    # Test is over
    if word == ' ':
        break
    # Type each word 1 character at a time w/ slight delay to look more humanlike
    for aChar in word:
        input_box.send_keys(aChar)
        time.sleep(.04)
    # Enter space after each word to move on
    input_box.send_keys(Keys.SPACE)

# Pause for 5 seconds after test is done then close browser
time.sleep(5)
driver.close()
import graphics.engine
from math import pi as PI

# Which input model to load: 0 -> custom txt export, 1 -> MolView .mol file.
# FORMAT describes each line type by its total length plus the character
# slices holding the relevant fields.
MODEL = 0
if MODEL == 0:
    PATH = "C:/Users/robin/Documents/Projects/OXYTOCIN 3D/models/Oxytocin.txt"
    FORMAT = {'atom': {'len': 70, 'type':31, 'indices':{0:[3, 10], 1:[13, 20], 2:[23, 30]}},
              'bond': {'len': 19, 'indices': {0: [0, 3], 1: [3, 6]}, 'multiplicity':8}}
elif MODEL == 1:
    PATH = "C:/Users/robin/Documents/Projects/OXYTOCIN 3D/models/oxytocin_molview2.mol"
    FORMAT = {'atom': {'len': 70, 'type':31, 'indices':{0:[3, 10], 1:[13, 20], 2:[23, 30]}},
              'bond': {'len': 22, 'indices': {0: [0, 3], 1: [3, 6]}, 'multiplicity':8}}

# Colour per element symbol; "marked" highlights selected atoms.
COL = {
    "H": "lightgray",
    "C": "black",
    "N": "yellow",
    "O": "red",
    "S": "green",
    "marked": "pink"}
def parseModel():
    """Parse the molecule file at PATH into atoms and bonds.

    Lines are recognised purely by their total length, as described by the
    FORMAT table for the selected MODEL.

    Returns:
        (atomPoints, bondIndices, bondPoints) where an atom entry is
        ``[[x, y, z], {'num': 1-based index, 'type': element symbol}]``,
        a bond-index entry is ``[[first, second], {'multiplicity': m}]``
        (0-based atom indices) and a bond-point entry pairs the two atom
        coordinate triples with drawing attributes.
    """
    atomPoints, bondIndices, bondPoints, i = [], [], [], 0
    # `with` guarantees the file handle is released even if parsing fails.
    with open(PATH) as model_file:
        for line in model_file:
            if len(line) == FORMAT['atom']['len']:
                x = line[FORMAT['atom']['indices'][0][0]:FORMAT['atom']['indices'][0][1]]
                y = line[FORMAT['atom']['indices'][1][0]:FORMAT['atom']['indices'][1][1]]
                z = line[FORMAT['atom']['indices'][2][0]:FORMAT['atom']['indices'][2][1]]
                t = line[FORMAT['atom']['type']]
                i += 1
                atomPoints.append([[float(x), float(y), float(z)], {'num': i, 'type': str(t)}])
            if len(line) == FORMAT['bond']['len']:
                # Bond lines reference atoms with 1-based indices.
                first = int(line[FORMAT['bond']['indices'][0][0]:FORMAT['bond']['indices'][0][1]]) - 1
                second = int(line[FORMAT['bond']['indices'][1][0]:FORMAT['bond']['indices'][1][1]]) - 1
                m = int(line[FORMAT['bond']['multiplicity']])
                bondIndices.append([[first, second], {'multiplicity': m}])
                bondPoints.append([[atomPoints[first][0], atomPoints[second][0]], {'col': None, 'outline': 'black', 'multiplicity': m}])
    return atomPoints, bondIndices, bondPoints
def atomSurfacePointGen2(atomPoints, markAtoms=(), style=('tetrahedron', 0.15)):
    """Build a small tetrahedron of surface geometry around every atom.

    Parameters:
        atomPoints: atom records as produced by ``parseModel``.
        markAtoms: indices of atoms to highlight (drawn larger, in the
            ``COL["marked"]`` colour).
        style: ``(shape name, radius)`` -- only the radius is used here.

    Returns:
        (surface points, triangle index records, triangle point records)
        in the structure expected by the renderer.
    """
    from math import acos, sin, cos
    a = acos(1 / 3)  # tetrahedral half-angle
    atomSurfPoints, atomSurfTriangleIndices, atomSurfTrianglePoints = [], [], []
    for i, p in enumerate(atomPoints):
        co = p[0]
        r = style[1]
        c = COL[p[1]['type']]
        if i in markAtoms:
            # Highlighted atoms are drawn bigger and in the marker colour.
            # (The old code assigned the unused name `t` here, so the
            # "marked" colour was never actually applied.)
            c = COL["marked"]
            r = 0.3
        atomSurfPoints.append([[co[0] + r * sin(a), co[1], co[2] - r * cos(a)], {'col': c, 'outline': 'blue'}])
        atomSurfPoints.append([[co[0] - r * sin(a) * sin(PI / 6), co[1] + r * sin(a) * cos(PI / 6), co[2] - r * cos(a)], {'col': c, 'outline': 'blue'}])
        atomSurfPoints.append([[co[0] - r * sin(a) * sin(PI / 6), co[1] - r * sin(a) * cos(PI / 6), co[2] - r * cos(a)], {'col': c, 'outline': 'blue'}])
        atomSurfPoints.append([[co[0], co[1], co[2] + r], {'col': c, 'outline': 'blue'}])
        # Index of the first of the four vertices just appended.  The old
        # `len(...) - 4 - 1` pointed one vertex too far back (the sibling
        # generator atomSurfacePointGen uses `len(points) - 4`).
        l = len(atomSurfPoints) - 4
        # The four faces of the tetrahedron.  The original point list
        # repeated [0, 1, 2] where the index list used [0, 1, 3].
        for tri in ([0, 1, 2], [0, 1, 3], [0, 2, 3], [1, 2, 3]):
            atomSurfTriangleIndices.append([[l + x for x in tri], {'col': c, 'outline': None}])
            atomSurfTrianglePoints.append([[atomSurfPoints[l + x][0] for x in tri], {'col': c, 'outline': None}])
    return atomSurfPoints, atomSurfTriangleIndices, atomSurfTrianglePoints
# Build the scene: parse the molecule, then generate per-atom tetrahedra.
atomPoints, bondIndices, bondPoints = parseModel()
atomSurfPoints, atomSurfTriangleIndices, atomSurfTrianglePoints = atomSurfacePointGen2(atomPoints)
# Geometry handed to the engine: bonds as lines, atom tetrahedra as triangles.
points = []
lines = bondPoints
triangles = atomSurfTrianglePoints
def atomCenterPointGen():
    """Append every atom centre from PATH to the global ``points`` list.

    Uses fixed character slices (4:10 / 13:20 / 23:30, element at 31)
    rather than the FORMAT table, and stores 5-element records
    ``[x, y, z, "atom", symbol]``.  Only called from the commented-out
    block at the bottom of the file (legacy path).
    """
    for i in open(PATH):
        if i[0] != "#" and len(i) == 70:
            x = float(i[4:10])
            y = float(i[13:20])
            z = float(i[23:30])
            t = i[31]
            points.append([x, y, z, "atom", t])
            print([x, y, z, t])
def atomSurfacePointGen():
    """Older tetrahedron generator mutating the global ``points`` and
    ``triangles`` lists in place (superseded by ``atomSurfacePointGen2``).

    NOTE(review): the surface points appended below are plain 3-element
    coordinates, yet the while-loop keeps advancing over them and reads
    ``p[3]`` -- once ``i`` reaches them this looks like it would raise
    IndexError.  The call at the bottom of the file is commented out, so
    this path appears to be dead code; confirm before reviving it.
    """
    import math
    col = {"H": "lightgray",
            "C": "black",
            "N": "yellow",
            "O": "red",
            "S": "green",
            "Mark": "pink"}
    a = math.acos(1/3)  # tetrahedral half-angle
    s = 0.15            # tetrahedron radius (enlarged for the marked atom)
    markAtom = 1
    i = 0
    while i < len(points):
        p = points[i]
        if len(p) != 4:
            print(i, p)
        if p[3] == "atom":
            t = p[4]
            if i == markAtom:
                t = "Mark"
                s = 0.3
            points.append([p[0]+s*math.sin(a), p[1], p[2] - s*math.cos(a)])
            points.append([p[0]-s*math.sin(a)*math.sin(math.pi/6), p[1] + s*math.sin(a)*math.cos(math.pi/6), p[2] - s*math.cos(a)])
            points.append([p[0]-s*math.sin(a)*math.sin(math.pi/6), p[1] - s*math.sin(a)*math.cos(math.pi/6), p[2] - s*math.cos(a)])
            points.append([p[0], p[1], p[2] + s ])
            if i == markAtom:
                s = 0.15
            l = len(points)-4
            # Four faces of the tetrahedron, coloured by element.
            triangles.append([l, l+1, l+2, col[t]])
            triangles.append([l, l+1, l+3, col[t]])
            triangles.append([l, l+2, l+3, col[t]])
            triangles.append([l+1, l+2, l+3, col[t]])
            # Truncate the atom record to a bare coordinate triple.
            points[i] = points[i][:3]
        i += 1
def bondLineIndexGen():
    """Append bond index records to the global ``lines`` list.

    NOTE(review): ``len(i) == FORMAT`` compares an int to the FORMAT dict
    defined at the top of the file and is therefore always False -- this
    function currently appends nothing.  An earlier int-valued FORMAT
    (bond line length 19 or 22) appears to have been intended.
    """
    file = open(PATH)
    for i in file:
        if len(i) == FORMAT:
            first = int(i[0:3])
            second = int(i[3:6])
            amount = int(i[6:9])
            lines.append([first-1, second-1, 'black'])
            #print(first, second)
            print("line:", first, second, i)
def calcDirectionVectors():
    """Compute (but only print, when uncommented) the unit direction
    vector of every bond returned by ``bondLinePointGen``.

    Vectors are normalised and flipped so the x component is always
    non-negative.  ``r`` is unused; diagnostic/scratch code, only invoked
    from the commented-out block at the bottom of the file.
    """
    import math
    r = 5
    p = bondLinePointGen()
    i = 0
    while i < len(p):
        #d = [p[i][0][0]-p[i][1][0], p[i][0][1]-p[i][1][1], p[i][0][2]-p[i][1][2]]
        d = [p[i][0][y] - p[i][1][y] for y in range(0, 3)]
        n = math.sqrt(d[0]**2 + d[1]**2 + d[2]**2)
        d = [x/n for x in d]
        if d[0] < 0:
            d = [-x for x in d]
        #print("Direction vector", d)
        i += 1
def calcBondAngles():
    """Print the pairwise bond angles (in degrees) around every atom that
    has more than two bonds, as reported by ``bondAnglePointGen``.

    For each such atom the direction vectors towards its bonded
    neighbours are built, and the angle of every unordered vector pair is
    computed from the dot-product formula.  ``r`` is unused.
    """
    import math
    r = 5
    p = bondAnglePointGen()
    i = 0
    while i < len(p):
        directionVectors = []
        p0 = p[i][1]
        for j in p[i][2]:
            directionVectors.append([j[c] - p0[c] for c in range(0, 3)])
        alphas = []
        i1 = 0
        while i1 < len(directionVectors):
            i2 = 0
            while i2 < len(directionVectors):
                if i1 < i2: # removes duplicates --> 12, 13, 14, 23, 24, 34 done
                    j, k = directionVectors[i1], directionVectors[i2]
                    # angle = acos( (j . k) / (|j| |k|) ), converted to degrees
                    alpha = math.acos(sum(j[x]*k[x] for x in range(0, 3))/math.sqrt((j[0]**2+j[1]**2+j[2]**2)*(k[0]**2+k[1]**2+k[2]**2)))
                    alphas.append(round(alpha/math.pi*180, 1))
                i2 += 1
            i1 += 1
        print("Atom bond angles around ", p[i][0], ": ", alphas)
        i += 1
def bondLinePointGen():
    """Parse PATH and return bond endpoints as pairs of atom records.

    Atom lines (length 70) are collected first; bond lines (length 19)
    then reference them by 1-based indices.

    Returns:
        list of ``[atom_record, atom_record]`` pairs, one per bond, where
        an atom record is ``[x, y, z, element symbol]``.
    """
    p = []
    # The original referenced `bonds2x3` without ever defining it, which
    # raised NameError on the first bond line; it is a local accumulator.
    bonds2x3 = []
    with open(PATH) as model_file:
        for i in model_file:
            if len(i) == 70:
                x = float(i[4:10])
                y = float(i[13:20])
                z = float(i[23:30])
                t = i[31]
                p.append([x, y, z, t])
            if len(i) == 19:
                first = int(i[0:3])
                second = int(i[3:6])
                bonds2x3.append([p[first - 1], p[second - 1]])
    return bonds2x3
def bondAnglePointGen():
    """Group each atom with the coordinates of its bonded neighbours.

    Only atoms with more than two bonds are returned, each as
    ``[atom index, centre point, [neighbour points]]``.

    NOTE(review): ``len(i) == FORMAT`` compares an int to the FORMAT dict
    at the top of the file and is always False, so no bond indices are
    collected and the result is currently always empty; an earlier
    int-valued FORMAT seems intended.  ``t`` is parsed but unused.
    """
    p, bondIndices = [], []
    for i in open(PATH):
        if len(i) == 70:
            x = float(i[4:10])
            y = float(i[13:20])
            z = float(i[23:30])
            t = i[31]
            p.append([x, y, z])
        if len(i) == FORMAT:
            first = int(i[0:3])
            second = int(i[3:6])
            bondIndices.append([first-1, second-1])
    i = 0
    connectedAtomIndices, connectedAtomPoints = [], [] # [central point, [connected points]]
    while i < len(p):
        matchedIndices = []
        # Collect every atom bonded to atom i (either end of the bond).
        for j in bondIndices:
            if j[0] == i:
                matchedIndices.append(j[1])
            elif j[1] == i:
                matchedIndices.append(j[0])
        if len(matchedIndices) > 2:
            connectedAtomIndices.append([i, matchedIndices])
        i += 1
    for i in connectedAtomIndices:
        matchedPoints = [p[x][:3] for x in i[1]]
        connectedAtomPoints.append([i[0], p[i[0]], matchedPoints])
    return connectedAtomPoints
def guidePlanes():
    """Append three reference planes (two triangles each) to the global
    ``points``/``triangles`` scene: one horizontal and two tilted by the
    tetrahedral angle.  Debug aid; its call at the bottom of the file is
    commented out.
    """
    import math
    a = math.acos(1/3)  # tetrahedral half-angle, same tilt as the atom markers
    x, y, z = 4, 5, 8   # plane half-extents
    p1 = [[-x, -y, 0], [x+3, -y, 0], [-x, y, 0], [x+3, y, 0]]
    p2 = [[z*math.sin(a), -y, -z*math.cos(a)], [-z*math.sin(a), -y, -z*math.cos(a)], [z*math.sin(a), y, z*math.cos(a)], [-z*math.sin(a), y, z*math.cos(a)]]
    p3 = [[z*math.sin(a), -y, z*math.cos(a)], [-z*math.sin(a), -y, z*math.cos(a)], [z*math.sin(a), y, -z*math.cos(a)], [-z*math.sin(a), y, -z*math.cos(a)]]
    global points
    points += p1
    points += p2
    points += p3
    # Two triangles per quad; only the first carries explicit attributes.
    l = len(points) - 12
    triangles.append([l , l+1, l+2, "gray", None])
    triangles.append([l+1, l+2, l+3])
    triangles.append([l+4, l+5, l+6])
    triangles.append([l+5, l+6, l+7])
    triangles.append([l+8, l+9, l+10])
    triangles.append([l+9, l+10, l+11])
# Legacy/diagnostic passes, disabled in favour of the parseModel +
# atomSurfacePointGen2 pipeline above.
#atomCenterPointGen()
#atomSurfacePointGen()
#bondLineIndexGen()
#calcDirectionVectors()
#calcBondAngles()
#guidePlanes()
eng = graphics.engine.Engine3D(points, lines, triangles, width=1800, height=1000, distance=20, scale=40, title='Oxy')
def animation():
    # Redraw and reschedule itself ~every millisecond via the Tk event loop.
    eng.clear()
    eng.render()
    eng.screen.after(1, animation)
animation()
eng.screen.window.mainloop()
| RobinAnne/SimplePython3DMoleculeRenderer | deprecated/oxytocin_old.py | oxytocin_old.py | py | 10,473 | python | en | code | 0 | github-code | 13 |
class Solution(object):
    def rob(self, nums):
        """LeetCode 213 (House Robber II): houses are arranged in a circle,
        so the first and last house can never both be robbed.

        :type nums: List[int]
        :rtype: int
        """
        if len(nums) <= 1:
            return sum(nums)

        def rob_line(houses):
            """Maximum loot from a straight (non-circular) row of houses."""
            prev = now = 0
            for n in houses:
                # The original `now, prev = max(prev + n, now)` tried to
                # unpack a single int and raised TypeError; the rolling
                # pair must be updated together.
                now, prev = max(prev + n, now), now
            return now

        # Either skip the first house or skip the last one.
        return max(rob_line(nums[1:]), rob_line(nums[:-1]))


if __name__ == '__main__':
    # The original called an undefined main(); run a small demo instead.
    print(Solution().rob([2, 3, 2]))
"""Codeforces 518B (Tanya and Postcard).

``s`` is the target text, ``t`` the letters Tanya cut out.  A position
counts as a "YAY" when the exact letter (same case) is available and as
a "WHOOPS" when only the opposite case is.
"""
s = input()
t = input()
n = len(s)

matched = [False] * n
# Multiset of the letters still available from t.
available = {}
for ch in t:
    available[ch] = available.get(ch, 0) + 1

yays = whoops = 0
# First pass: consume exact (case-sensitive) matches.
for i in range(n):
    if available.get(s[i], 0) > 0:
        available[s[i]] -= 1
        yays += 1
        matched[i] = True
# Second pass: fall back to the same letter in the other case.
for i in range(n):
    if not matched[i]:
        c = s[i]
        if available.get(c.upper(), 0) > 0:
            available[c.upper()] -= 1
            whoops += 1
        elif available.get(c.lower(), 0) > 0:
            available[c.lower()] -= 1
            whoops += 1
print(yays, whoops)
# -*- coding: utf-8 -*-
import requests
import random
import string
from lxml import html as xhtml
from selenium import webdriver
douban_dict = {
'search_url': 'https://movie.douban.com/subject_search?search_text=',
'search_result_page_filter': '//div[starts-with(@class, "sc-bZQynM")][1]/div/a/@href',
'movie_profile_page_filter': '//a[@class="nbgnbg"]/@href',
'post_grid_page_filter': '//div[@class="cover"][1]/a/@href',
'post_page_filter': '//img[@src]/@src'
}
def read_from_file(file_name):
    """Read movie titles from a UTF-8 text file, one per line.

    Blank lines (a lone newline) are skipped; surviving lines are
    returned with their trailing newline removed.
    """
    name_list = []
    # `with` closes the handle even if decoding fails part-way through.
    with open(file_name, 'r', encoding='utf-8') as file:
        for line in file:
            if line != '\n':
                name_list.append(line.strip('\n'))
    print('成功获取到电影列表, 共计', len(name_list), '条')
    return name_list
def construct_douban_cookie():
    """Fetch Douban's session cookies and replace the ``bid`` cookie with a
    random 11-character alphanumeric value.

    Presumably this randomised bid makes repeated scraping requests look
    like distinct visitors -- confirm against Douban's rate limiting.
    """
    url = 'https://www.douban.com'
    bid_value = ''.join(random.sample(string.ascii_letters + string.digits, 11))
    cookie = requests.get(url).cookies
    cookie.pop('bid')
    c = requests.cookies.RequestsCookieJar()
    c.set('bid', bid_value, path='/', domain='.douban.com')
    cookie.update(c)
    return cookie
def init_web_driver():
    """Create a headless Chrome driver with a zh_CN locale and a desktop
    Chrome user agent.

    NOTE(review): ``Options.set_headless()`` is deprecated/removed in
    newer Selenium releases in favour of ``options.headless = True`` or
    ``add_argument('--headless')`` -- confirm the pinned Selenium version.
    """
    options = webdriver.ChromeOptions()
    options.set_headless()
    options.add_argument('--disable-gpu')
    options.add_argument('lang=zh_CN.UTF-8')
    options.add_argument(
        'user-agent="Mozilla/5.0 (X11; Linux x86_64) \
        AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3080.5 Safari/537.36"')
    driver = webdriver.Chrome(options=options)
    return driver
def pars_static_page(url, xpath_filter):
    """GET a static page (with a fresh Douban cookie) and return whatever
    the given XPath filter extracts from it (a list)."""
    response = requests.get(url, cookies=construct_douban_cookie())
    page = xhtml.fromstring(response.content)
    parsed_url = page.xpath(xpath_filter)
    return parsed_url
def pars_dynamic_page(driver, url, xpath_filter):
    """Load a JavaScript-rendered page in the Selenium driver and return
    whatever the given XPath filter extracts from its DOM (a list)."""
    driver.get(url)
    page = xhtml.fromstring(driver.page_source)
    parsed_url = page.xpath(xpath_filter)
    return parsed_url
def join_douban_url(movie_name):
    """Build the Douban search URL for the given movie title."""
    return '{}{}'.format(douban_dict['search_url'], movie_name)
def output_result(file_name, url_dict):
    """Write each movie title and its poster URL to ``file_name``.

    Each entry becomes "<title>\\n<url>\\n" followed by a blank line.
    When the dict is empty nothing is written and a notice is printed.
    """
    if not url_dict:
        print('未写入文件。')
        return
    # str.join builds the payload in one pass instead of quadratic `+=`.
    payload = ''.join('{}\n{}\n\n'.format(key, url) for key, url in url_dict.items())
    # `with` guarantees the file is flushed and closed even on error.
    with open(file_name, 'w', encoding='utf-8') as file:
        file.write(payload)
def crawl(input_file, output_file):
    """Look up a poster image URL on Douban for every movie title in
    ``input_file`` and write the title/URL pairs to ``output_file``.

    For each title: search page -> movie profile -> poster grid -> poster
    page -> image URL, each step extracting the first XPath hit.
    """
    name_post_dict = {}
    driver = init_web_driver()
    name_list = read_from_file(input_file)
    success_count = 0
    failure_count = 0
    for name in name_list:
        try:
            profile_url = pars_dynamic_page(driver, join_douban_url(name), douban_dict['search_result_page_filter']).pop(0)
            post_grid_url = pars_static_page(profile_url, douban_dict['movie_profile_page_filter']).pop(0)
            post_page_url = pars_static_page(post_grid_url, douban_dict['post_grid_page_filter']).pop(0)
            img_url = pars_static_page(post_page_url, douban_dict['post_page_filter']).pop(0)
            name_post_dict[name] = img_url
            print('%-28s' % name, '获取成功:', img_url)
            success_count += 1
        # NOTE(review): a bare except here swallows everything (including
        # KeyboardInterrupt); any missed XPath hit or network error just
        # counts as a failure for this title.
        except:
            print('%-20s' % name, '获取失败!')
            failure_count += 1
    print('获取成功总计:', success_count)
    print('获取失败总计:', failure_count)
    output_result(output_file, name_post_dict)
if __name__ == '__main__':
crawl('test.txt', 'result.txt')
| chadschan163/DoubanMoviePostSniffer | doubanMoviePost.py | doubanMoviePost.py | py | 3,488 | python | en | code | 2 | github-code | 13 |
import calendar
import datetime
import unittest
def get_total_days_from(first, last):
    """Count the days in the inclusive span from `first` to `last`."""
    # (last - first).days is the exclusive span; the first day adds one.
    return (last - first).days + 1
def get_days_of_month(date):
    """Return how many days the month containing `date` has."""
    # monthrange gives (weekday of day 1, number of days).
    return calendar.monthrange(date.year, date.month)[1]
def get_budget_per_day(budget_in_month, days_in_month):
    """Average daily budget for a month."""
    daily_budget = budget_in_month / days_in_month
    return daily_budget
def get_first_month_budget(budget, first, total_days_in_first_month):
    """Budget for the remainder of the first month -- from ``first.day``
    through the month's end -- at that month's average daily rate."""
    per_day = budget[first.month] / total_days_in_first_month
    remaining_days = total_days_in_first_month - first.day + 1
    return per_day * remaining_days
def get_last_month_budget(
    budget,
    first,
    last,
    total_days_in_first_month,
    total_days_in_last_month,
    total_days,
):
    """Budget for the head of the last month: whatever part of the
    ``total_days`` span is not already covered by the first month, at the
    last month's average daily rate."""
    per_day = budget[last.month] / total_days_in_last_month
    covered_by_first_month = total_days_in_first_month - first.day + 1
    days_in_last_month = total_days - covered_by_first_month
    return per_day * days_in_last_month
def find_budget(first, last):
    """Pro-rated total budget for the inclusive period `first`..`last`.

    Partial first and last months are charged at their month's average
    daily rate; whole months in between are charged in full.  Rounded to
    two decimals.

    NOTE(review): the monthly budget table is hard-coded for Sep-Dec and
    the arithmetic assumes both dates fall in the same year with
    first.month <= last.month -- other inputs raise KeyError or misbehave.
    """
    budget = {
        9: 1000,
        10: 500,
        11: 800,
        12: 1000,
    }
    total_days = get_total_days_from(first, last)
    total_days_in_first_month = get_days_of_month(first)
    total_days_in_last_month = get_days_of_month(last)
    amount = get_first_month_budget(budget, first, total_days_in_first_month)
    # Full months strictly between the endpoints count entirely; their
    # days are removed so only the last-month remainder is left.
    for month in range(first.month + 1, last.month):
        amount += budget[month]
        total_days -= get_days_of_month(datetime.date(first.year, month, 1))
    amount += get_last_month_budget(
        budget,
        first,
        last,
        total_days_in_first_month,
        total_days_in_last_month,
        total_days,
    )
    return round(amount, 2)
class TestBudget(unittest.TestCase):
    """Expected-value tests for find_budget.

    NOTE(review): the last two method names do not match the values they
    assert (e.g. the "3_sep" test actually uses 3 Oct, and the
    "5_oct...734_dot_41" test asserts 2489.25) -- consider renaming.
    """
    def test_1_sep_to_1_sep_should_return_budget_33_dot_33(self):
        first = datetime.datetime(2018, 9, 1)
        last = datetime.datetime(2018, 9, 1)
        actual = find_budget(first, last)
        self.assertEqual(actual, 33.33)
    def test_1_sep_to_5_sep_should_return_budget_166_dot_67(self):
        first = datetime.datetime(2018, 9, 1)
        last = datetime.datetime(2018, 9, 5)
        actual = find_budget(first, last)
        self.assertEqual(actual, 166.67)
    def test_1_sep_to_10_oct_should_return_budget_1161_dot_29(self):
        first = datetime.datetime(2018, 9, 1)
        last = datetime.datetime(2018, 10, 10)
        actual = find_budget(first, last)
        self.assertEqual(actual, 1161.29)
    def test_3_sep_to_10_oct_should_return_budget_1161_dot_29(self):
        first = datetime.datetime(2018, 10, 3)
        last = datetime.datetime(2018, 11, 10)
        actual = find_budget(first, last)
        self.assertEqual(actual, 734.41)
    # @unittest.skip(reason='Still failed')
    def test_5_oct_to_10_dec_should_return_budget_734_dot_41(self):
        first = datetime.datetime(2018, 9, 5)
        last = datetime.datetime(2018, 12, 10)
        actual = find_budget(first, last)
        self.assertEqual(actual, 2489.25)
# Guarded so importing this module no longer runs (and exits on) the tests.
if __name__ == '__main__':
    unittest.main()
| saowaluck/Budget | Budget.py | Budget.py | py | 3,380 | python | en | code | 0 | github-code | 13 |
import numpy as np
import torch.nn as nn
import torch
from sklearn.model_selection import train_test_split
from torch.utils.data import DataLoader, TensorDataset
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec

print('ANN Model \n')
print(f"PyTorch version: {torch.__version__}")
# Check PyTorch has access to MPS (Metal Performance Shader, Apple's GPU architecture)
print(f"Is MPS (Metal Performance Shader) built? {torch.backends.mps.is_built()}")
print(f"Is MPS available? {torch.backends.mps.is_available()}")
# Set the device (CUDA when available, otherwise CPU; MPS is only probed above)
print('CUDA available:', torch.cuda.is_available())
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"Using device: {device} \n")
if __name__ == '__main__':
    # Pre-extracted feature matrix and string labels saved by an earlier step.
    X = np.load('X.npy')
    y = np.load('y.npy')
    y = np.array([0 if l == 'cat' else 1 for l in y])
    # cat is 0, dog is 1
    print('X shape: ', X.shape, '\ny shape: ', y.shape)
    # 90/10 train/test split.
    X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.9)
    train_data = TensorDataset(torch.Tensor(X_train), torch.Tensor(y_train))
    test_data = TensorDataset(torch.Tensor(X_test), torch.Tensor(y_test))
    # Multithread processing
    train_loader = DataLoader(train_data, shuffle=True, batch_size=10, num_workers=8)
    # Single batch holding the whole test set.
    test_loader = DataLoader(test_data, batch_size=len(test_data.tensors[0]))
class ANN(nn.Module):
def __init__(self, input_size):
super().__init__()
self.input = nn.Linear(in_features=input_size, out_features= 2000).to(device)
self.hidden_1 = nn.Linear(in_features= 2000, out_features= 2000).to(device)
self.hidden_2 = nn.Linear(in_features= 2000, out_features= 4000).to(device)
self.output = nn.Linear(in_features= 4000, out_features=2).to(device)
def forward(self, x):
x = nn.functional.relu(self.input(x))
x = nn.functional.relu(self.hidden_1(x))
x = nn.functional.relu(self.hidden_2(x))
return torch.sigmoid(self.output(x))
    model = ANN(X.shape[1])
    print(model)
    num_epochs = 200
    train_accuracies, test_accuracies = [], []
    loss = nn.CrossEntropyLoss()
    # NOTE(review): despite the name, this optimiser is RMSprop, not Adam.
    adam = torch.optim.RMSprop(params=model.parameters(), lr=0.001)
    # NOTE(review): best_test_loss actually tracks the best test *accuracy*
    # for early stopping -- consider renaming.
    best_test_loss = 0
    patience = 5
    count = 0
    for epoch in range(num_epochs):
        # Train set
        batch = 0
        for X, y in train_loader:
            preds = model(X.to(device))
            pred_labels = torch.argmax(preds, axis=1)
            loss_ = loss(preds, y.long())
            print('Batch: ', batch, ' Loss: ', loss_)
            adam.zero_grad()
            loss_.backward()
            adam.step()
            batch += 1
        # NOTE(review): this records the accuracy of the *last* training
        # batch only, not of the whole epoch.
        train_accuracies.append(100 * torch.mean((pred_labels == y).float()).item())
        # Evaluate on the (single-batch) test loader.
        test_preds = []
        test_labels = []
        for X, y in test_loader:
            preds = model(X)
            pred_labels = torch.argmax(preds, axis=1)
            test_preds.extend(pred_labels.numpy())
            test_labels.extend(y.numpy())
        test_accuracy = 100 * np.mean(np.array(test_preds) == np.array(test_labels))
        print('epoch: ', epoch, ' Accuracy: ', 100 * torch.mean((pred_labels == y).float()).item())
        # Early stopping: reset the counter on any improvement in test accuracy.
        if test_accuracy > best_test_loss:
            best_test_loss = test_accuracy
            count = 0
        else:
            count += 1
            if count >= patience:
                print('Early stopping...')
                break
    # Test set
    X, y = next(iter(test_loader))
    pred_labels = torch.argmax(model(X), axis=1)
    test_accuracies.append(100 * torch.mean((pred_labels == y).float()).item())
    torch.save(model.state_dict(), 'ANN_Model_Torch')
    # Plot training accuracy (top) and test accuracy (bottom) over epochs.
    fig = plt.figure(tight_layout=True)
    gs = gridspec.GridSpec(nrows=2, ncols=1)
    ax = fig.add_subplot(gs[0, 0])
    ax.plot(train_accuracies)
    ax.set_xlabel("Epoch")
    ax.set_ylabel("Training accuracy")
    ax = fig.add_subplot(gs[1, 0])
    ax.plot(test_accuracies)
    ax.set_xlabel("Epoch")
    ax.set_ylabel("Test accuracy")
    fig.align_labels()
    plt.show()
| PatrickDabkowski/Bark-Recognition | ANN_Model.py | ANN_Model.py | py | 4,123 | python | en | code | 0 | github-code | 13 |
from rest_framework import serializers
from django.db import connection
from .models import DynamicModel
class TableUpdateSerializer(serializers.Serializer):
    """Validates a dynamic-table update payload: two parallel lists of
    field type names and field titles."""
    field_types = serializers.ListField(child=serializers.CharField())
    field_titles = serializers.ListField(child=serializers.CharField())
    def validate_field_types(self, value):
        """Reject any field type outside the supported trio."""
        for field_type in value:
            if field_type not in ["string", "number", "boolean"]:
                raise serializers.ValidationError(
                    "Invalid field type. Field types must be 'string', 'number', or 'boolean'."
                )
        return value
    def validate(self, attrs):
        """Cross-field check: both lists must be the same length."""
        field_types = attrs.get("field_types")
        field_titles = attrs.get("field_titles")
        if field_types and field_titles:
            if len(field_types) != len(field_titles):
                raise serializers.ValidationError("field_types and field_titles must have the same length")
        return attrs
class TableCreateSerializer(TableUpdateSerializer):
    """Update payload plus a table name that must not already exist."""
    table_name = serializers.CharField()
    def validate_table_name(self, value):
        # Check if a table with the given name already exists
        table_exists = DynamicModel.objects.filter(name=value).first()
        if table_exists:
            raise serializers.ValidationError(f"A table with the name '{value}' already exists.")
        return value
def create_dynamic_serializer(columns):
    """Build a Serializer subclass whose fields mirror ``columns``.

    ``columns`` maps column titles to one of "string", "number" or
    "boolean"; unknown type names fall back to CharField.  Titles are
    normalised (lowercased, spaces become underscores) into field names.
    """
    field_factories = {
        "string": serializers.CharField,
        "number": serializers.FloatField,
        "boolean": serializers.BooleanField,
    }
    fields = {}
    for column, column_type in columns.items():
        field_name = column.lower().replace(" ", "_")
        factory = field_factories.get(column_type, serializers.CharField)
        fields[field_name] = factory()
    return type("DynamicSerializer", (serializers.Serializer,), fields)
| emisgrg/table_builder | table_builder_app/serializers.py | serializers.py | py | 2,068 | python | en | code | 0 | github-code | 13 |
import multiprocessing
import time
def sleep(x):
    """Block the calling process for ``x`` seconds, logging start/finish."""
    print(f'Sleeping for {x} seconds....')
    time.sleep(x)
    print(f'Done sleeping for {x} seconds!')
def main():
    """Run two 1.5 s sleeps in parallel processes and report wall time
    (roughly 1.5 s total instead of 3 s sequentially)."""
    start = time.perf_counter()
    workers = [multiprocessing.Process(target=sleep, args=[1.5]) for _ in range(2)]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
    finish = time.perf_counter()
    print(finish - start)
if __name__ == '__main__':
main()
| sejin8642/gitpractice | scripts/mp.py | mp.py | py | 480 | python | en | code | 0 | github-code | 13 |
# -*- coding: utf-8 -*-
from bs4 import BeautifulSoup
class Headers:
    """Checks an HTTP response-header mapping for common browser-side
    defence mechanisms (clickjacking, HSTS, MIME sniffing, cookie flags,
    Content Security Policy).

    The original implementation probed every header with a bare
    ``except:`` around a key lookup, which swallowed all exceptions
    (including SystemExit/KeyboardInterrupt); direct mapping lookups
    express the same checks without that hazard.
    """

    def __init__(self, headers):
        # Any mapping of header name -> value (e.g. requests' response.headers).
        self._headers = headers

    def has_xframe_defence(self):
        """True when an X-Frame-Options header is present (anti-clickjacking)."""
        return 'X-Frame-Options' in self._headers

    def has_hsts_defence(self):
        """True when Strict-Transport-Security is present."""
        return 'Strict-Transport-Security' in self._headers

    def has_x_content_type_options_defence(self):
        """True when X-Content-Type-Options explicitly disables MIME sniffing."""
        value = self._headers.get('X-Content-Type-Options')
        return value in ('no-sniff', 'nosniff')

    def has_http_only_defence(self):
        """True when the Set-Cookie header carries the HttpOnly flag."""
        set_cookie = self._headers.get('Set-Cookie')
        return set_cookie is not None and 'httponly' in set_cookie.lower()

    def has_secure_cookie_defence(self):
        """True when the Set-Cookie header carries the Secure flag."""
        set_cookie = self._headers.get('Set-Cookie')
        return set_cookie is not None and 'secure' in set_cookie.lower()

    def has_content_security_policy_defence(self):
        """True when a Content-Security-Policy header is present."""
        return 'Content-Security-Policy' in self._headers
| security-analyzer/core | scanners/defense_mechanisms/Headers.py | Headers.py | py | 1,493 | python | en | code | 10 | github-code | 13 |
import numpy as np
# scipy.ndimage.filters was deprecated and later removed; convolve lives
# directly under scipy.ndimage.
from scipy.ndimage import convolve
def energyRGB(image):
    '''
    Function calculates the energy of an RGB image as the sum of the
    per-channel grey energies.

    Parameters:
        image: RGB image (H x W x 3)

    Returns:
        res: Energy of the image
    '''
    return sum(energyGrey(image[:, :, channel]) for channel in range(3))
def energyGrey(image):
    '''
    Function calculates the energy of an image with single colour channel
    using 1st derivative gradient filters in both x and y direction.

    Parameters:
        image: Single channel image (2-D array)

    Returns:
        res: Energy of the image, |dI/dx| + |dI/dy|
    '''
    # np.float was removed in NumPy 1.24; the builtin float is the
    # equivalent 64-bit dtype.
    gx = np.array([[-1, 0, 1]]).astype(float)      # Gradient filter in x direction
    gy = np.array([[-1], [0], [1]]).astype(float)  # Gradient filter in y direction
    # Energy = |derivative in x| + |derivative in y|
    res = np.abs(convolve(image, gx, mode='nearest')) + np.abs(convolve(image, gy, mode='nearest'))
    return res
| PabitraBansal/Content-Aware-Image-Resizing | energy.py | energy.py | py | 1,030 | python | en | code | 0 | github-code | 13 |
import json
import time
import datetime
import requests
import os
import boto3
def lambda_handler(event, context):
    """AWS Lambda entry point: return a rotating 3-item window of Airtable
    record titles as a JSON API-Gateway response.

    The window's start index is derived from the size of this function's
    own CloudWatch log stream, so repeated invocations walk through the
    records like a circular buffer.
    """
    # connect to the table
    AIRTABLE_BASE_ID = 'appYQAU5CcytTTkKs'
    AIRTABLE_NAME = 'MainTable'
    # SECURITY(review): the Airtable API key is hard-coded in source; it
    # should be moved to an environment variable or a secrets manager and
    # this key rotated.
    API_KEY = 'keyrT0V59Ejlc9JaH'
    #create url for connect_
    END_POINT = f'https://api.airtable.com/v0/{AIRTABLE_BASE_ID}/{AIRTABLE_NAME}'
    # create headers
    headers = {
        "Authorization": f"Bearer {API_KEY}",
        "Content-Type": "application/json"
    }
    # get request in json formats
    r = requests.get(END_POINT, headers=headers)
    # get data from dictionary
    z_data = r.json()['records']
    # sort data_by_id and get sorted list that contain dictionary
    sort_list = sorted(z_data, key=lambda x: x['fields']['ID'])
    # make list from sort list that contain only title field
    new_list = [(z['fields']['title']) for z in sort_list]
    # create algorithm of extraction data as circle buffer that change sequently every second
    # before getting index make 5 sec pause for correct data
    time.sleep(5)
    # get index from logs event
    client = boto3.client('logs')
    stream_response = client.describe_log_streams(
        logGroupName="/aws/lambda/sam-hello-world-HelloWorldFunction-YhDOKxjYdMDy", # Can be dynamic]
        orderBy='LastEventTime', # For the latest events
        limit=50
    )
    name_of_logs = stream_response['logStreams'][-1:][0]['logStreamName']
    response = client.get_log_events(
        logGroupName="/aws/lambda/sam-hello-world-HelloWorldFunction-YhDOKxjYdMDy",
        logStreamName=f'{name_of_logs}'
    )
    # Each invocation writes ~4 log events, so event count / 4 approximates
    # the number of previous calls and serves as the window start.
    start_index = int((len(response['events']) / 4))
    # and index should be more on 3 as we get 3 records
    end_index = start_index + 3
    # but if we get end index that more len of our list data make another list
    # (get end of list and extend it from start list)
    if end_index > len(new_list):
        result = new_list[start_index:end_index]
        result.extend(new_list[:end_index - len(new_list)])
    else:
        result = new_list[start_index:end_index]
    return {
        "statusCode": 200,
        "body": json.dumps(result, ensure_ascii=False)
    }
| Tkachmaxim/aws_lambda_boto3 | hello_world/app.py | app.py | py | 2,202 | python | en | code | 0 | github-code | 13 |
class HyperParameters:
    """
    Hyperparameters configuration class where all vars are defined.

    Args:
        model_name_: human-readable model identifier (underscores are
            shown as spaces when printed).
        vocab: token vocabulary; when falsy, the AutoTokenizer's own
            vocabulary is assumed and ``vocab_size`` becomes a note string.
        label_vocab: label vocabulary; its length is the class count.
        embeddings_: optional pretrained embedding matrix (or None).
        batch_size_: training batch size.
    """
    def __init__(self, model_name_, vocab, label_vocab, embeddings_, batch_size_):
        self.model_name = model_name_
        # NOTE(review): vocab_size is an int when a vocab is given but a
        # str otherwise -- downstream code must tolerate both.
        self.vocab_size = len(vocab) if vocab else "Using AutoTokenizer's vocab"
        self.num_classes = len(label_vocab)
        self.hidden_dim = 128
        self.bidirectional = True
        self.embedding_dim = 300
        self.num_layers = 1
        self.dropout = 0.4
        self.embeddings = embeddings_
        self.batch_size = batch_size_

    def _print_info(self):
        """
        prints summary of model's hyperparameters
        """
        print("========== Hyperparameters ==========",
              f"Name: {self.model_name.replace('_', ' ')}",
              f"Vocab Size: {self.vocab_size}",
              f"Tags Size: {self.num_classes}",
              f"Embeddings Dim: {self.embedding_dim}",
              f"Hidden Size: {self.hidden_dim}",
              f"BiLSTM: {self.bidirectional}",
              f"Layers Num: {self.num_layers}",
              f"Dropout: {self.dropout}",
              f"Pretrained_embeddings: {False if self.embeddings is None else True}",
              f"Batch Size: {self.batch_size}", sep='\n')
32276602563 | # exercise 157: Both Letter Grades and Grade Points
from decision_making.ex52_bis import lett_to_point
from decision_making.ex53_bis import point_to_lett
def grade_converter(value):
    """Convert between point grades and letter grades.

    Tries the point->letter conversion first; if that fails, falls back to
    letter->point.  Raises ValueError when neither conversion accepts the
    input.
    """
    # BUG FIX: the bare `except:` clauses also swallowed KeyboardInterrupt
    # and SystemExit; catch only ordinary exceptions.
    try:
        return point_to_lett(value)
    except Exception:
        try:
            return lett_to_point(value)
        except Exception:
            raise ValueError('the supplied input is not valid.')
def main():
    """Interactively convert grades until the user enters a blank line."""
    mygrade = input('enter either a letter or point grade: ')
    while mygrade != '':
        try:
            print(grade_converter(mygrade))
        # grade_converter signals an unconvertible input with ValueError;
        # the old bare `except:` also trapped Ctrl-C.
        except ValueError:
            print('invalid input, retry')
        # Single prompt shared by both branches (was duplicated before).
        mygrade = input('enter either a letter or point grade: ')
# Run the interactive converter only when executed as a script (not on import).
if __name__ == '__main__':
    main()
| sara-kassani/1000_Python_example | books/Python Workbook/files_and_exceptions/ex157.py | ex157.py | py | 812 | python | en | code | 1 | github-code | 13 |
23081472406 | """
Last modified 1/9/2022
Extend sequences to include surrounding genomic context
Example usage:
## python data_scripts/extend_introns.py ../database/introns/standard_allsize_min_50_max_600/base_info.dat ../database/introns/standard_allsize_min_50_max_600_extend20/ 20
"""
import sys
import os
from config import DATABASE_PATH
from util.gene_file_io import *
# Command-line arguments: <input .dat file> <output directory> <window size>.
input_file = sys.argv[1] # Must be a .dat file.
output_dir = sys.argv[2]
window = int(sys.argv[3])  # number of positions of genomic context to add on each side
# Make sure the bed file has unique entries
def get_unique_bed_data(bed_data, bps):
	"""Drop duplicate BED entries (and their paired bp values), keeping the
	first occurrence.  Two entries are duplicates when their first three
	fields (chrom, start, end) match.
	"""
	new_bed_data = []
	new_bps = []
	chrpos_set = set()
	for ii, bed_data_item in enumerate(bed_data):
		key = tuple(bed_data_item[:3])
		if key not in chrpos_set:
			new_bed_data += [bed_data_item]
			new_bps += [bps[ii]]
			# BUG FIX: previously added the raw slice instead of the tuple
			# used by the membership test above -- list rows raised
			# TypeError (unhashable) and the lookup/insert were inconsistent.
			chrpos_set.add(key)
	return new_bed_data, new_bps
# Convert the .dat file to a .bed file if needed
base_data = read_base_data(input_file)
bed_data = [x[0][1] for x in base_data]
bps = [x[0][0] for x in base_data]
# Various duplicates in the "decoy" list need to be removed
bed_data, bps = get_unique_bed_data(bed_data, bps)
input_bedfile = input_file.replace('.dat', '.bed')
write_bed(bed_data, filename=input_bedfile)
# Create a new bedfile with shifted genome coordinates
f = open(input_bedfile)
input_lines = f.readlines()
f.close()
output_bedfile = os.path.join(output_dir, 'base_info.bed')
output_datfile = os.path.join(output_dir, 'base_info.dat')
f = open(output_bedfile, 'w')
for input_line in input_lines:
    input_items = input_line.split('\t')
    # Extend the interval by `window` positions on both sides.
    input_items[1] = str(int(input_items[1]) - window)
    input_items[2] = str(int(input_items[2]) + window)
    input_items[3] = input_items[3].replace('\n', '')
    # NOTE(review): no newline is appended here; if the BED rows have exactly
    # 4 fields (so field 3 carried the '\n'), all rows run together on one
    # line -- confirm the column count produced by write_bed.
    f.write('%s' % '\t'.join(input_items))
f.close()
genome_file = DATABASE_PATH + 'genome/sacCer3.fa'
# Fetch the extended sequences from the genome for the shifted BED intervals.
fasta_file = fasta_from_bed(output_bedfile, genome_file)
fasta_seq_tags = read_fasta(fasta_file)
fasta_seqs = [x[1] for x in fasta_seq_tags]
clear_tmp_files()
if len(fasta_seqs) != len(bed_data):
    raise RuntimeError("fasta sequences retrieved from genome not equal to the length of the bed file")
new_base_data = []
for ii, bp in enumerate(bps):
    # Shift the stored position by the added upstream context.
    new_bp = str(int(bp) + window)
    new_bed_data = list(bed_data[ii]) + [str(window), str(window)]
    new_bed_data[1] = str(int(new_bed_data[1]) - window)
    new_bed_data[2] = str(int(new_bed_data[2]) + window)
    new_base_data += [((new_bp, tuple(new_bed_data)), fasta_seqs[ii])]
write_base_data(output_datfile, new_base_data)
| ramyarangan/pre-mRNA_SecStruct | src/data_scripts/extend_introns.py | extend_introns.py | py | 2,456 | python | en | code | 0 | github-code | 13 |
7726906677 | # Credits: assets from DeathsbreedGames organization (http://deathsbreedgames.github.io/)
import pygame as pg
from pygame.locals import QUIT
from ball import Ball
from racket import Racket
from ia import IA
from scoreboard import Scoredboard
# Constants for initializing the drawing surface
WIDTH = 800 # Window width
HEIGHT = 600 # Window height
FPS = 60 # Frames per second
WHITE = (255, 255, 255) # Window background color (RGB); not referenced in this file's visible code
def main():
    """Set up pygame and run the Pong game loop until the window is closed."""
    # Initialize pygame
    pg.init()
    # Initialize the drawing surface (display surface)
    SCREEN = pg.display.set_mode((WIDTH, HEIGHT))
    # Window title
    pg.display.set_caption("Pong Game")
    # Background image
    background = pg.image.load("assets/bg_blue.png").convert_alpha()
    # Objects needed for the game
    ball = Ball("assets/ball.png",WIDTH,HEIGHT)
    player1 = Racket("assets/racket1.png",WIDTH,HEIGHT)
    opponentIA = IA("assets/racket2.png",WIDTH,HEIGHT)
    scoreboard = Scoredboard()
    # BUG FIX: create the Clock once.  The old code built a new Clock every
    # frame (pg.time.Clock().tick(FPS)); a fresh Clock has no previous tick
    # to measure from, so the frame rate was never actually limited.
    clock = pg.time.Clock()
    # Main loop
    run = True
    while run:
        clock.tick(FPS) # cap at FPS screen updates per second
        # Advance game state.
        ball.move()
        ball.bounce(scoreboard)
        player1.hit(ball)
        player1.move()
        opponentIA.move_ia(ball)
        opponentIA.hit_ia(ball)
        scoreboard.update()
        # Redraw the frame.
        SCREEN.blit(background, (0,0))
        SCREEN.blit(ball.image, (ball.x, ball.y))
        SCREEN.blit(player1.image, (player1.x, player1.y))
        SCREEN.blit(opponentIA.image, (opponentIA.x, opponentIA.y))
        SCREEN.blit(scoreboard.textRendered, (WIDTH/2 - scoreboard.font.size(scoreboard.text)[0]/2, 50))
        for event in pg.event.get():
            if event.type == QUIT:
                run = False
            # Key pressed
            if event.type == pg.KEYDOWN:
                if event.key == pg.K_w:
                    player1.dir_y -= 5
                if event.key == pg.K_s:
                    player1.dir_y += 5
            # Key released
            if event.type == pg.KEYUP:
                if event.key == pg.K_w:
                    player1.dir_y = 0
                if event.key == pg.K_s:
                    player1.dir_y = 0
        pg.display.flip()
    pg.quit()
# Start the game only when executed as a script (not on import).
if __name__ == "__main__":
    main()
| Miguel-99/Pong-Game | main.py | main.py | py | 2,428 | python | en | code | 0 | github-code | 13 |
21928417353 | import sys
input = sys.stdin.readline  # faster line reads for competitive programming
# Read T intervals [start, end].
T = int(input())
years = []
for _ in range(T):
    start, end = map(int, input().split())
    years.append([start, end])
# Sweep line: +1 when an interval opens, -1 when it closes.  The middle
# element orders events at the same coordinate (starts before ends).
sweep_line = []
for s, e in years:
    sweep_line.append([s, -1, 1])
    sweep_line.append([e, 1, -1])
sweep_line.sort()
# BUG FIX: removed leftover debug prints (the whole event list and the
# running overlap on every iteration) that corrupted the output -- only
# the maximum overlap should be printed.
overlap = 0
answer = 0
for year, order, val in sweep_line:
    overlap += val
    answer = max(answer, overlap)
print(answer)
| dyabk/competitive-programming | Prologin/back_to_the_future.py | back_to_the_future.py | py | 452 | python | en | code | 0 | github-code | 13 |
42070809361 | # -*- coding: UTF-8 -*-
import re
import sys
import os
file_name = sys.argv[1]
# Collect the text, footnotes and footnote order in separate lists
text_by_lines = []
footnotes = []     # (label, definition text) pairs, in file order
fnotes_order = []  # footnote labels in the order they are referenced
# Go over file, look for markup for footnotes and sort to lists
with open(file_name, 'r') as fi:
    ilines = fi.readlines()
    for line in ilines:
        if re.findall(r'^\[\^.+\]:', line):
            footnotes.append((re.match(r'\[\^(.+?)\]', line).group(1),
                              re.sub(r'^\[\^.+\]: ', '', line).strip()))
        else:
            text_by_lines.append(line.strip())
            fnotes = re.findall(r'\[\^(.+?)\]', line)
            # BUG FIX: re.findall() always returns a list (never None), so
            # the old `if fnotes is not None:` check was always true; test
            # for a non-empty match instead.
            if fnotes:
                fnotes_order.extend(fnotes)
# Sort the footnotes by the order in which they are referenced in the text.
# NOTE(review): a footnote that is defined but never referenced makes
# fnotes_order.index() raise ValueError -- confirm inputs always reference
# every definition.
footnotes.sort(key=lambda x: fnotes_order.index(x[0]))
formatted = []
inlinecount = 1  # next sequential footnote number to assign
# Replace the old footnote markers with the new ones
for line in text_by_lines:
    if re.search(r'\[\^.+\]', line):
        line_ = line.split()
        for word in line_:
            if re.search(r'\[\^.+\]', word):
                inword = re.sub(r'\[\^.+\]', r'[^{}]' .format(inlinecount), word)
                line_ = [inword if item == word else item for item in line_]
                inlinecount += 1
        line = ' '.join(line_)
    formatted.append(line)
# Write formatted text and footnotes to file
base, suffix = os.path.splitext(file_name)
outname = base + "_ordered" + suffix
with open(outname, 'w') as fo:
    for line in formatted:
        fo.write(line + '\n')
    for note in footnotes:
        fnote = f'[^{footnotes.index(note) + 1}]: {note[1]}'
        fo.write(fnote + '\n')
| questoph/lifehacks | sort-footnotes/sort_MD_footnotes.py | sort_MD_footnotes.py | py | 1,655 | python | en | code | 0 | github-code | 13 |
37176983824 | """
This file contains utility functions that improve the usage of MARGE.
make_dir: Creates a directory if it does not already exist.
limit_mem: Sets a limit on the amount of memory the program can use.
Not currently used by MARGE.
get_free_mem: Gets the amount of free memory on the system.
Not currently used by MARGE.
get_num_per_file: Calculates the number of cases per file in a data set.
Not currently used by MARGE.
data_set_size: Calculates the number of cases in a data set.
concatdat: Handles concatenation of non-contiguous data within a data set.
Not currently used by MARGE.
scale: Scales some data according to min, max, and a desired range.
descale: Descales some data according to min, max, and scaled range.
normalize: Normalizes some data according to mean & stdev.
denormalize: Denormalizes some data according to mean & stdev.
_float_feature: helper function to make Feature definition
_bytes_feature: helper function to make Feature definition
get_file_names: Find all file names in the data directory with a given file
extension.
make_TFRecord: Creates TFRecords representation of a data set.
get_TFR_file_names: Loads file names of the TFRecords files.
_parse_function: Helper function for loading TFRecords dataset objects.
load_TFdataset: Loads a TFRecords dataset for usage.
"""
import sys, os
import multiprocessing as mp
import functools
import glob
import numpy as np
import scipy.stats as ss
import tensorflow as tf
import loader as L
def make_dir(some_dir):
    """
    Handles creation of a directory.

    Inputs
    ------
    some_dir: string. Directory to be created.

    Outputs
    -------
    None. Creates `some_dir` if it does not already exist.
          Exits the program if the directory cannot be created.
    """
    try:
        os.mkdir(some_dir)
    except OSError as e:
        if e.errno == 17:  # EEXIST: directory already exists -- nothing to do
            pass
        else:
            # BUG FIX: previously referenced the undefined name `model_dir`,
            # raising a NameError instead of reporting the real failure.
            print("Cannot create folder '{:s}'. {:s}.".format(some_dir,
                                                   os.strerror(e.errno)))
            sys.exit()
    return
def get_num_per_file(foos, nfoo=10):
    """
    Loads a few files to determine the number of entries per data file.
    Not used by MARGE, but left here in case a user wants to use it for
    some data set where the data files adhere to the same # of cases
    per data file.

    Inputs
    ------
    foos: list, strings. Paths/to/data files.
    nfoo: int. Number of files to consider for the calculation.

    Outputs
    -------
    num_per_file: int. Number of entries per data file (the mode of the
                       per-file counts over the first `nfoo` files).
    """
    num_in_file = np.zeros(min(len(foos), nfoo))
    for i in range(min(len(foos), nfoo)):
        num_in_file[i] = np.load(foos[i]).shape[0]
    # Mode via np.unique: the old `ss.mode(a, None, 'raise')[0][0]` relied on
    # a removed SciPy API (positional axis arg + subscriptable result).
    # np.unique sorts ascending, so ties resolve to the smallest value --
    # the same tie-break scipy.stats.mode used.
    vals, counts = np.unique(num_in_file, return_counts=True)
    return int(vals[np.argmax(counts)])
def count_cases(foo):
    """
    Helper function for multiprocessing: counts the number of cases
    (axis-0 entries) in one .npy data file.
    """
    arr = np.load(foo)
    return arr.shape[0]
def data_set_size(foos, ncores=1):
    """
    Counts the total number of cases across a data set.

    Inputs
    ------
    foos  : list, strings. Files of the data set.
    ncores: int. Number of worker processes to use.

    Outputs
    -------
    Total number of cases in the data set.

    Notes
    -----
    Each case is assumed to occur along axis 0 of each file.
    """
    workers = mp.Pool(ncores)
    # Count the cases of every file in parallel.
    counts = workers.map(count_cases, foos)
    workers.close()
    workers.join()
    return np.sum(counts)
def concatdat(xi, xlen, yi, ylen,
              arr1, arr2):
    """
    Slices and concatenates non-contiguous regions of two related arrays.
    Not used by MARGE, but left available in case a user wishes to use it.

    Inputs
    ------
    xi  : array-like. Starting indices of the x slices.
    xlen: array-like. Lengths of the x slices (same shape as `xi`).
    yi  : array-like. Starting indices of the y slices.
    ylen: array-like. Lengths of the y slices (same shape as `yi`).
    arr1: array-like. Data to be sliced.
    arr2: array-like. Data related to `arr1`, sliced the same way.

    Outputs
    -------
    x1, x2: `arr1`/`arr2` stitched together according to `xi` and `xlen`.
    y1, y2: `arr1`/`arr2` stitched together according to `yi` and `ylen`.
    """
    def gather(starts, lengths, arr):
        # Collect each requested slice, then join them in order.
        pieces = [arr[beg:beg + num] for beg, num in zip(starts, lengths)]
        joined = pieces[0]
        for piece in pieces[1:]:
            joined = np.concatenate((joined, piece))
        return joined

    x1 = gather(xi, xlen, arr1)
    x2 = gather(xi, xlen, arr2)
    y1 = gather(yi, ylen, arr1)
    y2 = gather(yi, ylen, arr2)
    return x1, x2, y1, y2
def scale(val, vmin, vmax, scalelims):
    """
    Maps `val` from the data range [vmin, vmax] onto the scaled range
    [scalelims[0], scalelims[1]].

    Inputs
    ------
    val      : array. Values to be scaled.
    vmin     : array. Minima of `val`.
    vmax     : array. Maxima of `val`.
    scalelims: list, floats. [min, max] of the scaled range.

    Outputs
    -------
    Array of scaled data.
    """
    span_scaled = scalelims[1] - scalelims[0]
    span_data = vmax - vmin
    return span_scaled * (val - vmin) / span_data + scalelims[0]
def descale(val, vmin, vmax, scalelims):
    """
    Inverse of scale(): maps `val` from the scaled range
    [scalelims[0], scalelims[1]] back to the data range [vmin, vmax].

    Inputs
    ------
    val      : array. Values to be descaled.
    vmin     : array. Minima of the data range.
    vmax     : array. Maxima of the data range.
    scalelims: list, floats. [min, max] of the scaled range.

    Outputs
    -------
    Array of descaled data.
    """
    span_scaled = scalelims[1] - scalelims[0]
    span_data = vmax - vmin
    return (val - scalelims[0]) / span_scaled * span_data + vmin
def normalize(val, vmean, vstd):
    """
    Standardizes `val`: subtracts the mean and divides by the stdev.

    Inputs
    ------
    val  : array. Values to be normalized.
    vmean: array. Mean values of `val`.
    vstd : array. Stdev values of `val`.

    Outputs
    -------
    Array of normalized data.
    """
    centered = val - vmean
    return centered / vstd
def denormalize(val, vmean, vstd):
    """
    Inverse of normalize(): multiplies by the stdev and adds the mean.

    Inputs
    ------
    val  : array. Values to be denormalized.
    vmean: array. Mean values of the data.
    vstd : array. Stdev values of the data.

    Outputs
    -------
    Array of denormalized data.
    """
    spread = val * vstd
    return spread + vmean
def _float_feature(value):
    """
    Helper function to make feature definition more readable.
    Wraps an iterable of floats in a tf.train.Feature holding a FloatList.
    """
    return tf.train.Feature(float_list=tf.train.FloatList(value=value))
def _bytes_feature(value):
    """
    Helper function to make feature definition more readable.
    Wraps a single bytes object in a tf.train.Feature holding a BytesList.
    """
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def make_TFRecord(fname, files, inD, ilog, olog,
                  batch_size, e_batches, split=1, verb=1):
    """
    Function to write TFRecords for large data sets.
    Inputs
    ------
    fname: string. Base name for TFRecords files
    files: list, strings. Files to process into TFRecords.
    inD : int. Dimension of the inputs/features
    ilog : bool. Determines if to take the log of inputs/features
    olog : bool. Determines if to take the log of outputs/targets
    batch_size: int. Size of batches for training.
    e_batches : int. Expected number of batches to be processed.
    split: int. Determines the number of `files` to process before
                starting a new TFRecords file.
    verb : int. Verbosity level.
    Outputs
    -------
    TFRecords files.
    """
    # NOTE(review): uses the TF 1.x API (tf.python_io.TFRecordWriter,
    # ndarray.tostring()); running under TF 2 requires tf.compat.v1.
    if verb > 1:
        print("\nWriting TFRecords file...")
    # Track number of files left to load
    filesleft = len(files)
    # Track number of cases written to TFRecord
    N_batches = 0
    count = 0
    for i in range(int(np.ceil(len(files)/split))):
        # TFRecords file -- one output file per `split` input files
        thisfile = fname.replace('.tfrecords', '_'+str(i).zfill(3)+'.tfrecords')
        writer = tf.python_io.TFRecordWriter(thisfile)
        for j in range(min(split, filesleft)):
            # Load file
            x, y = L.load_data_file(files[i*split+j], inD, ilog, olog)
            filesleft -= 1
            # Print progress updates
            if verb:
                print(str(int(100/len(files)/split*(i + j/split))) + \
                      "% complete", end='\r')
            # TF is lame and requires arrays to be 1D. Write each sequentially
            for k in range(x.shape[0]):
                # Check for Nans -- cases containing NaN are skipped entirely
                if np.any(np.isnan(x[k])) or np.any(np.isnan(y[k])):
                    if verb:
                        print("Nan alert!", files[i*split+j], k)
                    continue
                # Define feature
                feature = {'x' : _bytes_feature(
                                    tf.compat.as_bytes(x[k].tostring())),
                           'y' : _bytes_feature(
                                    tf.compat.as_bytes(y[k].tostring()))}
                # Create an example protocol buffer
                example = tf.train.Example(features=tf.train.Features(feature=
                                                                      feature))
                # Serialize to string and write on the file
                writer.write(example.SerializeToString())
                # Make sure that the TFRecord has exactly N*batch_size entries
                count += 1
                if count==batch_size:
                    count = 0
                    N_batches += 1
                    if N_batches==e_batches:
                        # Reached the expected batch count: stop early so the
                        # record set holds exactly e_batches * batch_size cases.
                        writer.close()
                        if verb:
                            print("100% complete")
                        if verb > 1:
                            print("Ended writing TFRecords to ensure " + \
                                  "N*batch_size entries.")
                            print("Writing TFRecords file complete.")
                        return
        writer.close()
    if verb:
        print("100% complete")
    if verb > 1:
        print("Writing TFRecords file complete.")
        print(N_batches, 'batches written,', e_batches, 'batches expected.')
        print(count, 'remaining count.')
    return
def _parse_function(proto, xlen, ylen,
                    x_mean=None, x_std=None, y_mean=None, y_std=None,
                    x_min=None, x_max=None, y_min=None, y_max=None,
                    scalelims=None):
    """
    Helper function for loading TFRecords
    Inputs
    ------
    proto : object. Tensorflow Dataset.
    xlen : int. Number of inputs.
    ylen : int. Number of outputs.
    x_mean: array. Mean values of input data.
    x_std : array. Stdev values of input data.
    y_mean: array. Mean values of output data.
    y_std : array. Stdev values of output data.
    x_min : array. Minima of input data.
    x_max : array. Maxima of input data.
    y_min : array. Minima of output data.
    y_max : array. Maxima of output data.
    scalelims: list, floats. [min, max] of range of scaled data.
    Outputs
    -------
    x: Parsed inputs.
    y: Parsed outputs.
    """
    # NOTE(review): TF 1.x API (tf.FixedLenFeature / parse_single_example /
    # decode_raw); under TF 2 these live in tf.io.
    # Define the TFRecord
    keys_to_features = {"x" : tf.FixedLenFeature([], tf.string),
                        "y" : tf.FixedLenFeature([], tf.string)}
    # Load one example
    parsed_features = tf.parse_single_example(proto, keys_to_features)
    # Turn string into array
    x = tf.decode_raw(parsed_features["x"], tf.float64)
    y = tf.decode_raw(parsed_features["y"], tf.float64)
    # Make sure it has the right shape
    x = tf.reshape(x, (np.sum(xlen),))
    y = tf.reshape(y, (np.sum(ylen),))
    # Parameters to process data
    norm = (x_mean, x_std, y_mean, y_std)
    scaling = (x_min, x_max, y_min, y_max, scalelims)
    # Set defaults if not specified.  NOTE: this is all-or-nothing -- if ANY
    # value in a group is None the whole group falls back to identity
    # (mean 0 / std 1, or min 0 / max 1 with scalelims [0, 1]).
    if any(v is None for v in norm):
        x_mean = 0
        x_std = 1
        y_mean = 0
        y_std = 1
    if any(v is None for v in scaling):
        x_min = 0
        x_max = 1
        y_min = 0
        y_max = 1
        scalelims = [0, 1]
    # Normalize and scale
    x = scale(normalize(x, x_mean, x_std), x_min, x_max, scalelims)
    y = scale(normalize(y, y_mean, y_std), y_min, y_max, scalelims)
    x = tf.cast(x, tf.float32)
    y = tf.cast(y, tf.float32)
    return x, y
def load_TFdataset(files, ncores, batch_size, buffer_size,
                   xlen, ylen,
                   x_mean=None, x_std=None, y_mean=None, y_std=None,
                   x_min=None, x_max=None, y_min=None, y_max=None,
                   scalelims=None, shuffle=False):
    """
    Builds data loading pipeline for TFRecords.
    Inputs
    ------
    files : list, str. Path/to/files for TFRecords.
    ncores : int. Number of cores to use for parallel loading.
    batch_size : int. Batch size.
    buffer_size: int. Number of times `batch_size` to use for buffering.
    xlen : int. Number of inputs.
    ylen : int. Number of outputs.
    x_mean : float. Mean of inputs.
    x_std : float. Standard deviation of inputs.
    y_mean : float. Mean of outputs.
    y_std : float. Standard deviation of outputs.
    x_min : array. Minima of input data.
    x_max : array. Maxima of input data.
    y_min : array. Minima of output data.
    y_max : array. Maxima of output data.
    scalelims : list, floats. [min, max] of range of scaled data.
    shuffle : bool. Determines whether to shuffle the order or not.
    Outputs
    -------
    x_data: Parsed input data.
    y_data: Parsed output data.
    """
    # NOTE(review): TF 1.x graph-mode pipeline (make_one_shot_iterator /
    # get_next); TF 2 eager code iterates the dataset directly instead.
    # Make dataset
    dataset = tf.data.TFRecordDataset(files)
    # Make static parse_function -- bakes the normalization/scaling
    # parameters into the per-example parser
    parse_function = functools.partial(_parse_function,
                                       xlen=xlen, ylen=ylen,
                                       x_mean=x_mean, x_std=x_std,
                                       y_mean=y_mean, y_std=y_std,
                                       x_min=x_min, x_max=x_max,
                                       y_min=y_min, y_max=y_max,
                                       scalelims=scalelims)
    # Maps the parser on every filepath in the array
    dataset = dataset.map(parse_function, num_parallel_calls=ncores)
    # Shuffle buffer -- train in random order
    if shuffle:
        dataset = dataset.shuffle(buffer_size*batch_size,
                                  reshuffle_each_iteration=True)
    dataset = dataset.batch(batch_size, drop_remainder=False)
    dataset = dataset.repeat() # Go forever! Until fit() stops it
    dataset = dataset.prefetch(buffer_size)
    # Make iterator to handle loading the data
    iterator = dataset.make_one_shot_iterator()
    # Create TF representation of the iterator
    x_data, y_data = iterator.get_next()
    return x_data, y_data
| exosports/MARGE | lib/utils.py | utils.py | py | 15,431 | python | en | code | 7 | github-code | 13 |
32846844906 | import os
import pandas as pd
from datetime import datetime, timedelta
def read_csv_file(file_path):
    """Load a CSV into a DataFrame, exiting with a message on failure."""
    try:
        return pd.read_csv(file_path)
    except FileNotFoundError:
        message = f"Error: {file_path} not found."
    except pd.errors.EmptyDataError:
        message = f"Error: {file_path} is empty."
    except pd.errors.ParserError:
        message = f"Error: Could not parse {file_path}."
    # Only reached when one of the handlers above fired.
    print(message)
    exit(1)
def get_unavailable_dates(row):
    """Expand a wish row into the inclusive list of dates it covers."""
    first = datetime.strptime(row['Wish_Start_Date'], '%Y-%m-%d').date()
    last = datetime.strptime(row['Wish_End_Date'], '%Y-%m-%d').date()
    dates = []
    day = first
    while day <= last:
        dates.append(day)
        day += timedelta(days=1)
    return dates
# Ask the user for input
start_date_input = input("Enter the start date (YYYY-MM-DD): ")
end_date_input = input("Enter the end date (YYYY-MM-DD): ")
call_type_input = input("Enter the call type to start with (0 for open, 1 for closed): ")
doctor_data_file_path = input("Enter the location of the doctor_data.csv file: ")
doctor_wishes_file_path = input("Enter the location of the doctor_wishes.csv file: ")
output_directory = input("Enter the directory where you want to save the output files: ")
# Validate user input and directory
try:
    start_date = datetime.strptime(start_date_input, '%Y-%m-%d').date()
    end_date = datetime.strptime(end_date_input, '%Y-%m-%d').date()
except ValueError:
    print("Invalid date format. Please use YYYY-MM-DD.")
    exit(1)
if call_type_input not in ['0', '1']:
    print("Invalid call type. Please enter 0 for open or 1 for closed.")
    exit(1)
# Create the output directory on demand.
if not os.path.exists(output_directory):
    os.makedirs(output_directory)
call_type = int(call_type_input)  # 0 = open call day, 1 = closed call day
# Output file locations.
schedule_file_path = os.path.join(output_directory, 'schedule.csv')
monthly_counter_file_path = os.path.join(output_directory, 'monthly_counters.csv')
weekend_counter_file_path = os.path.join(output_directory, 'weekend_counters.csv')
# Read the CSV files
data = read_csv_file(doctor_data_file_path)
wishes = read_csv_file(doctor_wishes_file_path)
# Convert wishes to a dictionary of datetime objects:
# doctor name -> list of dates the doctor asked not to work.
doctor_wishes_dict = {}
for index, row in wishes.iterrows():
    doc = row['Doctor_Name']
    unavailable_dates = get_unavailable_dates(row)
    if doc in doctor_wishes_dict:
        doctor_wishes_dict[doc].extend(unavailable_dates)
    else:
        doctor_wishes_dict[doc] = unavailable_dates
# Define the start and end date for scheduling (now based on user input)
delta = end_date - start_date
num_days = delta.days + 1  # inclusive day count
# Initialize variables
doctors = data['Doctor_Name'].tolist()
# Soft cap of call days per doctor over the scheduling period.
max_monthly_calls = num_days // len(doctors)
doctor_counters = {doctor: 0 for doctor in doctors}
monthly_counters = {doctor: 0 for doctor in doctors}
weekend_counters = {doctor: 0 for doctor in doctors}
last_weekend_worked = {doctor: None for doctor in doctors}
schedule = {}  # date -> list of assigned doctor names
previous_day_doctors = []
current_date = start_date
round_robin_index_open = 0
round_robin_index_closed = 0
# Generate the schedule
for day in range(num_days):
    is_weekend = current_date.weekday() >= 5  # True if it's Saturday or Sunday
    num_doctors_needed = 3 if call_type == 0 else 1
    # BUG FIX: doctor_wishes_dict maps doctor name -> list of unavailable
    # dates, so looking it up BY DATE (`.get(current_date, [])`) always
    # returned [] and the wishes were silently ignored.  Build the list of
    # doctors who asked to be off today and exclude them instead.
    unavailable_today = [doc for doc, dates in doctor_wishes_dict.items()
                         if current_date in dates]
    # Determine available doctors based on call type and other conditions
    if call_type == 0:
        available_doctors_df = data[(~data['Doctor_Name'].isin(previous_day_doctors)) & (~data['Doctor_Name'].isin(unavailable_today))]
    else:
        available_doctors_df = data[(data['Status'] == 'n') & (~data['Doctor_Name'].isin(previous_day_doctors)) & (~data['Doctor_Name'].isin(unavailable_today))]
    available_doctors = available_doctors_df['Doctor_Name'].tolist()
    # Filter out doctors who worked the last weekend, if it's a weekend
    if is_weekend:
        available_doctors = [doc for doc in available_doctors if last_weekend_worked[doc] != current_date - timedelta(days=7)]
    if len(available_doctors) >= num_doctors_needed:
        selected_doctors = []
        # Round-robin selection; open and closed days keep separate cursors.
        if call_type == 0:
            for _ in range(num_doctors_needed):
                if round_robin_index_open >= len(available_doctors):
                    round_robin_index_open = 0
                selected_doctor = available_doctors[round_robin_index_open]
                selected_doctors.append(selected_doctor)
                round_robin_index_open += 1
        else:
            for _ in range(num_doctors_needed):
                if round_robin_index_closed >= len(available_doctors):
                    round_robin_index_closed = 0
                selected_doctor = available_doctors[round_robin_index_closed]
                selected_doctors.append(selected_doctor)
                round_robin_index_closed += 1
        for selected_doctor in selected_doctors:
            doctor_counters[selected_doctor] = 1
            monthly_counters[selected_doctor] += 1
            if monthly_counters[selected_doctor] >= max_monthly_calls:
                # NOTE(review): available_doctors is rebuilt every day, so
                # this removal has no lasting effect -- confirm intent.
                available_doctors.remove(selected_doctor)
        # Update last_weekend_worked and weekend_counters if it's a weekend
        if is_weekend:
            for selected_doctor in selected_doctors:
                last_weekend_worked[selected_doctor] = current_date
                weekend_counters[selected_doctor] += 1
        schedule[current_date] = selected_doctors
    else:
        # Not enough candidates: fall back to the least-loaded doctors.
        least_assigned_doctors = sorted(doctors, key=lambda x: monthly_counters[x])[:num_doctors_needed]
        selected_doctors = least_assigned_doctors
        for selected_doctor in selected_doctors:
            doctor_counters[selected_doctor] = 1
            monthly_counters[selected_doctor] += 1
        schedule[current_date] = selected_doctors
    # Update the list of doctors who worked on the previous day's call
    previous_day_doctors = selected_doctors
    # Move to the next day and toggle call type
    current_date += timedelta(days=1)
    call_type = 1 - call_type
# Save the schedule and monthly counters to CSV files
schedule_df = pd.DataFrame(list(schedule.items()), columns=['Date', 'Doctors'])
schedule_df.to_csv(schedule_file_path, index=False)
monthly_counters_df = pd.DataFrame(list(monthly_counters.items()), columns=['Doctor_Name', 'Days_Worked'])
monthly_counters_df.to_csv(monthly_counter_file_path, index=False)
# Save weekend counters to a CSV
weekend_counters_df = pd.DataFrame(list(weekend_counters.items()), columns=['Doctor_Name', 'Weekends_Worked'])
weekend_counters_df.to_csv(weekend_counter_file_path, index=False)
# Display the schedule, monthly counters, and weekend counters
# NOTE(review): the loop variable below shadows the module-level `doctors`
# list; harmless because the script ends here, but worth renaming.
for date, doctors in schedule.items():
    print(f"{date}: {', '.join(doctors)}")
print("Monthly counters for each doctor:")
for doctor, count in monthly_counters.items():
    print(f"{doctor}: {count} days")
print("Weekend counters for each doctor:")
for doctor, count in weekend_counters.items():
    print(f"{doctor}: {count} weekends")
| freekillgr/Scheduler_on_clinic | Schedule_calculator.py | Schedule_calculator.py | py | 7,141 | python | en | code | 0 | github-code | 13 |
36588209986 | class Solution:
def maximalSquare(self, matrix: List[List[str]]) -> int:
# similar to 1227 CountSquareSubmatriceswithAllOnes
if not matrix:
return 0
m, n = len(matrix), len(matrix[0])
dp = [[0 for _ in range(n)] for _ in range(m)]
max_len = 0
for i in range(m):
for j in range(n):
if matrix[i][j] == "1":
dp[i][j] = min(dp[i-1][j], dp[i][j-1], dp[i-1][j-1]) + 1
max_len = max(max_len, dp[i][j])
else:
dp[i][j] = 1 if matrix[i][j] == "1" else 0
return max_len**2
| ysonggit/leetcode_python | 0221_MaximalSquare.py | 0221_MaximalSquare.py | py | 638 | python | en | code | 1 | github-code | 13 |
24518693673 | #!/usr/bin/env python3
"""
Test for FileStorage module
"""
from models.engine.file_storage import FileStorage
from models.base_model import BaseModel
import datetime
import unittest
import models
from models.city import City
class TestStorage(unittest.TestCase):
"""
Testing FileStorage class
"""
def test_inst(self):
"""
Testing instance type
"""
storage = FileStorage()
self.assertIsInstance(storage, FileStorage)
def test_storage_all(self):
"""
Testing the all() method
"""
storage = FileStorage()
dic = storage.all()
self.assertIsInstance(dic, dict)
def test_new(self):
"""
Testing the new() method
"""
sample = City()
models.storage.new(sample)
self.assertIn("City." + sample.id, models.storage.all().keys())
self.assertIn(sample, models.storage.all().values())
def test_save(self):
"""
Testing the save() method
"""
sample = City()
models.storage.new(sample)
models.storage.save()
with open("file.json", "r") as file:
f_contents = file.read()
self.assertIn("City." + sample.id, f_contents)
def test_save_none(self):
"""
Testing save with None as arg
"""
with self.assertRaises(TypeError):
models.storage.save(None)
| Oluwamarcellus/AirBnB_test_repo | tests/test_models/test_engine/test_file_storage.py | test_file_storage.py | py | 1,430 | python | en | code | 0 | github-code | 13 |
70486567058 | import discord
from discord.ext import commands
import random
import json
import urllib.request
j_file = open("divinesecrets.txt")
vari = json.load(j_file)
j_file.close()
tkey = vari["tenorkey"]
class emotes(commands.Cog):
def __init__(self, bot):
self.bot = bot
# self.member = member
@commands.command()
async def gif(self, ctx, *, query):
url = f"https://g.tenor.com/v1/search?q={query}&key={tkey}&limit=8&media_filter=basic"
data = urllib.request.urlopen(url)
Json = json.load(data)
index = random.randrange(len(Json['results']))
await ctx.reply(Json['results'][index]['url'])
@commands.command()
async def hug(self, ctx, member: discord.Member = None):
if member is None:
await ctx.send("You can't hug yourself! lol")
return
urls = ['https://media.tenor.com/images/4d5a77b99ab86fc5e9581e15ffe34b5e/tenor.gif',
'https://media1.tenor.com/images/11b756289eec236b3cd8522986bc23dd/tenor.gif?itemid=10592083',
'https://media1.tenor.com/images/452bf03f209ca23c668826ffa07ea6a7/tenor.gif?itemid=15965620',
'https://media1.tenor.com/images/fd47e55dfb49ae1d39675d6eff34a729/tenor.gif?itemid=12687187',
'https://media1.tenor.com/images/f3ffd3669c13ee8d091a6b583976efe9/tenor.gif?itemid=9322908',
'https://cdn.weeb.sh/images/BysjuO7D-.gif',
'https://media1.tenor.com/images/1d94b18b89f600cbb420cce85558b493/tenor.gif?itemid=15942846',
'https://media1.tenor.com/images/1069921ddcf38ff722125c8f65401c28/tenor.gif?itemid=11074788']
embed = discord.Embed(title=(f'__***{ctx.author.name}***__ just hugged __***{member.name}***__') ,color=random.randint(0x000000, 0xFFFFFF))
embed.set_image(url=random.choice(urls))
await ctx.send(embed=embed)
@commands.command()
async def spank(self, ctx, member: discord.Member = None):
if member is None:
await ctx.send("Uhh that's a weird fetish to spank yourself.")
return
urls = ['https://media1.tenor.com/images/3c161bd7d6c6fba17bb3e5c5ecc8493e/tenor.gif?itemid=5196956',
'https://media1.tenor.com/images/d40977fe97c6c94215a9b84f990357f7/tenor.gif?itemid=7391212',
'https://media.tenor.com/images/5013920fb05e45636900276eea66471f/tenor.gif',
'https://media.tenor.com/images/899d52015a05c0cdac511090d50f743a/tenor.gif',
'https://media1.tenor.com/images/a227b6044bd7dc0b21bb1c3fe4a536a5/tenor.gif?itemid=5648461',
'https://media1.tenor.com/images/948e076842485d38b69431fbcb5c14d2/tenor.gif?itemid=13008163']
embed = discord.Embed(title=(f'__***{ctx.author.name}***__ just spanked __***{member.name}***__'), color=random.randint(0x000000, 0xFFFFFF))
embed.set_image(url=random.choice(urls))
await ctx.send(embed=embed)
@commands.command(aliases = ['hbd'])
async def bdaywish(self, ctx, member: discord.Member):
urls = ['https://media.tenor.com/images/e37ae589afa0cffe7c9957bee26e36cc/tenor.gif',
'https://media.tenor.com/images/72fead26968d18a5846f02298dacb3b3/tenor.gif',
'https://media.tenor.com/images/ee1cd9269f1872a5eb31cef9b86a20cd/tenor.gif',
'https://media.tenor.com/images/ac4f49e01b8d289c16271e2187ee62d8/tenor.gif',
'https://media.tenor.com/images/25d627f3e0f09fe712f8b9fd4bd675cb/tenor.gif',
'https://media.tenor.com/images/0b6f0b738d777f1f393492918ef94eda/tenor.gif']
embed = discord.Embed(title=(f'__***{ctx.author.name} and NotSoBasicBot***__ are wishing **Happy Birthday** to __***{member.name}***__') ,color=random.randint(0x000000, 0xFFFFFF))
embed.set_image(url=random.choice(urls))
await ctx.send(embed=embed)
@commands.command()
async def punch(self, ctx, member: discord.Member = None):
if member is None:
await ctx.send("You don't wanna punch yourself do you? lol")
else:
urls = ['https://i.chzbgr.com/full/6531699968/h446929A1/ya-like-my-zangief-impression-doc',
'https://media0.giphy.com/media/AlsIdbTgxX0LC/giphy.gif',
'https://media1.giphy.com/media/11HeubLHnQJSAU/giphy.gif?cid=ecf05e47ducbjg7i13ay8wyxrqu4ir3x5vovozl5stw4e1um&rid=giphy.gif',
'https://media2.giphy.com/media/xULW8EM7Br1usb0s9O/giphy.gif?cid=ecf05e47ducbjg7i13ay8wyxrqu4ir3x5vovozl5stw4e1um&rid=giphy.gif',
'https://media3.giphy.com/media/GoN89WuFFqb2U/giphy.gif?cid=ecf05e47newmpccn5poukaf496q8dx12fazspr7v86owyic6&rid=giphy.gif',
'https://media3.giphy.com/media/RkLaH1ptACyAzQ1dWj/giphy.gif?cid=ecf05e47b99bf549bf0f9d706d40d906ae4686eddd55d337&rid=giphy.gif']
embed = discord.Embed(title=(f'__***{ctx.author.name}***__ just punched __***{member.name}***__') ,color=random.randint(0x000000, 0xFFFFFF))
embed.set_image(url=random.choice(urls))
await ctx.send(embed=embed)
@commands.command()
async def slap(self, ctx, member: discord.Member = None):
if member is None:
urls = ['https://media4.giphy.com/media/S7EOLaVLLTPNuIFUHT/giphy.gif?cid=ecf05e477iprb3tp8c173xvxsym9geky10acmpx69zojufir&rid=giphy.gif',
'https://media2.giphy.com/media/2nGfl4QfpCtW/giphy.gif?cid=ecf05e477iprb3tp8c173xvxsym9geky10acmpx69zojufir&rid=giphy.gif',
'https://media1.giphy.com/media/q8AiNhQJVyDoQ/giphy.gif?cid=ecf05e47df40091nuwwrqdo8nbw6cy2r2xbi0ot2a4vwx7rs&rid=giphy.gif']
embed = discord.Embed(title=(f'__***{ctx.author.name}***__ just slapped themselves. Wierd!') ,color=random.randint(0x000000, 0xFFFFFF))
embed.set_image(url=random.choice(urls))
await ctx.send(embed=embed)
else:
urls = ['https://media4.giphy.com/media/Gf3AUz3eBNbTW/giphy.gif?cid=ecf05e470ab7ia5vgss64ntvnmwj2v0b6dg8q2yxyft5uyjy&rid=giphy.gif',
'https://media1.giphy.com/media/k1uYB5LvlBZqU/giphy.gif?cid=ecf05e47e1efc2fb975b942072a24b70e9faef0d90ed3e81&rid=giphy.gif',
'https://media1.giphy.com/media/10DRaO76k9sgHC/giphy.gif?cid=ecf05e470ab7ia5vgss64ntvnmwj2v0b6dg8q2yxyft5uyjy&rid=giphy.gif',
'https://media0.giphy.com/media/3XlEk2RxPS1m8/giphy.gif?cid=ecf05e47486a0f0cef11ddd3b254e9c37ab326f8b254bda3&rid=giphy.gif',
'https://media0.giphy.com/media/htiVRuP7N0XK/giphy.gif?cid=ecf05e477dd392a9f80fc10b58ae47c3af36eea496449063&rid=giphy.gif',
'https://media2.giphy.com/media/3o752gPI09ZLYk84Ok/giphy.gif?cid=ecf05e47ead92fdffe9293b39f8d43042e4eeb13b5bbb84f&rid=giphy.gif']
embed = discord.Embed(title=(f'__***{ctx.author.name}***__ just slapped __***{member.name}***__') ,color=random.randint(0x000000, 0xFFFFFF))
embed.set_image(url=random.choice(urls))
await ctx.send(embed=embed)
@commands.command()
async def kill(self, ctx, member: discord.Member = None):
if member is None:
await ctx.send("You don't wanna k-kill yourself do you? Don't worry things will get better (≧∇≦)ノ")
else:
urls = ['https://media1.tenor.com/images/ff2dcd44504000e320c21ae5682b5369/tenor.gif?itemid=5749160',
'https://media.tenor.com/images/e4f4de39be542c0820a7725b767ec1a0/tenor.gif',
'https://media.tenor.com/images/dba9097be6354e9be123441eacdad947/tenor.gif',
'https://media.tenor.com/images/edf55b40599fb382f7a8c87e609d5094/tenor.gif',
'https://media.tenor.com/images/5b25354209f6de4b064f0833f5eca8ad/tenor.gif',
'https://media.tenor.com/images/557bcc935fe237761da4963e3213bd2e/tenor.gif']
embed = discord.Embed(title=(f'__***{ctx.author.name}***__ just killed __***{member.name}***__') ,color=random.randint(0x000000, 0xFFFFFF))
embed.set_image(url=random.choice(urls))
embed.set_footer(text=f"But with my divine powers I resurrect {member.name} to get killed again!")
await ctx.send(embed=embed)
@commands.command()
async def nom(self, ctx, member: discord.Member = None):
if member is None:
await ctx.send("You don't wanna eat yourself do you? lol")
else:
urls = ['https://media.tenor.com/images/333c4f19849451c7e1ddff454c9f9372/tenor.gif',
'https://media.tenor.com/images/12aaaf60c46d563e3f8f2609f1df3c53/tenor.gif',
'https://media1.tenor.com/images/dc499a9859fab7fee5a23aebfc646dbf/tenor.gif?itemid=11833453',
'https://media.tenor.com/images/e5c65ea4d878d4165e682d7b984ab48b/tenor.gif',
'https://media.tenor.com/images/8260bc43f1522aa93616ff5a4389f139/tenor.gif',
'https://media.tenor.com/images/f9bba4a32a2f9bde7faa8c334aeaa4e5/tenor.gif']
embed = discord.Embed(title=(f'__***{ctx.author.name}***__ just nommed __***{member.name}***__') ,color=random.randint(0x000000, 0xFFFFFF))
embed.set_image(url=random.choice(urls))
await ctx.send(embed=embed)
@commands.command()
async def pat(self, ctx, member: discord.Member = None):
if member is None:
await ctx.send("You don't wanna pat yourself do you? lol")
else:
urls = ['https://media.tenor.com/images/0203a7bdba3302f6d8a473ba461e1581/tenor.gif',
'https://media.tenor.com/images/1da6bb86ef23dc4dff5051209843a296/tenor.gif',
'https://media1.tenor.com/images/f79a9ec48bde0e592e55447b17ecfbad/tenor.gif?itemid=8053566',
'https://media1.tenor.com/images/f41b3974036070fd1c498acf17a3a42e/tenor.gif?itemid=14751753',
'https://media1.tenor.com/images/5466adf348239fba04c838639525c28a/tenor.gif?itemid=13284057',
'https://media.tenor.com/images/0be5d465674b432bcf0bae0056f5621f/tenor.gif']
embed = discord.Embed(title=(f'__***{ctx.author.name}***__ just patted __***{member.name}***__') ,color=random.randint(0x000000, 0xFFFFFF))
embed.set_image(url=random.choice(urls))
await ctx.send(embed=embed)
@commands.command()
async def dance(self, ctx, member: discord.Member=None):
if member is None:
urls = ['https://media3.giphy.com/media/6fScAIQR0P0xW/giphy.gif?cid=ecf05e47tg1vl88unml2kr0tg3sn01482oa1jqstgs7d028u&rid=giphy.gif',
'https://i.imgur.com/wstXmJw.gif',
'https://media1.tenor.com/images/e19a05faf32c511572acd08a38bebdd6/tenor.gif',
'https://i.pinimg.com/originals/1c/79/ac/1c79ac50b06bb42a24058bf13c162a3e.gif',
'https://media.tenor.com/images/4fd49de4149a6d348e04f2465a3970af/tenor.gif']
embed = discord.Embed(title=(f'__***{ctx.author.name}***__ is dancing.') ,color=random.randint(0x000000, 0xFFFFFF))
embed.set_image(url=random.choice(urls))
await ctx.send(embed=embed)
else:
urls = ['https://media1.giphy.com/media/gCy8PslyGfBu0/giphy.gif?cid=ecf05e47xpjyr0upvq7tex2zj2hwp90md5g7hrn9bsj78dsv&rid=giphy.gif',
'https://media4.giphy.com/media/daBzBXPM1bSdpEW6MV/giphy.gif?cid=ecf05e47xpjyr0upvq7tex2zj2hwp90md5g7hrn9bsj78dsv&rid=giphy.gif',
'https://media0.giphy.com/media/kXfImJBeF6S7m/giphy.gif?cid=ecf05e474mru95iml5dnq7vydfkcdd0kktcbl7zkjq45hgwt&rid=giphy.gif',
'https://media3.giphy.com/media/PGp3hlI74uVxK/giphy.gif?cid=ecf05e47xpjyr0upvq7tex2zj2hwp90md5g7hrn9bsj78dsv&rid=giphy.gif']
# 'https://media.tenor.com/images/5b25354209f6de4b064f0833f5eca8ad/tenor.gif',
# 'https://media.tenor.com/images/557bcc935fe237761da4963e3213bd2e/tenor.gif']
embed = discord.Embed(title=(f'__***{ctx.author.name}***__ is dancing with __***{member.name}***__') ,color=random.randint(0x000000, 0xFFFFFF))
embed.set_image(url=random.choice(urls))
await ctx.send(embed=embed)
async def setup(bot):
    """discord.py extension entry point: register the emotes cog on the bot."""
    cog = emotes(bot)
    await bot.add_cog(cog)
27629538984 | import logging
import os
import random
import torch
import dgl
import pytorch_lightning as pl
from src.datamodules.datasets.graph_dataset import GraphDataset
from src.datamodules.datasets.dgl_dataset import MyDGLDataset
from src.datamodules.samplers.dgl_sampler import MyKNNMultiLayerNeighborSampler
from dgl.dataloading import DataLoader as DGLDataLoader
from torch.utils.data import DataLoader
from src.utils.states import Dynamic_neigh_level
class GMVAEDataModule(pl.LightningDataModule):
    """LightningDataModule for the GMVAE model.

    Wraps a ``GraphDataset`` (spatial transcriptomics AnnData plus kNN graph)
    and, when ``max_dynamic_neigh`` is enabled, a DGL graph dataset with
    neighbor samplers.  Train/val/test all share the same underlying dataset;
    the split arguments are kept for interface compatibility.
    """

    def __init__(
        self,
        data_dir,
        dataset_dir,
        data_type,
        in_type,
        out_type,
        compared_type,
        train_val_test_split,
        data_file_name=None,
        count_key=None,
        num_classes="auto",
        num_hvg=2048,
        lib_norm=True,
        n_pc=50,
        batch_size=256,
        num_workers=0,
        persistent_workers=False,
        prefetch_factor=2,
        pin_memory=False,
        rec_neigh_num=None,
        rec_mask_neigh_threshold=None,
        use_cell_mask=False,
        max_dynamic_neigh=False,
        dynamic_neigh_level=Dynamic_neigh_level.unit,
        unit_fix_num=None,
        unit_dynamic_num=None,
        k=18,
        test_with_gt_sp=False,
        forward_neigh_num=None,
        gat_layer_num=0,
        exchange_forward_neighbor_order=False,
        sample_id="sample_id",
        weighted_neigh=False,
        keep_tiles=False,
        supervise_cat=False,
        seed=42,
        device="auto",
        load_whole_graph_on_gpu=False,
        z_scale=2.,
        n_jobs="mean",
        use_ddp=False,
        **kwargs,
    ):
        """Store configuration and resolve "auto"-style arguments.

        Notable arguments:
            batch_size: int, or the string "all" to use the full dataset
                (resolved in :meth:`setup`).
            rec_neigh_num / forward_neigh_num: fall back to ``k`` when None.
            max_dynamic_neigh: when truthy, DGL dataloaders are used instead
                of plain PyTorch ones.
            n_jobs / num_workers: "mean" divides the CPU count by the number
                of GPUs reported by nvidia-smi; "all" uses every CPU.
            device: "auto" picks CUDA when available.
            **kwargs: ignored; absorbs extra config keys.
        """
        super().__init__()
        self.data_dir = data_dir
        self.dataset_dir = dataset_dir
        self.data_type = data_type
        self.in_type = in_type
        self.out_type = out_type
        self.compared_type = compared_type
        self.count_key = count_key
        self.train_val_test_split = train_val_test_split
        self.data_file_name = data_file_name
        self.num_classes = num_classes
        self.num_hvg = num_hvg
        self.lib_norm = lib_norm
        self.n_pc = n_pc
        self.batch_size = batch_size
        self.persistent_workers = persistent_workers
        self.prefetch_factor = prefetch_factor
        self.pin_memory = pin_memory
        self.max_dynamic_neigh = max_dynamic_neigh
        # The config passes the enum member by name (string).
        self.dynamic_neigh_level = Dynamic_neigh_level[dynamic_neigh_level]
        self.unit_fix_num = unit_fix_num
        self.unit_dynamic_num = unit_dynamic_num
        # Flipped externally (e.g. by the model) to switch the DGL dataset
        # to domain-based neighbors; rebuilt each epoch in train_dataloader.
        self.start_use_domain_neigh = False
        # Cached val/test dataloaders (built lazily, see val/test_dataloader).
        self.kept_val_dataloader = None
        self.kept_test_dataloader = None
        self.k = k
        # Reconstruction-neighbor count defaults to the kNN k.
        if rec_neigh_num is not None:
            self.rec_neigh_num = rec_neigh_num
        else:
            self.rec_neigh_num = k
        self.rec_mask_neigh_threshold = rec_mask_neigh_threshold
        # Forward (message-passing) neighbor count also defaults to k.
        if forward_neigh_num is not None:
            self.forward_neigh_num = forward_neigh_num
        else:
            self.forward_neigh_num = k
        self.gat_layer_num = gat_layer_num
        self.use_cell_mask = use_cell_mask
        self.test_with_gt_sp = test_with_gt_sp
        self.exchange_forward_neighbor_order = exchange_forward_neighbor_order
        self.sample_id = str(sample_id)
        # A "c_" prefix marks a sample-id variant; strip it for lookups.
        if self.sample_id.startswith("c_"):
            self.sample_id = self.sample_id[2:]
        self.weighted_neigh = weighted_neigh
        self.keep_tiles = keep_tiles
        self.supervise_cat = supervise_cat
        # A None seed is replaced with a random one so downstream code
        # always receives an int.
        self.seed = seed if seed is not None else random.randint(0, 100000)
        self.load_whole_graph_on_gpu = load_whole_graph_on_gpu
        self.use_ddp = use_ddp
        self.z_scale = z_scale
        # "mean": CPUs per GPU (GPU count scraped from nvidia-smi; falls back
        # to all CPUs when the command fails).
        if n_jobs == "mean":
            try:
                num_gpu = int(os.popen("nvidia-smi|grep Default|wc -l").read().strip())
                self.n_jobs = os.cpu_count() // num_gpu
            except:
                self.n_jobs = os.cpu_count()
        elif n_jobs == "all":
            self.n_jobs = os.cpu_count()
        else:
            self.n_jobs = n_jobs
        # Same "mean"/"all"/int resolution for the dataloader worker count.
        if num_workers == "mean":
            try:
                num_gpu = int(os.popen("nvidia-smi|grep Default|wc -l").read().strip())
                self.num_workers = os.cpu_count() // num_gpu
            except:
                self.num_workers = os.cpu_count()
        elif num_workers == "all":
            self.num_workers = os.cpu_count()
        else:
            self.num_workers = num_workers
        if device == "auto":
            self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        else:
            self.device = torch.device(device)
        # For DGL sampling on GPU: workers must be 0, and UVA (pinned host
        # memory access from the GPU) is used unless the whole graph is
        # already resident on the GPU.
        if self.max_dynamic_neigh:
            if self.device.type.startswith("cuda"):
                self.num_workers = 0
                self.dgl_device = torch.device("cuda")
                self.persistent_workers = False
                if not self.load_whole_graph_on_gpu:
                    self.use_uva = True
                else:
                    self.use_uva = False
            else:
                self.dgl_device = torch.device("cpu")
                self.use_uva = False

    def prepare_data(self):
        """No-op: data preparation happens inside GraphDataset."""
        pass

    def setup(self, stage=None):
        """Build the datasets (and, for dynamic neighbors, DGL samplers).

        Train/val/test all alias the same GraphDataset instance.  Also
        resolves ``batch_size == "all"`` and decides ``train_drop_last``.
        """
        train_dataset = GraphDataset(
            data_dir=self.data_dir,
            dataset_dir=self.dataset_dir,
            data_type=self.data_type,
            in_type=self.in_type,
            out_type=self.out_type,
            compared_type=self.compared_type,
            count_key=self.count_key,
            num_classes=self.num_classes,
            data_file_name=self.data_file_name,
            num_hvg=self.num_hvg,
            lib_norm=self.lib_norm,
            n_pc=self.n_pc,
            max_dynamic_neigh=self.max_dynamic_neigh,
            dynamic_neigh_level=self.dynamic_neigh_level,
            unit_fix_num=self.unit_fix_num,
            unit_dynamic_num=self.unit_dynamic_num,
            k=self.k,
            rec_neigh_num=self.rec_neigh_num,
            rec_mask_neigh_threshold=self.rec_mask_neigh_threshold,
            use_cell_mask=self.use_cell_mask,
            test_with_gt_sp=self.test_with_gt_sp,
            forward_neigh_num=self.forward_neigh_num,
            exchange_forward_neighbor_order=self.exchange_forward_neighbor_order,
            sample_id=self.sample_id,
            weighted_neigh=self.weighted_neigh,
            keep_tiles=self.keep_tiles,
            supervise_cat=self.supervise_cat,
            z_scale=self.z_scale,
            device=self.device,
            seed=self.seed,
        )
        # All three splits share one dataset object.
        self.data_train = train_dataset
        test_dataset = train_dataset
        self.data_val = test_dataset
        self.data_test = test_dataset
        if self.max_dynamic_neigh:
            dgl_train_dataset = MyDGLDataset(
                in_type=self.in_type,
                out_type=self.out_type,
                count_key=self.data_val.count_key,
                sample_id=self.sample_id,
                dynamic_neigh_nums=self.data_val.dynamic_neigh_nums,
                dynamic_neigh_level=self.dynamic_neigh_level,
                unit_fix_num=self.unit_fix_num,
                unit_dynamic_num=self.unit_dynamic_num,
                start_use_domain_neigh=self.start_use_domain_neigh,
                adata=self.data_val.adata,
                load_whole_graph_on_gpu=self.load_whole_graph_on_gpu,
                seed=self.seed,
            )
            self.dgl_data_train = dgl_train_dataset
            dgl_test_dataset = dgl_train_dataset
            self.dgl_data_val = dgl_test_dataset
            self.dgl_data_test = dgl_test_dataset
            # Seed node ids for the samplers: every node in the graph.
            self.dgl_indices = torch.arange(self.dgl_data_train[0].num_nodes(), device=self.dgl_device)
            if self.forward_neigh_num:
                # Training samples two hops (reconstruction + forward
                # neighbors); val/test only need the forward hop.
                # assert self.forward_neigh_num == self.rec_neigh_num
                self.train_sampler = MyKNNMultiLayerNeighborSampler(fanouts=[self.rec_neigh_num, self.forward_neigh_num])
                self.val_test_sampler = MyKNNMultiLayerNeighborSampler(fanouts=[self.forward_neigh_num])
            else:
                self.train_sampler = dgl.dataloading.MultiLayerFullNeighborSampler(1)
                self.val_test_sampler = dgl.dataloading.MultiLayerFullNeighborSampler(1)
        # "all" means one batch containing every observation.
        if self.batch_size == "all":
            self.batch_size = len(self.data_val.adata)
        # Drop the last (partial) training batch whenever at least one full
        # batch exists.
        self.train_drop_last = True if self.batch_size <= len(self.data_val.adata) else False
        if self.trainer.is_global_zero:
            logging.info(self.data_train.adata)

    def train_dataloader(self):
        """Return the training dataloader.

        With dynamic neighbors the DGL dataset is rebuilt every epoch so
        that changes to ``start_use_domain_neigh`` / ``dynamic_neigh_nums``
        take effect; the val/test aliases are updated to match.
        """
        if self.max_dynamic_neigh:
            dgl_train_dataset = MyDGLDataset(
                in_type=self.in_type,
                out_type=self.out_type,
                count_key=self.data_val.count_key,
                sample_id=self.sample_id,
                dynamic_neigh_nums=self.data_val.dynamic_neigh_nums,
                dynamic_neigh_level=self.dynamic_neigh_level,
                unit_fix_num=self.unit_fix_num,
                unit_dynamic_num=self.unit_dynamic_num,
                start_use_domain_neigh=self.start_use_domain_neigh,
                adata=self.data_val.adata,
                load_whole_graph_on_gpu=self.load_whole_graph_on_gpu,
                seed=self.seed,
            )
            self.dgl_data_train = dgl_train_dataset
            dgl_test_dataset = dgl_train_dataset
            self.dgl_data_val = dgl_test_dataset
            self.dgl_data_test = dgl_test_dataset
            return DGLDataLoader(
                self.dgl_data_train[0],
                self.dgl_indices,
                self.train_sampler,
                device=self.dgl_device,
                use_uva=self.use_uva,
                batch_size=self.batch_size,
                num_workers=self.num_workers,
                drop_last=self.train_drop_last,
                persistent_workers=False,
                use_prefetch_thread=False,
                shuffle=True,
                use_ddp=self.use_ddp,
            )
        else:
            return DataLoader(
                dataset=self.data_train,
                batch_size=self.batch_size,
                num_workers=self.num_workers,
                persistent_workers=self.persistent_workers,
                prefetch_factor=self.prefetch_factor,
                pin_memory=self.pin_memory,
                drop_last=self.train_drop_last,
                shuffle=True,
            )

    def val_dataloader(self):
        """Return the validation dataloader (no shuffling, no drop_last).

        NOTE(review): the DGL no-forward-neighbor branch caches the loader
        (built once), while the plain-PyTorch branch rebuilds and overwrites
        the cache on every call — presumably intentional (the DGL graph is
        refreshed by train_dataloader instead), but worth confirming.
        """
        # print("start val_dataloader")
        if self.max_dynamic_neigh:
            if self.forward_neigh_num:
                # Not cached: returned directly each call.
                return DGLDataLoader(
                    self.dgl_data_val[0],
                    self.dgl_indices,
                    self.val_test_sampler,
                    device=self.dgl_device,
                    use_uva=self.use_uva,
                    batch_size=self.batch_size,
                    num_workers=self.num_workers,
                    persistent_workers=False,
                    use_prefetch_thread=False,
                    shuffle=False,
                    use_ddp=self.use_ddp,
                )
            else:
                if self.kept_val_dataloader is None:
                    self.kept_val_dataloader = DGLDataLoader(
                        self.dgl_data_val[0],
                        self.dgl_indices,
                        self.val_test_sampler,
                        device=self.dgl_device,
                        use_uva=self.use_uva,
                        batch_size=self.batch_size,
                        num_workers=self.num_workers,
                        persistent_workers=self.persistent_workers,
                        shuffle=False,
                        use_ddp=self.use_ddp,
                    )
        else:
            self.kept_val_dataloader = DataLoader(
                dataset=self.data_val,
                batch_size=self.batch_size,
                num_workers=self.num_workers,
                persistent_workers=self.persistent_workers,
                prefetch_factor=self.prefetch_factor,
                pin_memory=self.pin_memory,
                shuffle=False,
            )
        # print("stop val_dataloader")
        return self.kept_val_dataloader

    def test_dataloader(self):
        """Return the test dataloader (same caching behavior as val)."""
        if self.max_dynamic_neigh:
            if self.forward_neigh_num:
                # Not cached: returned directly each call.
                return DGLDataLoader(
                    self.dgl_data_test[0],
                    self.dgl_indices,
                    self.val_test_sampler,
                    device=self.dgl_device,
                    use_uva=self.use_uva,
                    batch_size=self.batch_size,
                    num_workers=self.num_workers,
                    persistent_workers=False,
                    use_prefetch_thread=False,
                    shuffle=False,
                    use_ddp=self.use_ddp,
                )
            else:
                if self.kept_test_dataloader is None:
                    self.kept_test_dataloader = DGLDataLoader(
                        self.dgl_data_test[0],
                        self.dgl_indices,
                        self.val_test_sampler,
                        device=self.dgl_device,
                        use_uva=self.use_uva,
                        batch_size=self.batch_size,
                        num_workers=self.num_workers,
                        persistent_workers=self.persistent_workers,
                        shuffle=False,
                        use_ddp=self.use_ddp,
                    )
        else:
            self.kept_test_dataloader = DataLoader(
                dataset=self.data_test,
                batch_size=self.batch_size,
                num_workers=self.num_workers,
                persistent_workers=self.persistent_workers,
                prefetch_factor=self.prefetch_factor,
                pin_memory=self.pin_memory,
                shuffle=False,
            )
        return self.kept_test_dataloader
| ericcombiolab/stDyer | src/datamodules/gmvae_datamodule.py | gmvae_datamodule.py | py | 14,119 | python | en | code | 1 | github-code | 13 |
35362245261 | #!/usr/bin/env python
# coding: utf-8
# In[1]:
# User Instructions
#
#The following code provides the filtering, planning, localization, smoothing functions
# for a robots moving in a 2 dimmentional grid as provided
#
from math import *
import random
from Class_plan import plan
from Robot_Class import robot
from Particle_Filter_Class import particles
from Parameter_Tuning import twiddle
# Noise parameters passed to robot.set_noise() and the particle filter
# (exact noise model is defined in Robot_Class / Particle_Filter_Class).
steering_noise = 0.1
distance_noise = 0.03
measurement_noise = 0.3
# run: runs control program for the robot
#
# run: closed-loop control program — follows the smoothed path with a PD
# steering controller while localizing via a particle filter.
def run(grid, goal, spath, params, printflag = False, speed = 0.1, timeout = 1000):
    """Drive the robot along the smoothed path ``spath`` toward ``goal``.

    Args:
        grid: occupancy grid (0 = navigable, 1 = occupied).
        goal: goal cell as [row, col].
        spath: smoothed path, a list of [x, y] waypoints.
        params: [p_gain, d_gain] for the PD steering controller.
        printflag: when True, print the robot state every step.
        speed: forward distance moved per control step.
        timeout: maximum number of control steps before giving up.

    Returns:
        [reached_goal (bool), num_collisions, num_steps].
    """
    myrobot = robot()
    myrobot.set(0., 0., 0.)
    myrobot.set_noise(steering_noise, distance_noise, measurement_noise)
    # Renamed from ``filter`` so the Python builtin is not shadowed.
    pfilter = particles(myrobot.x, myrobot.y, myrobot.orientation,
                        steering_noise, distance_noise, measurement_noise)

    cte = 0.0
    err = 0.0  # accumulated squared cross-track error (diagnostic only, not returned)
    N = 0

    index = 0  # index into the path

    while not myrobot.check_goal(goal) and N < timeout:

        diff_cte = - cte

        # the present robot position estimate from the particle filter
        estimate = pfilter.get_position()
        x = estimate[0]
        y = estimate[1]

        # endpoints of the current path segment
        x1 = spath[index][0]
        x2 = spath[index+1][0]
        y1 = spath[index][1]
        y2 = spath[index+1][1]

        Rx = x - x1
        Ry = y - y1
        del_x = x2 - x1
        del_y = y2 - y1

        # signed cross-track error of the estimate relative to the segment
        cte = (Ry * del_x - Rx * del_y) / ((del_x**2) + (del_y**2))

        # u = normalized projection of the robot onto the segment;
        # u > 1 means the robot has passed the segment's far endpoint
        u = (Rx * del_x + Ry * del_y) / ((del_x**2) + (del_y**2))

        # advance to the next segment, staying within the path bounds
        if u > 1 and (index + 2) < len(spath):
            index += 1

        diff_cte += cte

        # PD control on the cross-track error
        steer = - params[0] * cte - params[1] * diff_cte

        myrobot = myrobot.move(grid, steer, speed)
        pfilter.move(grid, steer, speed)

        Z = myrobot.sense()
        pfilter.sense(Z)

        if not myrobot.check_collision(grid):
            print ('##### Collision ####')

        err += (cte ** 2)
        N += 1

        if printflag:
            print (myrobot, cte, index, u)

    return [myrobot.check_goal(goal), myrobot.num_collisions, myrobot.num_steps]
# ------------------------------------------------
#
# this is our main routine
#
def main(grid, init, goal, steering_noise, distance_noise, measurement_noise,
         weight_data, weight_smooth, p_gain, d_gain):
    """Plan a path with A*, smooth it, then execute it and return run() stats."""
    route = plan(grid, init, goal)
    route.astar()
    route.smooth(weight_data, weight_smooth)
    return run(grid, goal, route.spath, [p_gain, d_gain])
# ------------------------------------------------
#
# input data and parameters
#
# grid format:
# 0 = navigable space
# 1 = occupied space
# Occupancy grid: 0 = navigable space, 1 = occupied space.
grid = [[0, 1, 0, 0, 0, 0],
        [0, 1, 0, 1, 1, 0],
        [0, 1, 0, 1, 0, 0],
        [0, 0, 0, 1, 0, 1],
        [0, 1, 0, 1, 0, 0]]


init = [0, 0]                          # start cell [row, col]
goal = [len(grid)-1, len(grid[0])-1]   # bottom-right cell


# Noise parameters (these rebind the module-level values defined above).
steering_noise = 0.1
distance_noise = 0.03
measurement_noise = 0.3

# Initial smoothing weights and PD controller gains; refined by twiddle below.
weight_data = 0.1
weight_smooth = 0.2
p_gain = 2.0
d_gain = 6.0

# Tune all four parameters via coordinate ascent (twiddle), then run once.
weight_data, weight_smooth, p_gain, d_gain = twiddle([weight_data, weight_smooth, p_gain, d_gain])

print (main(grid, init, goal, steering_noise, distance_noise, measurement_noise,
       weight_data, weight_smooth, p_gain, d_gain))
# In[ ]:
| botAshar/Robot-car | Robot_Car.py | Robot_Car.py | py | 3,417 | python | en | code | 0 | github-code | 13 |
729528220 | import os
from gensim.models.doc2vec import Doc2Vec
import time
from doc2vec_helper import DocumentsIterator, EpochLogger
outdir = '/data/doc2vec/'

# Doc2Vec hyperparameters.
hyperparams = {'window': 2,  # max distance between current word and predicted word
               'min_count': 2,  # minimum number of occurrences of words in vocab
               'workers': len(os.sched_getaffinity(0)),  # one worker per available cpu
               'vector_size': 300,  # output (feature) dimensionality
               'epochs': 100  # number of training iterations over dataset
               }
print(hyperparams)

# The corpus may be supplied as an iterator or a list.
corpus = DocumentsIterator()

run_id = str(int(time.time()))
# The callback checkpoints the model to disk after every epoch.
epoch_logger = EpochLogger(f'{outdir}model_{run_id}')
model = Doc2Vec(corpus, callbacks=[epoch_logger], **hyperparams)
model.delete_temporary_training_data(
    keep_doctags_vectors=True, keep_inference=True)
print('Model trained')

# Persist the trained model.
model_path = f'{outdir}model_{run_id}'
model.save(model_path)
print(f'Model saved to {model_path}')

# Infer embeddings and stream them straight to disk so an unexpected
# error cannot lose already-computed rows.
with open(f'{outdir}results_{run_id}.csv', 'w') as out:
    for doc in corpus:
        vector = model.infer_vector(doc.words)
        out.write(doc.tags[0] + ',' + ','.join(f'{num:.16f}' for num in vector) + '\n')
print(f'Saved embeddings to {outdir}')
23099554586 | from typing import Tuple
import time
def categorizeTag(word: str) -> tuple:
    """Classify a word as a '#category' or '@person' tag.

    Fix: the signature declares ``-> tuple`` but the original returned lists;
    now returns a ``(kind, text)`` tuple, where ``kind`` is 'category',
    'person', or None, and ``text`` is the tag with its marker stripped
    (or the original word when it is not a tag).  Callers that index or
    unpack the result are unaffected.
    """
    # A lone '#' or '@' (or an empty string) is not a usable tag.
    if len(word) <= 1:
        return (None, word)
    if word.startswith('#'):
        return ('category', word[1:])
    if word.startswith('@'):
        return ('person', word[1:])
    return (None, word)
def parseEventString(eventString: str) -> dict:
wordList = eventString.split(" ")
tagObj = {'category': [], 'person': []}
for word in wordList:
tag = categorizeTag(word)
# Dont have tag
if tag[0] is None:
continue
tagObj[tag[0]].append(tag[1])
tagObj['category'].sort()
tagObj['person'].sort()
return tagObj
def binarySearch(targetList, target: str) -> bool:
    """Return True if ``target`` occurs in the sorted list ``targetList``.

    Uses the standard-library ``bisect`` module instead of a hand-rolled
    binary search.  ``targetList`` must already be sorted ascending
    (callers sort their tag lists before searching).
    """
    from bisect import bisect_left  # local import keeps the module import block untouched
    i = bisect_left(targetList, target)
    return i < len(targetList) and targetList[i] == target
def searchLog(category: str, tag: str, logList: list) -> list:
    """Return up to 10 log entries matching the search.

    'time' searches simply take the 10 most recent entries; 'category' and
    'person' searches walk the log newest-first and keep entries whose
    sorted tag list contains ``tag``.
    """
    if category == 'time':
        return logList[-10:]
    matches = []
    if category in ('category', 'person'):
        for entry in reversed(logList):
            if len(matches) >= 10:
                break
            if binarySearch(entry['tagObj'][category], tag):
                matches.append(entry)
    return matches
| Nicochung/python-event-log | helper.py | helper.py | py | 1,594 | python | en | code | 0 | github-code | 13 |
24684860459 | #!/usr/bin/env python
# coding: utf-8
# 1.Write a Python Program to Add Two Matrices?
# In[9]:
matrix1 = [[1, 2, 3],
           [4, 5, 6],
           [7, 8, 9]]

matrix2 = [[10, 11, 12],
           [13, 14, 15],
           [16, 17, 18]]

# Element-wise sum via a nested comprehension instead of index loops.
result_matrix = [[a + b for a, b in zip(row1, row2)]
                 for row1, row2 in zip(matrix1, matrix2)]

for row in result_matrix:
    print(row)
# 2.Write a Python Program to Multiply Two Matrices?
# In[11]:
# Bug fix: the original reused result_matrix from the addition cell above
# without resetting it, so the products were accumulated on top of the
# stale sums.  Start from a fresh zero matrix sized len(matrix1) x len(matrix2[0]).
result_matrix = [[0 for _ in range(len(matrix2[0]))] for _ in range(len(matrix1))]

# Standard triple-loop matrix product: result[i][j] = sum_k m1[i][k] * m2[k][j].
for i in range(len(matrix1)):
    for j in range(len(matrix2[0])):
        for k in range(len(matrix2)):
            result_matrix[i][j] += matrix1[i][k] * matrix2[k][j]

for row in result_matrix:
    print(row)
# 3.Write a Python Program to Transpose a Matrix?
# In[22]:
matrix = [[1, 2, 3],
          [4, 5, 6],
          [7, 8, 9]]

# Transpose by reading each column of `matrix` as a row of the result.
result_matrix = [[matrix[r][c] for r in range(len(matrix))]
                 for c in range(len(matrix[0]))]

for row in result_matrix:
    print(row)
# 4.Write a Python Program to Sort Words in Alphabetic Order?
# In[23]:
words = ["apple", "dog", "elephant"]

# sorted() returns a new alphabetically ordered list; the original is untouched.
sorted_words = sorted(words)
for entry in sorted_words:
    print(entry)
# 5.Write a Python Program to Remove Punctuation From a String?
# In[24]:
import string

text = "This, is a test string."

# str.translate with a deletion table strips every punctuation character in one pass.
strip_punct = str.maketrans("", "", string.punctuation)
text_without_punct = text.translate(strip_punct)
print(text_without_punct)
# In[ ]:
| Rajn013/ASSIGNMENT-08PROGRAMMING | Untitled40.py | Untitled40.py | py | 1,568 | python | en | code | 0 | github-code | 13 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.