seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
20485480675 | from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.zp4rker.basic.plugins.module_utils import calculator
def run_module():
    """Entry point of the calculate Ansible module.

    Parses x, y and an operation, evaluates it through the shared
    calculator utilities and exits with the equation and answer.
    """
    module_args = dict(
        x=dict(type='int', required=True),
        operation=dict(type='str', required=True, choices=['add', 'subtract', 'multiply', 'divide']),
        y=dict(type='int', required=True)
    )
    result = dict(
        changed=False,
        equation=None,
        answer=None
    )
    module = AnsibleModule(
        argument_spec=module_args,
        supports_check_mode=False
    )
    x = module.params['x']
    y = module.params['y']
    operation = module.params['operation']
    # Map each operation onto its display symbol and its implementation.
    dispatch = {
        'add': ('+', calculator.add),
        'subtract': ('−', calculator.subtract),
        'multiply': ('×', calculator.multiply),
        'divide': ('÷', calculator.divide),
    }
    symbol, func = dispatch[operation]
    result['equation'] = '{} {} {}'.format(x, symbol, y)
    # Division by zero is reported as a module failure, not a traceback.
    if operation == 'divide' and y == 0:
        module.fail_json(msg='You cannot divide by zero!', equation=result['equation'])
    result['answer'] = func(x, y)
    module.exit_json(**result)
def main():
    """Module entry point: delegate to run_module()."""
    run_module()
if __name__ == '__main__':
main() | zp4rker/ansible-basic-collection | plugins/modules/calculate.py | calculate.py | py | 1,339 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "ansible.module_utils.basic.AnsibleModule",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "ansible_collections.zp4rker.basic.plugins.module_utils.calculator.add",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "ansible_collections.zp4rker.basic.... |
2706087078 | import hashlib
import itertools
import json
import os
import time
from typing import Any, Dict, List
import googlemaps
PLACE_TYPES = [
"Gym",
"Park",
"Café",
"Supermarket",
"Restaurant",
"Vegetarian Restaurant",
"Burger Restaurant",
]
DISTRICT_NAMES = [
"Neuehrenfeld, Cologne, Germany",
"Sülz, Cologne, Germany",
"Raderthal, Cologne, Germany",
"Müngersdorf, Cologne, Germany",
"Südstadt, Cologne, Germany",
"Zollstock, Cologne, Germany",
]
TEST_TRIGGER = {
"place_types": ["Restaurant"],
"district_names": ["Neuehrenfeld, Cologne, Germany"],
}
FIVETRAN_TRIGGER = {"agent": "Test", "state": {}, "secrets": {}}
class MapsData:
    """Thin wrapper around the googlemaps client that collects place data
    for a district and enriches it with walking distances from the
    district center."""

    def __init__(self, api_key: str) -> None:
        self.gmaps = googlemaps.Client(key=api_key)

    def get_places_table(
        self, place_type: str, district_name: str
    ) -> List[Dict[str, Any]]:
        """Requests places in a district from googlemaps API and assembles results in a flattened dictionary

        Args:
            place_type (str): Place type to search for, e.g. "Restaurant" or "Gym"
            district_name (str): Name of the district to search for, e.g. "Neuhrenfeld, Cologne, Germany"

        Returns:
            List[Dict]: List of places in or near the district
        """
        query = f"{place_type} near {district_name}"
        places = self.gmaps.places(query=query)
        places_table = []
        # BUGFIX: the previous loop only processed a response while it still
        # carried a "next_page_token", which dropped single-page results
        # entirely and always discarded the final page of multi-page results.
        # Process every response, then stop once no further token is returned.
        while True:
            for place in places["results"]:
                place_record = self._create_place_record(
                    place, place_type, district_name
                )
                places_table.append(place_record)
            if "next_page_token" not in places:
                break
            # The next_page_token only becomes valid after a short delay.
            time.sleep(2)
            places = self.gmaps.places(
                query=query, page_token=places["next_page_token"]
            )
        places_table = self._add_distances_from_center(places_table, district_name)
        return places_table

    @staticmethod
    def _create_place_record(
        place: Dict[str, Any], place_type: str, district_name: str
    ) -> Dict[str, Any]:
        """Flatten one googlemaps place result into a table record with a
        deterministic md5-based id over (place_id, place_type, district)."""
        place_record = {
            k: v
            for k, v in place.items()
            if k
            in [
                "place_id",
                "name",
                "formatted_address",
                "rating",
                "user_ratings_total",
            ]
        }
        place_id = place_record.pop("place_id")
        # Commas/spaces are stripped so the same triple always hashes alike.
        id_string = (
            (place_id + place_type + district_name).replace(",", "").replace(" ", "")
        )
        place_record["id"] = hashlib.md5(id_string.encode("utf-8")).hexdigest()
        place_record["gmaps_place_id"] = place_id
        place_record["location_lat"] = place["geometry"]["location"]["lat"]
        place_record["location_lng"] = place["geometry"]["location"]["lng"]
        place_record["query_place_type"] = place_type
        place_record["query_district_name"] = district_name
        return place_record

    def _add_distances_from_center(
        self, places_table: List[Dict[str, Any]], district_name: str
    ) -> List[Dict[str, Any]]:
        """Adds walking distances from district center from googlemaps Distance Matrix API to places in a district

        Args:
            places_table (List[Dict[str, Any]]): Places in a district
            district_name (str): Name of the district to determine the center of

        Returns:
            List[Dict[str, Any]]: Updated places table with distances from district center
        """
        # Nothing to annotate (also avoids a Distance Matrix call with an
        # empty destination list).
        if not places_table:
            return places_table
        location = self.gmaps.find_place(
            input=district_name, input_type="textquery", fields=["geometry"]
        )
        location_coordinates = tuple(
            location["candidates"][0]["geometry"]["location"].values()
        )
        place_coordinates = [
            (p["location_lat"], p["location_lng"]) for p in places_table
        ]
        distances = []
        # The Distance Matrix API limits destinations per request, hence chunks of 25.
        for place_coordinates_chunk in _chunked_iterable(place_coordinates, 25):
            distances_from_center = self.gmaps.distance_matrix(
                origins=location_coordinates,
                destinations=list(place_coordinates_chunk),
                mode="walking",
            )["rows"][0]["elements"]
            for d in distances_from_center:
                distances.append(d["distance"]["value"])
        for i in range(len(places_table)):
            places_table[i]["distance_from_center"] = distances[i]
        return places_table
def main(request):
    """Cloud-function entry point.

    Reads the trigger config, crawls places for every (place type,
    district) combination and returns a Fivetran-style JSON response.
    """
    config = request.get_json()
    # Fall back to the module-level defaults when the trigger omits a list.
    place_types = config.get("place_types") or PLACE_TYPES
    district_names = config.get("district_names") or DISTRICT_NAMES

    # Query the maps API for every combination of place type and district.
    maps_data = MapsData(api_key=os.environ["API_KEY"])
    places_table = []
    for place_type, district_name in itertools.product(place_types, district_names):
        places_table.extend(
            maps_data.get_places_table(place_type=place_type, district_name=district_name)
        )

    response = _assemble_response_json({"places": places_table})
    return response, 200, {"Content-Type": "application/json"}
# Taken from https://alexwlchan.net/2018/12/iterating-in-fixed-size-chunks/
def _chunked_iterable(iterable, size):
it = iter(iterable)
while True:
chunk = tuple(itertools.islice(it, size))
if not chunk:
break
yield chunk
def _assemble_response_json(insert):
response_dict = {
"state": {},
"schema": {"places": {"primary_key": ["id"]}},
"insert": insert,
"hasMore": False,
}
return json.dumps(response_dict, ensure_ascii=False)
| sbunzel/city-explorer | src/city_explorer/maps.py | maps.py | py | 5,926 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "googlemaps.Client",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "typing.List",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_numb... |
10309522654 | from random import randint
from discord.ext import commands
import discord
from discord import Member
from discord.ext.commands import has_permissions, MissingPermissions
intents = discord.Intents.default()
intents.members = True
intents.messages = True #LIGNE MARCHE CHEZ MOI
#intents.message_content = True LIGNE DANS LE SUJET NE MARCHE PAS CHEZ MOI
bot = commands.Bot(
command_prefix="!", # Change to desired prefix
case_insensitive=True, # Commands aren't case-sensitive
intents = intents # Set up basic permissions
)
bot.author_id = 3579 # Change to your discord id
@bot.event
async def on_ready():
    """Called once the gateway connection is established and ready."""
    print("I'm in")
    print(bot.user)  # the bot's username and identifier
@bot.command()
async def pong(ctx):
    """Connectivity check: reply with 'pong'."""
    await ctx.send('pong')
@bot.command()
async def name(ctx):
    """Reply with the invoking user's name, mentioning the author."""
    # The first argument of a command callback is the invocation Context.
    await ctx.reply(ctx.author, mention_author=True)
@bot.command()
async def d6(ctx):
    """Roll a six-sided die and reply with the result."""
    roll = randint(1, 6)
    await ctx.reply(roll, mention_author=True)
@bot.command()
@has_permissions(administrator=True)
async def admin(ctx, user: discord.Member, *, reason=None):
    """Create an 'Admin' role with administrator permissions and assign it
    to *user*. Requires the invoker to be an administrator.
    """
    # BUGFIX: create_role expects a discord.Permissions object (not the
    # string "administrator"), and passing both 'color' and 'colour'
    # raises a TypeError in discord.py.
    role = await ctx.guild.create_role(
        name="Admin",
        permissions=discord.Permissions(administrator=True),
        colour=discord.Colour.orange(),
        hoist=True,
        mentionable=True,
        reason=reason,
    )
    # BUGFIX: roles are added through the member object; commands.Bot has
    # no add_roles method.
    await user.add_roles(role)
@bot.command()
async def ban(ctx, user: discord.Member, reason=None):
    """Ban *user* from the guild.

    BUGFIX: command callbacks always receive the invocation Context as
    their first argument; without the ctx parameter discord.py passed the
    Context into *user* and the command could never work.
    """
    await user.ban(reason=reason)
@bot.event
async def on_message(message):
    """Greet the channel when someone greets everyone.

    BUGFIX: overriding on_message suppresses command processing unless the
    message is forwarded explicitly, so every '!' command stopped working;
    hand the message back to the command handler.
    """
    if message.content.startswith('Salut tout le monde'):
        await message.reply("Salut tout seul", mention_author=True)
    await bot.process_commands(message)
token = ""
bot.run(token) # Starts the bot | GregoirePichard1/Discord-Bot | src/main.py | main.py | py | 1,696 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "discord.Intents.default",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "discord.Intents",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands.Bot",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "d... |
29378610006 | from bs4 import BeautifulSoup
import requests
req_headers = {"Accept": "*/*", "Accept-Encoding": "gzip, deflate, br",
"User-Agent": 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/71.0.3578.98 Safari/537.36 ', "Connection": "keep-alive"}
DOMAIN = 'https://msk.kassir.ru/category?main=3000&sort=0&keyword='
def get_html(url):
    """Fetch *url* and parse the body with lxml.

    Returns the parsed BeautifulSoup document, or the string 'error' when
    the response status is not 200.
    """
    webpage = requests.get(url, headers=req_headers)
    if webpage.status_code == 200:
        return BeautifulSoup(webpage.content, "lxml")
    return 'error'
def get_url(name):
    """Build the kassir.ru search URL for the normalized search term *name*."""
    return f'{DOMAIN}{name}'
def check_add_concert(concerts, name, links):
    """Append links of concerts whose title matches the search term.

    Parameters:
        concerts : iterable of bs4 tags, one per event tile
        name (str) : normalized search term ('+'-joined, lower case)
        links (list) : output list the matching hrefs are appended to
    """
    for tile in concerts:
        title = tile.find('div', {'class': 'title'})
        # Normalize the displayed title the same way the query term was built.
        normalized = title.text.replace(' ', '+').lower()
        if name in normalized:
            links.append(title.find('a')['href'])
def get_concerts(name, soup):
    """Scan the results page for event and action tiles matching *name*.

    Parameters:
        name (str) : normalized search term
        soup : parsed results page

    Returns:
        List of links to matching event pages.
    """
    links = []
    tile_classes = (
        'new--w-12 new--w-sm-4 new--w-md-3 new--w-lg-1/5 new--pr-4 new--pb-4 event-tile',
        'new--w-12 new--w-sm-4 new--w-md-3 new--w-lg-1/5 new--pr-4 new--pb-4 action-tile',
    )
    for css_class in tile_classes:
        tiles = soup.find_all('div', {'class': css_class})
        check_add_concert(tiles, name, links)
    return links
def inf_conv(info):
    """Strip newlines and non-breaking spaces from a scraped string."""
    return info.translate(str.maketrans('', '', '\n\xa0'))
def get_concert_info(link):
    """Scrape one concert page and gather its key facts.

    Parameters:
        link (str) : url of the concert page

    Returns:
        Dict with status, url, name, date, place, adress and price.
    """
    page = get_html(link)
    info = {'status': 'Found', 'url': link}
    info['name'] = inf_conv(page.find('h1', {'data-ajaxupdateable': 'title'}).text)
    info['date'] = inf_conv(page.find('div', {'class': 'date'}).text)
    info['place'] = inf_conv(page.find('div', {'class': 'place-name'}).text)
    info['adress'] = inf_conv(page.find('div', {'class': 'place-adress'}).text)
    price_tag = page.find('div', {'class': 'cost rub'}).find('span', {'class': 'price'})
    info['price'] = inf_conv(price_tag.text)
    return info
def search_for_concerts(performer):
    """Collect info dicts for concerts of the given performer.

    Parameters:
        performer (str) : raw input request

    Returns:
        A (possibly empty) list of dictionaries as produced by
        get_concert_info; at most five concerts are scraped in detail.
    """
    name = performer.replace(' ', '+').lower()
    soup = get_html(get_url(name))
    links = get_concerts(name, soup)
    return [get_concert_info(link) for link in links[:5]]
| JanMird/scrapping-telegram-bot | scrape.py | scrape.py | py | 4,252 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 23,
"usage_type": "call"
}
] |
27717890086 | import argparse
import json
import os
import pickle
from pathlib import Path
import sqlite3
from tqdm import tqdm
import random
from utils.linking_process import SpiderEncoderV2Preproc
from utils.pretrained_embeddings import GloVe
from utils.datasets.spider import load_tables
# from dataset.process.preprocess_kaggle import gather_questions
def schema_linking_producer(test, train, table, db, dataset_dir):
    """Precompute question/schema linking features for the Spider dataset.

    Args:
        test: file name of the dev/test split JSON (relative to dataset_dir).
        train: file name of the training split JSON.
        table: file name of the schema description JSON (e.g. tables.json).
        db: directory name holding one sqlite database folder per db_id.
        dataset_dir: root directory of the Spider dataset.

    Side effects: writes the preprocessed linking data to disk via
    SpiderEncoderV2Preproc.save().
    """
    # load data
    test_data = json.load(open(os.path.join(dataset_dir, test)))
    train_data = json.load(open(os.path.join(dataset_dir, train)))

    # load schemas
    schemas, eval_foreign_key_maps = load_tables([os.path.join(dataset_dir, table)])

    # Backup in-memory copies of all the DBs and create the live connections
    for db_id, schema in tqdm(schemas.items(), desc="DB connections"):
        sqlite_path = Path(dataset_dir) / db / db_id / f"{db_id}.sqlite"
        source: sqlite3.Connection
        with sqlite3.connect(str(sqlite_path)) as source:
            # Copy the on-disk DB into memory so later queries avoid disk I/O.
            dest = sqlite3.connect(':memory:')
            dest.row_factory = sqlite3.Row
            source.backup(dest)
        schema.connection = dest

    word_emb = GloVe(kind='42B', lemmatize=True)
    linking_processor = SpiderEncoderV2Preproc(dataset_dir,
                                               min_freq=4,
                                               max_count=5000,
                                               include_table_name_in_column=False,
                                               word_emb=word_emb,
                                               fix_issue_16_primary_keys=True,
                                               compute_sc_link=True,
                                               compute_cv_link=True)

    # build schema-linking: only items that pass validation are kept
    for data, section in zip([test_data, train_data], ['test', 'train']):
        for item in tqdm(data, desc=f"{section} section linking"):
            db_id = item["db_id"]
            schema = schemas[db_id]
            to_add, validation_info = linking_processor.validate_item(item, schema, section)
            if to_add:
                linking_processor.add_item(item, schema, section, validation_info)

    # save
    linking_processor.save()
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("--data_dir", type=str, default="./dataset/spider")
    args = parser.parse_args()

    # merge two training split of Spider into a single JSON file
    spider_dir = args.data_dir
    split1 = "train_spider.json"
    split2 = "train_others.json"
    total_train = []
    for item in json.load(open(os.path.join(spider_dir, split1))):
        total_train.append(item)
    for item in json.load(open(os.path.join(spider_dir, split2))):
        total_train.append(item)
    # The merged split is what schema_linking_producer reads below.
    with open(os.path.join(spider_dir, 'train_spider_and_others.json'), 'w') as f:
        json.dump(total_train, f)

    # schema-linking between questions and databases for Spider
    spider_dev = "dev.json"
    spider_train = 'train_spider_and_others.json'
    spider_table = 'tables.json'
    spider_db = 'database'
    schema_linking_producer(spider_dev, spider_train, spider_table, spider_db, spider_dir)
| BeachWang/DAIL-SQL | data_preprocess.py | data_preprocess.py | py | 2,914 | python | en | code | 90 | github-code | 36 | [
{
"api_name": "json.load",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "json.load",
"line_number": 20... |
73974477225 | import contextvars
import csv
import logging
from os import PathLike
from typing import Callable, Dict, Union, Optional
import xmltodict as xmltodict
from discord.ext.commands import Bot, Context
logger = logging.getLogger("bot.localization")
class LocalizationHandler(object):
    """Loads translation tables from CSV/XML files and resolves keys against
    a per-context locale (stored in a ContextVar so that concurrent command
    invocations can use different languages)."""
    default_handler = None  # type: LocalizationHandler | None

    def __init__(self) -> None:
        self.languages = {}  # type: Dict[str, Language]
        # Locale of the currently executing context (e.g. a bot command).
        self._current_locale = contextvars.ContextVar("_current_locale")
        self.fallback = "en"
        # The most recently created handler becomes the global default.
        LocalizationHandler.default_handler = self

    def init_bot(self, bot: Bot, get_locale: Callable[[Context], str]):
        """Register a before-invoke hook that sets the locale per command."""
        async def pre_hook_localization(ctx: Context):
            self.set_current_locale(get_locale(ctx))
        bot.before_invoke(pre_hook_localization)

    def _add_translation(self, key: str, lan: str, value: str):
        # Lazily create the Language container for unseen language codes.
        if lan not in self.languages:
            language = Language(lan)
            self.languages[lan] = language
        self.languages[lan][key] = value

    def load_from_csv(self, path: Union[PathLike, str]):
        """Load translations from a CSV file with a 'key' column and one
        column per language code."""
        with open(path, mode="r", encoding="utf8") as csv_file:
            reader = csv.DictReader(csv_file)
            for line in reader:
                for lan, val in line.items():
                    if lan == "key":
                        continue
                    self._add_translation(line["key"], lan, val)

    def load_from_xml(self, path: Union[PathLike, str]):
        """Load translations from an XML file shaped like
        <translations><key><lang>value</lang>...</key>...</translations>.

        Raises:
            LocalizationException: when a value is not a plain string
                (e.g. nested tags or duplicated entries).
        """
        logger.info("Adding localisation data %s", path)
        with open(path, mode="rb") as file:
            lang_dict = xmltodict.parse(file, encoding="UTF-8")
            for key, translations in lang_dict["translations"].items():
                for lang, value in translations.items():
                    # isinstance instead of an exact type comparison (idiomatic,
                    # and accepts str subclasses).
                    if not isinstance(value, str):
                        raise LocalizationException(f"Value for {lang}:{key} is not a string, got {type(value)}:{value}")
                    self._add_translation(key, lang, value)
        logger.info("Localisation data added")

    def set_current_locale(self, locale: str):
        """Set the locale for the current execution context."""
        self._current_locale.set(locale)

    def get_current_locale(self) -> str:
        """Return the context's locale, falling back to the default language."""
        lang = self._current_locale.get(self.fallback)
        return lang or self.fallback

    def get_text(self, key: str, raise_not_found=True) -> str:
        """Resolve *key* in the current locale, falling back to the default
        language when the key is missing.

        Raises:
            LanguageNotFoundException: when the current locale has no
                translation table at all.
        """
        language = self.get_current_locale()
        if language not in self.languages:
            raise LanguageNotFoundException(f"Language '{language}' not found")
        try:
            # BUGFIX: request a raising lookup so a missing key actually
            # triggers the fallback branch below. With the previous default
            # (raise_not_found=False) get_translation returned None and the
            # fallback language was never consulted.
            return self.languages[language].get_translation(key, raise_not_found=True)
        except TranslationNotFound:
            return self.languages[self.fallback].get_translation(key, raise_not_found)

    @classmethod
    def get_translation(cls, key: str, fallback: Optional[str] = None, raise_not_found=True):
        """Resolve *key* through the default handler; returns *fallback*
        when no handler has been created yet."""
        if cls.default_handler is None:
            return fallback
        return cls.default_handler.get_text(key, raise_not_found)
t_ = LocalizationHandler.get_translation
class Language(object):
    """A single language's key -> translated-string table."""

    def __init__(self, name: str):
        self.name = name
        self.translations = {}  # type: Dict[str, str]

    def get_translation(self, key: str, raise_not_found=False) -> Optional[str]:
        """Look up *key*; return None (or raise TranslationNotFound when
        raise_not_found is set) if the key is missing."""
        if key not in self.translations:
            if raise_not_found:
                raise TranslationNotFound(f"Key '{key}' not found for '{self.name}'")
            return None
        return self.translations[key]

    def __getitem__(self, item) -> str:
        return self.get_translation(item)

    def __setitem__(self, item, value):
        self.translations[item] = value

    def load_from_dict(self, data: Dict):
        """Merge all entries of *data* into this language's table."""
        self.translations.update(data)
class LocalizationException(Exception):
    """Base class for all localization errors."""
    pass


class LanguageNotFoundException(LocalizationException):
    """Raised when no translation table exists for the requested language."""
    pass


class TranslationNotFound(LocalizationException):
    """Raised when a key is missing from a language's translation table."""
    pass
| Blaumeise03/AccountingBot | accounting_bot/localization.py | localization.py | py | 3,837 | python | en | code | 6 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "contextvars.ContextVar",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.Bot",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "typ... |
30330496099 | # !/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2021/3/26 4:00 下午
# @Author : wangHua
# @File : ProductAddCrawler.py
# @Software: PyCharm
from app.crawlers.BaseAmazonCrawler import BaseAmazonCrawler
from utils import Http
from app.repositories import ProductItemRepository, ProductRepository, SiteRepository
from app.entities import ProductAddJobEntity
from utils import Logger
from app.crawlers.elements import ProductElement
from app.exceptions import CrawlErrorException, NotFoundException
import requests
class ProductAddCrawler(BaseAmazonCrawler):
    """
    Crawler triggered when an ASIN is added; related queue jobs can be
    enqueued for the new product.
    """
    def __init__(self, jobEntity: ProductAddJobEntity, http: Http):
        self.productItemRepository = ProductItemRepository()
        self.productRepository = ProductRepository()
        self.siteRepository = SiteRepository()
        self.base_url = '{}/dp/{}'  # Amazon product URL template: (domain, ASIN)
        self.jobEntity = jobEntity
        self.product = self.productRepository.show(self.jobEntity.product_id)
        self.site = self.siteRepository.show(self.jobEntity.site_id)
        self.productItem = None
        # Only initialise the base crawler when both records exist.
        if self.product and self.site:
            self.url = self.base_url.format(self.site.domain, self.product.asin)
            BaseAmazonCrawler.__init__(self, http=http, site=self.site)

    def run(self):
        """Fetch the product page and create a product item record when the
        page has a parseable title.

        Raises:
            CrawlErrorException: when the request fails or the page has no
                title.
        """
        try:
            # Ask Amazon for the English version when the site supports it.
            if self.site_config_entity.has_en_translate:
                self.url = self.url + '?language=en_US'
            Logger().debug('新增asin{}开始抓取,地址 {}'.format(self.product.asin, self.url))
            rs = self.get(url=self.url)
            product_element = ProductElement(content=rs.content, site_config=self.site_config_entity)
            title = getattr(product_element, 'title')
            if title:
                self.productItem = self.productItemRepository.create({
                    'product_id': self.product.id,
                    'site_id': self.site.id
                })
            else:
                raise CrawlErrorException('页面请求异常, 地址 {}'.format(self.url))
        except requests.exceptions.RequestException:
            raise CrawlErrorException('product ' + self.url + '请求异常')
        except NotFoundException:
            # A missing page is tolerated silently (product gone from the site).
            pass
| whale-fall-wh/producer-consumer | app/crawlers/ProductAddCrawler.py | ProductAddCrawler.py | py | 2,295 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "app.crawlers.BaseAmazonCrawler.BaseAmazonCrawler",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "app.entities.ProductAddJobEntity",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "utils.Http",
"line_number": 24,
"usage_type": "name"
... |
5571329753 | import pandas as pd
import jieba
import warnings
import re
warnings.filterwarnings('ignore')
import json
# 第一大部分,查全率,查准率的计算
def judge(strs):
strs = strs.split(",")
l = []
for s in strs:
s = s.replace('[',"").replace(']',"").replace("'","")
l.append(int(s))
return l
def tongji(l1,l2):
listes = []
for i in range(988):
l = []
l.append(l1[i])
ls = judge(l2[i])
for s in ls:
l.append(s)
listes.append(l)
return listes
def readdata1(data1s1, data1s2):
    """Group consecutive rows that share the same question id.

    Args:
        data1s1: sequence of question ids (consecutive repeats mark a group).
        data1s2: sequence of duplicate ids aligned with data1s1.

    Returns:
        A list of groups, each [question_id, dup, dup, ...].
    """
    groups = []
    current = [data1s1[0], data1s2[0]]
    for i in range(1, len(data1s2)):
        if data1s1[i] == data1s1[i - 1]:
            current.append(data1s2[i])
        else:
            groups.append(current)
            current = [data1s1[i], data1s2[i]]
    # BUGFIX: the original only appended a group when a new id started, so
    # the final group was always dropped; flush it after the loop.
    groups.append(current)
    return groups
def judgeidda(num, iddata2):
    """Search *iddata2* for a group whose leading id equals *num*.

    Returns:
        (True, index) when found, otherwise (False, 0).
    """
    for idx, group in enumerate(iddata2):
        if group[0] == num:
            return True, idx
    return False, 0
def compareequal(s1, s2):
    """Return True when s1 and s2 have the same length and every element of
    s1 occurs somewhere in s2 (multiplicity is ignored)."""
    if len(s1) != len(s2):
        return False
    return all(item in s2 for item in s1)
def compareequal2(s1, s2):
    """Return True when at least one element of s1 occurs in s2."""
    return any(item in s2 for item in s1)
def chaquanzhun(iddata1, iddata2):
    """Compute recall and precision of a predicted group against its
    ground-truth group (both include the question id itself).

    Returns:
        (recall, precision): hits / len(iddata2) and hits / len(iddata1).
    """
    hits = sum(1 for item in iddata1 if item in iddata2)
    # 查全率 (recall), 查准率 (precision)
    return hits / len(iddata2), hits / len(iddata1)
def compare(iddata2, iddata1):
    """For every predicted group that overlaps its ground-truth group,
    compute recall and precision.

    Args:
        iddata2: ground-truth groups [[id, dup, ...], ...]
        iddata1: predicted groups [[id, dup, ...], ...]

    Returns:
        A list of [question_id, recall, precision] rows.
    """
    results = []
    for group in iddata1:
        found, j = judgeidda(group[0], iddata2)
        if not found:
            continue
        if compareequal2(group, iddata2[j]):
            recallrate, precisionrate = chaquanzhun(group, iddata2[j])
            results.append([group[0], recallrate, precisionrate])
    return results
def main2():
    """Part one: compute recall/precision of the similarity matches against
    the ground truth in attachment 2 and write the rows to a CSV file."""
    data1 = pd.read_csv(r'F:\taidibei\huazhongbei\shuju\0.2--1至7294.csv', sep=',', encoding='utf-8',
                        error_bad_lines=False)
    data2 = pd.read_csv(r'F:\taidibei\huazhongbei\附件2.csv', sep=',', encoding='utf-8', error_bad_lines=False)
    # id_data2: questionID and duplicates rows from attachment 2 (label = 1), e.g.
    # [99850, 95677],
    # [104165, 104161],
    # [101403, 100311],
    id_data2 = tongji(data2['questionID'], data2['duplicates'])
    # id_data1: questionID and duplicates whose similarity exceeds 0.3, e.g.
    # [63022, 62860],
    # [46866, 46762],
    # [97939, 80376]
    id_data1 = readdata1(data1['id'], data1['du'])
    # l_i_q_z: rows of [questionID, recall, precision], e.g.
    # [99850, 0.5, 0.5],
    # [104165, 1.0, 1.0],
    # [101403, 1.0, 1.0]
    l_i_q_z = compare(id_data2, id_data1)
    # NOTE(review): the file handle is never closed explicitly; rows may be
    # lost if the interpreter exits abnormally.
    f = open(r'F:\taidibei\huazhongbei\l_i_q_z0.2.csv', "w+")
    f.write('问题id'+","+'查全率'+","+"查准率"+"\n")
    for i in range(len(l_i_q_z)):
        f.write(str(l_i_q_z[i][0]) + ",")
        f.write(str(l_i_q_z[i][1]) + ",")
        f.write(str(l_i_q_z[i][2]) + "\n")
# 第二大部分,相似度的计算
# 2-garm分词
def n_garm(splits):
    """Build 2-gram pairs from a token list.

    Returns:
        A list of [token_i, token_i+1] pairs (empty for fewer than 2 tokens).
    """
    return [[splits[i], splits[i + 1]] for i in range(len(splits) - 1)]
# 获取问题和id
def contextCandel(data, num):
    """Tokenise each question into stop-word-filtered 2-grams.

    Args:
        data: sequence of question texts.
        num: sequence of question ids aligned with *data*.

    Returns:
        A list of [id, [[w1, w2], ...]] entries, one per question.
    """
    listes = []
    for i in range(len(data)):
        datanum = []
        # NOTE(review): 'str' shadows the builtin; consider renaming.
        str = data[i].strip()
        # Remove all whitespace before segmentation.
        str = re.sub('[\s+]', '', str)
        # Segment with jieba and drop stop words (the stop-word file is
        # re-read for every question; hoisting it would be cheaper).
        split_words = [x for x in jieba.cut(str) if x not in
                       stopone(r'F:\taidibei\huazhongbei\stop_words.utf8')]
        print(split_words)
        split_words = n_garm(split_words)
        datanum.append(num[i])
        datanum.append(split_words)
        listes.append(datanum)
    return listes
# 调用停词表
def stopone(path):
    """Read a stop-word file (UTF-8) and return its lines, stripped."""
    with open(path, 'r', encoding='utf-8') as handle:
        return [line.strip() for line in handle]
# n-garm比较
def judge(l, a, b):
    """Symmetric 2-gram overlap similarity between entries *a* and *b* of *l*.

    Each entry is [id, grams]; the score counts grams shared in both
    directions, divided by the combined gram count.
    """
    grams_a = l[a][1]
    grams_b = l[b][1]
    shared_ab = sum(1 for gram in grams_a if gram in grams_b)
    shared_ba = sum(1 for gram in grams_b if gram in grams_a)
    return (shared_ab + shared_ba) / (len(grams_a) + len(grams_b))
# 计算相似度并存入csv
def compare_judge(l):
f = open(r'F:\taidibei\huazhongbei\number.csv', "w+")
for i in range(len(l)-1):
k = i + 1
for j in range(len(l)-k):
if j + k < len(l):
number = judge(l, i, j + k)
# 选择相似度大于0.2的存储
if number > 0.2:
f.write(str(l[i][0])+",")
f.write(str(l[j + k][0]) + ",")
f.write(str(number) + "\n")
f.close()
def main1():
    """Part two: tokenise attachment 1 and write all pairwise similarities
    above 0.2 to number.csv."""
    data = pd.read_csv(r'F:\taidibei\huazhongbei\附件1.csv', sep=',', encoding='utf-8', error_bad_lines=False)
    l = contextCandel(data['translated'], data['id'])
    compare_judge(l)
def main3():
    """Count how many de-duplicated entries the similarity CSV contains and
    print [value, count] groups of consecutive equal rows."""
    data = pd.read_csv(r'F:\taidibei\huazhongbei\相似度大于0.2--1至7294.csv', sep=',', encoding='utf-8',
                       error_bad_lines=False)
    # Count consecutive duplicates in the similarity file (after removing repeats).
    # NOTE(review): indexing a DataFrame with data[0] selects a COLUMN
    # labelled 0, not the first row — this only works for a layout with
    # integer column labels; verify against the actual CSV.
    ls1 = []
    ls2 = []
    ls2.append(data[0])
    count = 1
    for i in range(len(data) - 1):
        i = i + 1
        if data[i] == data[i - 1]:
            count = count + 1
        else:
            ls2.append(count)
            ls1.append(ls2)
            ls2 = []
            ls2.append(data[i])
            count = 1
    for i in ls1:
        print(i)
#按重复数量从大到小返回一个[[id,数量],[id,数量],[id,数量]]的列表
def main4(data, topK=806):
    """Return the *topK* most frequent items of *data* as (item, count)
    pairs, ordered by descending count."""
    counts = {}
    for item in data:
        counts[item] = counts.get(item, 0) + 1
    return sorted(counts.items(), key=lambda pair: pair[1], reverse=True)[:topK]
def main():
    """Interactive dispatcher for the four analysis steps."""
    # BUGFIX: show the menu before asking for input (the prompts used to be
    # printed only after the user had already typed a choice).
    print("1.第二大部分,相似度的计算,写进csv.")
    print("2.第一大部分,查全率,查准率的计算,写近csv.")
    print("3.计算相似度大于0.2的csv里面除去重复的有多少数据.")
    print("4.重复数量从大到小返回一个[[id,数量],[id,数量],[id,数量]]的列表.")
    # int() instead of eval(): eval executes arbitrary expressions typed by
    # the user, which is unsafe and unnecessary for a numeric menu choice.
    k = int(input("请输入一个数字:"))
    if k == 1:
        main1()
    if k == 2:
        main2()
    if k == 3:
        main3()
    if k == 4:
        # BUGFIX: main4() was called without its required 'data' argument and
        # always raised a TypeError; load the similarity file's id column and
        # print the frequency ranking instead.
        # TODO(review): confirm the intended input column for main4.
        data = pd.read_csv(r'F:\taidibei\huazhongbei\相似度大于0.2--1至7294.csv', sep=',',
                           encoding='utf-8', error_bad_lines=False)
        print(main4(data[data.columns[0]]))
if __name__ == '__main__':
main()
| MJ-NCEPU/HauZhongBei | DataMining.py | DataMining.py | py | 7,045 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "warnings.filterwarnings",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 108,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "re.sub",
... |
5515870088 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import itertools
import tensorflow as tf
import numpy as np
import cifar10_utils
import cifar10_siamese_utils
from convnet import ConvNet
from siamese import Siamese
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import LinearSVC
from sklearn.manifold import TSNE
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
#plt = None
LEARNING_RATE_DEFAULT = 1e-4
BATCH_SIZE_DEFAULT = 128
MAX_STEPS_DEFAULT = 15000
EVAL_FREQ_DEFAULT = 1000
CHECKPOINT_FREQ_DEFAULT = 5000
PRINT_FREQ_DEFAULT = 10
OPTIMIZER_DEFAULT = 'ADAM'
CIFAR10_LABELS = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
DATA_DIR_DEFAULT = './cifar10/cifar-10-batches-py'
LOG_DIR_DEFAULT = './logs/cifar10'
CHECKPOINT_DIR_DEFAULT = './checkpoints'
def train_step(loss):
    """
    Defines the ops to conduct an optimization step. You can set a learning
    rate scheduler or pick your favorite optimizer here. This set of operations
    should be applicable to both ConvNet() and Siamese() objects.

    Args:
        loss: scalar float Tensor, full loss = cross_entropy + reg_loss

    Returns:
        train_op: Ops for optimization.
    """
    # Adam with the learning rate supplied on the command line.
    return tf.train.AdamOptimizer(FLAGS.learning_rate).minimize(loss)
def train():
    """
    Performs training and evaluation of ConvNet model.

    First define your graph using class ConvNet and its methods. Then define
    necessary operations such as trainer (train_step in this case), savers
    and summarizers. Finally, initialize your model within a tf.Session and
    do the training.

    Evaluation on test set is conducted over the full 10k test images in
    fixed-size minibatches; training metrics are reported per minibatch.
    - training metrics every print_freq iterations
    - test metrics every eval_freq iterations
    - model snapshots (tf.train.Saver) every checkpoint_freq iterations
    """
    # Set the random seeds for reproducibility. DO NOT CHANGE.
    tf.set_random_seed(42)
    np.random.seed(42)

    cifar10 = cifar10_utils.get_cifar10(FLAGS.data_dir)
    cnn = ConvNet()

    # Input dimensions taken from the data itself.
    data_dims = list(cifar10.train.images.shape[1:])
    with tf.Graph().as_default():
        # Placeholders for one minibatch of images and one-hot labels.
        x_pl = tf.placeholder(dtype=tf.float32, shape=[FLAGS.batch_size] + data_dims)
        y_pl = tf.placeholder(dtype=tf.float32, shape=[FLAGS.batch_size, cnn.n_classes])

        logits = cnn.inference(x_pl)
        loss = cnn.loss(logits, y_pl)
        acc = cnn.accuracy(logits, y_pl)
        train_op = train_step(loss)

        summary_op = tf.merge_all_summaries()
        init_op = tf.initialize_all_variables()
        with tf.Session() as sess:
            saver = tf.train.Saver()
            sess.run(init_op)

            # Separate writers so train/test curves can be compared in TensorBoard.
            train_summary_writer = tf.train.SummaryWriter(FLAGS.log_dir + '/train', sess.graph)
            test_summary_writer = tf.train.SummaryWriter(FLAGS.log_dir + '/test', sess.graph)

            for step in range(FLAGS.max_steps):
                x, y = cifar10.train.next_batch(FLAGS.batch_size)
                feed = {x_pl: x, y_pl: y}
                train_loss, train_acc, summary_str, _ = sess.run([loss, acc, summary_op, train_op], feed_dict=feed)

                # Report training progress every print_freq steps.
                if step == 0 or (step + 1) % FLAGS.print_freq == 0 or step + 1 == FLAGS.max_steps:
                    print('TRAIN step: ', str(step), ' err: ', str(train_loss), ' acc: ', str(train_acc))
                    train_summary_writer.add_summary(summary_str, step)
                    train_summary_writer.flush()

                # Full test-set evaluation every eval_freq steps, averaged
                # over fixed-size minibatches (the remainder is dropped).
                if step == 0 or (step + 1) % FLAGS.eval_freq == 0 or step + 1 == FLAGS.max_steps:
                    x, y = cifar10.test.images, cifar10.test.labels
                    num_batches = int(np.floor(x.shape[0] / FLAGS.batch_size))
                    test_err = 0.
                    test_acc = 0.
                    for idx in range(num_batches):
                        x_batch = x[idx * FLAGS.batch_size:(idx + 1) * FLAGS.batch_size, :, :, :]
                        y_batch = y[idx * FLAGS.batch_size:(idx + 1) * FLAGS.batch_size, :]
                        feed = {x_pl: x_batch, y_pl: y_batch}
                        batch_err, batch_acc = sess.run([loss, acc], feed_dict=feed)
                        test_err += batch_err
                        test_acc += batch_acc
                    test_err /= num_batches
                    test_acc /= num_batches
                    # NOTE(review): this prints the TRAIN metrics rather than
                    # test_err/test_acc, and the summary below re-runs
                    # summary_op on the last test minibatch only — looks
                    # unintended; confirm before relying on these curves.
                    print('--- TEST --- step: ', str(step), ' err: ', str(train_loss), ' acc: ', str(train_acc))
                    summary_str = sess.run(summary_op, feed_dict=feed)  # possibly incorrect. should pool summaries
                    test_summary_writer.add_summary(summary_str, step)
                    test_summary_writer.flush()

                # Snapshot the model every checkpoint_freq steps.
                if (step + 1) % FLAGS.checkpoint_freq == 0 or step + 1 == FLAGS.max_steps:
                    pass  # NOTE(review): stray 'pass'; harmless dead statement.
                    checkpoint_file = os.path.join(FLAGS.checkpoint_dir, 'ckpt')
                    saver.save(sess, checkpoint_file, global_step=(step + 1))
def train_siamese():
"""
Performs training and evaluation of Siamese model.
First define your graph using class Siamese and its methods. Then define
necessary operations such as trainer (train_step in this case), savers
and summarizers. Finally, initialize your model within a tf.Session and
do the training.
---------------------------
How to evaluate your model:
---------------------------
On train set, it is fine to monitor loss over minibatches. On the other
hand, in order to evaluate on test set you will need to create a fixed
validation set using the data sampling function you implement for siamese
architecture. What you need to do is to iterate over all minibatches in
the validation set and calculate the average loss over all minibatches.
---------------------------------
How often to evaluate your model:
---------------------------------
- on training set every print_freq iterations
- on test set every eval_freq iterations
------------------------
Additional requirements:
------------------------
Also you are supposed to take snapshots of your model state (i.e. graph,
weights and etc.) every checkpoint_freq iterations. For this, you should
study TensorFlow's tf.train.Saver class. For more information, please
checkout:
[https://www.tensorflow.org/versions/r0.11/how_tos/variables/index.html]
"""
# Set the random seeds for reproducibility. DO NOT CHANGE.
tf.set_random_seed(42)
np.random.seed(42)
########################
# PUT YOUR CODE HERE  #
########################
# NOTE(review): leading indentation of this file has been lost in this text
# dump; the nesting implied below is inferred from the training-loop logic.
cifar10 = cifar10_siamese_utils.get_cifar10(FLAGS.data_dir)
siam = Siamese()
data_dims = list(cifar10.train.images.shape[1:])
# Fixed validation set of pre-sampled (x1, x2, label) tuples.
test_set = cifar10_siamese_utils.create_dataset(source_data=cifar10.test, num_tuples=500,
batch_size=FLAGS.batch_size, fraction_same=0.2)
with tf.Graph().as_default():
# Two image placeholders (one per siamese branch) plus the similarity label.
c1_pl = tf.placeholder(dtype=tf.float32, shape=[FLAGS.batch_size] + data_dims)
c2_pl = tf.placeholder(dtype=tf.float32, shape=[FLAGS.batch_size] + data_dims)
y_pl = tf.placeholder(dtype=tf.float32, shape=[FLAGS.batch_size])
# The two branches share weights: the second inference call reuses variables.
c1 = siam.inference(c1_pl, reuse=False)
c2 = siam.inference(c2_pl, reuse=True)
loss = siam.loss(c1, c2, label=y_pl, margin=FLAGS.margin)
train_op = train_step(loss)
summary_op = tf.merge_all_summaries()
init_op = tf.initialize_all_variables()
with tf.Session() as sess:
saver = tf.train.Saver()
sess.run(init_op)
train_summary_writer = tf.train.SummaryWriter(FLAGS.log_dir + '/train', sess.graph)
test_summary_writer = tf.train.SummaryWriter(FLAGS.log_dir + '/test', sess.graph)
for step in range(FLAGS.max_steps):
x1, x2, y = cifar10.train.next_batch(FLAGS.batch_size)
feed = {c1_pl: x1, c2_pl: x2, y_pl: y}
train_loss, summary_str, _ = sess.run([loss, summary_op, train_op], feed_dict=feed)
# Log training loss every print_freq steps (plus first and last step).
if step == 0 or (step + 1) % FLAGS.print_freq == 0 or step + 1 == FLAGS.max_steps:
print('TRAIN step: ', str(step), ' err: ', str(train_loss))
train_summary_writer.add_summary(summary_str, step)
train_summary_writer.flush()
# Average the loss over the fixed validation tuples every eval_freq steps.
if step == 0 or (step + 1) % FLAGS.eval_freq == 0 or step + 1 == FLAGS.max_steps:
test_err = 0.
for tup in test_set:
x1_batch, x2_batch, y_batch = tup
feed = {c1_pl: x1_batch, c2_pl: x2_batch, y_pl: y_batch}
batch_err = sess.run([loss], feed_dict=feed)[0]
test_err += batch_err
summary_str = sess.run(summary_op, feed_dict=feed) # possibly incorrect.
test_summary_writer.add_summary(summary_str, step)
test_err /= len(test_set)
print('--- TEST --- step: ', str(step), ' err: ', str(test_err))
test_summary_writer.flush()
# Snapshot the full model state every checkpoint_freq steps.
if (step + 1) % FLAGS.checkpoint_freq == 0 or step + 1 == FLAGS.max_steps:
checkpoint_file = os.path.join(FLAGS.checkpoint_dir, 'ckpt')
saver.save(sess, checkpoint_file, global_step=(step + 1))
########################
# END OF YOUR CODE    #
########################
def feature_extraction(check_point_name='ckpt-15000'):
"""
This method restores a TensorFlow checkpoint file (.ckpt) and rebuilds inference
model with restored parameters. From then on you can basically use that model in
any way you want, for instance, feature extraction, finetuning or as a submodule
of a larger architecture. However, this method should extract features from a
specified layer and store them in data files such as '.h5', '.npy'/'.npz'
depending on your preference. You will use those files later in the assignment.
Args:
check_point_name
Returns:
None
"""
########################
# PUT YOUR CODE HERE  #
########################
cifar10 = cifar10_utils.get_cifar10(FLAGS.data_dir)
x, y = cifar10.test.images, cifar10.test.labels
# cnn = ConvNet()
data_dims = list(cifar10.train.images.shape[1:])
with tf.Graph().as_default() as graph:
# Rebuild the inference graph of the chosen model so the named tensor exists.
x_pl = tf.placeholder(dtype=tf.float32, shape=[FLAGS.batch_size] + data_dims)
if FLAGS.train_model == 'linear':
ConvNet().inference(x_pl)
elif FLAGS.train_model == 'siamese':
Siamese().inference(x_pl)
# Fetch the tensor to extract by graph name (':0' selects its first output).
feature_op = graph.get_tensor_by_name(FLAGS.extract_op + ':0')
num_samples = x.shape[0]
assert num_samples % FLAGS.batch_size == 0, 'batch_size must be chosen to divide test set without rest'
num_batches = int(num_samples / FLAGS.batch_size)
with tf.Session() as sess:
saver = tf.train.Saver()
# Restore the trained weights into the freshly built graph.
saver.restore(sess, os.path.join(FLAGS.checkpoint_dir, check_point_name))
feat_list = []
for idx in range(num_batches):
x_batch = x[idx * FLAGS.batch_size:(idx + 1) * FLAGS.batch_size, :, :, :]
feed = {x_pl: x_batch}
batch_features = sess.run([feature_op], feed_dict=feed)
batch_features = np.asarray(batch_features)
# Flatten the leading run dimensions so each row is one sample's feature.
batch_features = np.reshape(batch_features, [-1, batch_features.shape[-1]])
feat_list.append(batch_features)
feat_x = np.concatenate(feat_list)
print('made feature array of dims: ', feat_x.shape)
# Persist features via np.save, named after the extracted op.
# NOTE(review): the handle is opened in text mode ('w+'); np.save needs a
# binary handle on Python 3 -- confirm the targeted Python version.
file_name = '_'.join(FLAGS.extract_op.split('/')) + '_test_features'
f_out = open(os.path.join(FLAGS.log_dir, file_name), 'w+')
np.save(f_out, feat_x)
f_out.close()
def tsne_visualize():
"""Project stored features to 2-D with t-SNE and scatter-plot them by class."""
feat_x = np.load(os.path.join(FLAGS.log_dir, FLAGS.feat_file))
y = np.load(os.path.join(FLAGS.log_dir, 'test_labels'))
# One-hot labels -> class indices, truncated to the first tsne_res samples.
y = np.argmax(y, 1)[:FLAGS.tsne_res]
model = TSNE()
proj = model.fit_transform(feat_x[:FLAGS.tsne_res, :])
# One fixed color per CIFAR-10 class.
colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k', 'brown', 'orange', 'gray']
plt.figure()
ax = plt.subplot(111)
for idx in range(int(np.max(y)) + 1):
x_i = proj[y == idx, :]
ax.scatter(x_i[:, 0], x_i[:, 1], marker='.', c=colors[idx], edgecolors=colors[idx], label=CIFAR10_LABELS[idx])
# Shrink the axes so the legend fits to the right of the plot.
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.show()
# plt.savefig(os.path.join(FLAGS.log_dir, FLAGS.vis_feats) + '_res' + str(FLAGS.tsne_res), format='png')
def n_v_1_classify():
"""Fit a one-vs-rest linear SVM on stored features and show its confusion matrix."""
feat_x = np.load(os.path.join(FLAGS.log_dir, FLAGS.feat_file))[:FLAGS.nv1_cut, :]
y = np.load(os.path.join(FLAGS.log_dir, 'test_labels'))[:FLAGS.nv1_cut, :]
# One-hot labels -> class indices.
y = np.argmax(y, 1)
# NOTE(review): the classifier is fit and evaluated on the same samples, so
# the confusion matrix below reports training accuracy -- confirm intent.
pred = OneVsRestClassifier(LinearSVC(random_state=0)).fit(feat_x, y).predict(feat_x)
c_mat = confusion_matrix(y, pred)
print(c_mat)
plot_confusion_matrix(c_mat, CIFAR10_LABELS)
def plot_confusion_matrix(cm, classes,  # taken form scikit learn
                          normalize=True,
                          title='Confusion matrix',
                          cmap=None):
    """Render a confusion matrix as a heatmap with per-cell value labels.

    Args:
        cm: square matrix of counts, shape [n_classes, n_classes].
        classes: tick labels, one per class.
        normalize: if True, convert each row to fractions rounded to 2 decimals.
        title: plot title.
        cmap: matplotlib colormap; defaults to plt.cm.Blues.
    """
    if cmap is None:
        cmap = plt.cm.Blues
    # BUG FIX: normalize *before* drawing, so the heatmap colors match the
    # printed cell values (the original normalized after plt.imshow(), making
    # the image show raw counts while the text showed fractions).
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        cm = np.round(cm, 2)
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)
    # Overlay each cell value, flipping text color for readability on dark cells.
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, cm[i, j],
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.show()
########################
# END OF YOUR CODE #
########################
def initialize_folders():
    """
    Initializes all folders in FLAGS variable.
    """
    # Create each required directory (log, data, checkpoints) if missing.
    for directory in (FLAGS.log_dir, FLAGS.data_dir, FLAGS.checkpoint_dir):
        if not tf.gfile.Exists(directory):
            tf.gfile.MakeDirs(directory)
def print_flags():
    """
    Prints all entries in FLAGS variable.
    """
    # Emit one "name : value" line per parsed command-line flag.
    for name, setting in vars(FLAGS).items():
        print(name + ' : ' + str(setting))
def main(_):
"""Entry point: dispatch to the task selected by the --task / --train_model flags."""
print_flags()
initialize_folders()
if FLAGS.task == 'train':
# Train either the plain ConvNet or the siamese network.
if FLAGS.train_model == 'linear':
train()
elif FLAGS.train_model == 'siamese':
train_siamese()
else:
raise ValueError("--train_model argument can be linear or siamese")
elif FLAGS.task == 'extract':
# Restore a checkpoint and dump features of the requested op.
feature_extraction(FLAGS.ckpt_file)
elif FLAGS.task == 'nv1':
n_v_1_classify()
elif FLAGS.task == 'tsne':
tsne_visualize()
else:
raise ValueError('unknown task')
if __name__ == '__main__':
# Command line arguments
# All defaults come from the *_DEFAULT module constants; parse_known_args is
# used so tf.app.run() can consume any flags this parser does not recognize.
parser = argparse.ArgumentParser()
parser.add_argument('--learning_rate', type = float, default = LEARNING_RATE_DEFAULT,
help='Learning rate')
parser.add_argument('--max_steps', type = int, default = MAX_STEPS_DEFAULT,
help='Number of steps to run trainer.')
parser.add_argument('--batch_size', type = int, default = BATCH_SIZE_DEFAULT,
help='Batch size to run trainer.')
parser.add_argument('--print_freq', type = int, default = PRINT_FREQ_DEFAULT,
help='Frequency of evaluation on the train set')
parser.add_argument('--eval_freq', type = int, default = EVAL_FREQ_DEFAULT,
help='Frequency of evaluation on the test set')
parser.add_argument('--checkpoint_freq', type = int, default = CHECKPOINT_FREQ_DEFAULT,
help='Frequency with which the model state is saved.')
parser.add_argument('--data_dir', type = str, default = DATA_DIR_DEFAULT,
help='Directory for storing input data')
parser.add_argument('--log_dir', type = str, default = LOG_DIR_DEFAULT,
help='Summaries log directory')
parser.add_argument('--checkpoint_dir', type = str, default = CHECKPOINT_DIR_DEFAULT,
help='Checkpoint directory')
# parser.add_argument('--is_train', type = str, default = 'True',
# help='Training or feature extraction')
parser.add_argument('--train_model', type = str, default = 'linear',
help='Type of model. Possible options: linear and siamese')
parser.add_argument('--task', type = str, default = 'train',
help='Category of task to be executed (train, extract, nv1, tsne')
parser.add_argument('--extract_op', type = str, default = 'ConvNet/dense1/d1_out', # sorry, but this just
help='Name of operation for which features are extracted') # makes things a lot cleaner
parser.add_argument('--feat_file', type = str, default = '',
help='Name of features file to be visualized or classified')
parser.add_argument('--tsne_res', type = int, default = -1,
help='number of test samples to be visualized')
parser.add_argument('--nv1_cut', type = int, default = 10000,
help='number of test samples to be used in classification')
parser.add_argument('--margin', type = float, default = 0.2,
help='margin for siamese networks')
parser.add_argument('--ckpt_file', type = str, default = 'ckpt-15000',
help='checkpoint file to be used for extraction')
# FLAGS is read as a module-level global by the functions above.
FLAGS, unparsed = parser.parse_known_args()
tf.app.run()
########################
| frhrdr/dlc2016 | practical_3/train_model.py | train_model.py | py | 19,281 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "tensorflow.train.AdamOptimizer",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "tensorflow.train",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.set_random_seed",
"line_number": 91,
"usage_type": "call"
},
{
"a... |
71551273063 | from functions import Complexity
import os, re, csv, pickle
import seaborn as sns
import matplotlib.pyplot as plt
plt.style.use('ggplot')
import pandas as pd
from scipy import stats
import json
# Get the interactions
# Get the BDM values for each protein in each pair
# get correlation
# plot for fun
# get interactions: the file is almost-JSON with single quotes, so normalize
# newlines and quoting before parsing.
interactions_file = os.path.join('virus_host_interactions', 't7')
with open(interactions_file, 'r') as content_file:
    content = content_file.read()
content = re.sub('\n', '', content)
content = re.sub('\'', '"', content)
interactions = json.loads(content)
# Load in the BDM values
virus_bdms = pickle.load(open('bdm_pickles/bdms_proteins_EDSSMat90_data_t7', 'rb'))
host_bdms = pickle.load(open('bdm_pickles/bdms_proteins_EDSSMat90_data_t7_host', 'rb'))
# translate viral protein names to match
viral_bdm_proteins = list(virus_bdms.keys())
for v in viral_bdm_proteins:
    print(v)
# BUG FIX: iterate over a snapshot of the keys. Popping and inserting while
# iterating the live .keys() view raises RuntimeError on Python 3.
for key in list(virus_bdms.keys()):
    # this gets 25 / 27 proteins matched, NOT the "spike" one
    new_key = re.sub('(pLVX\-EF1alpha\-|pLXV\-EF1a\-|pLVX\-EF1a\-|nCoV2019\-|IRES\-Puro|2xStrep|nCoV\-2019|\-|\_)', '', key)
    if new_key == '':
        continue
    virus_bdms[new_key] = virus_bdms.pop(key)
# Make the edge tuples
# NOTE(review): viral_proteins / host_proteins are not defined anywhere in
# this file -- presumably they should be derived from `interactions`; confirm.
ppi_edges = tuple(zip(viral_proteins, host_proteins))
# calculate all the non-ppis
# Every virus-host pair that is not a known interaction.
non_ppi_edges = []
for vnode in viral_proteins:
for hnode in host_proteins:
if (vnode, hnode) not in ppi_edges:
non_ppi_edges.append((vnode, hnode))
# --------- PPI EDGES --------
# for each edge, get the different BDM values
bdm_whole_edges = []
bdm_density_edges = []
bdm_second_edges = []
bdm_third_edges = []
for edge in ppi_edges:
# virus - host
# try to match, or else just ignore
try:
v_bdm_values = virus_bdms[edge[0]]
h_bdm_values = host_bdms[edge[1]]
except:
continue
# Collect (viral BDM, host BDM) pairs for each complexity measure.
bdm_whole_edges.append((v_bdm_values['whole_bdm'], h_bdm_values['whole_bdm']))
bdm_density_edges.append((v_bdm_values['bdm_density'], h_bdm_values['bdm_density']))
bdm_second_edges.append((v_bdm_values['second_order'], h_bdm_values['second_order']))
bdm_third_edges.append((v_bdm_values['third_order'], h_bdm_values['third_order']))
# turn these into dfs for plotting and stuff
# Each frame holds (viral BDM, host BDM, measure name); they are then stacked
# into one long-format dataframe and tagged PPI = 1 (known interactions).
df_whole = pd.DataFrame(list(zip(list(zip(*bdm_whole_edges))[0], list(zip(*bdm_whole_edges))[1], ['Whole']*len(list(zip(*bdm_whole_edges))[1]))),
columns=['Viral Protein BDM', 'Host Protein BDM', 'BDM Type'])
df_density = pd.DataFrame(list(zip(list(zip(*bdm_density_edges))[0], list(zip(*bdm_density_edges))[1], ['Density']*len(list(zip(*bdm_density_edges))[1]))),
columns=['Viral Protein BDM', 'Host Protein BDM', 'BDM Type'])
df_second = pd.DataFrame(list(zip(list(zip(*bdm_second_edges))[0], list(zip(*bdm_second_edges))[1], ['Secondary']*len(list(zip(*bdm_second_edges))[1]))),
columns=['Viral Protein BDM', 'Host Protein BDM', 'BDM Type'])
df_third = pd.DataFrame(list(zip(list(zip(*bdm_third_edges))[0], list(zip(*bdm_third_edges))[1], ['Tertiary']*len(list(zip(*bdm_third_edges))[1]))),
columns=['Viral Protein BDM', 'Host Protein BDM', 'BDM Type'])
frames = [df_whole, df_density, df_second, df_third]
bdm_df = pd.concat(frames)
bdm_df.reset_index(drop=True, inplace=True)
bdm_df['PPI'] = 1
# --------- NON PPI EDGES --------
# for each edge, get the different BDM values
# Same collection as above, but over the non-interacting pairs.
bdm_whole_edges = []
bdm_density_edges = []
bdm_second_edges = []
bdm_third_edges = []
for edge in non_ppi_edges:
# virus - host
# try to match, or else just ignore
try:
v_bdm_values = virus_bdms[edge[0]]
h_bdm_values = host_bdms[edge[1]]
except:
continue
bdm_whole_edges.append((v_bdm_values['whole_bdm'], h_bdm_values['whole_bdm']))
bdm_density_edges.append((v_bdm_values['bdm_density'], h_bdm_values['bdm_density']))
bdm_second_edges.append((v_bdm_values['second_order'], h_bdm_values['second_order']))
bdm_third_edges.append((v_bdm_values['third_order'], h_bdm_values['third_order']))
# turn these into dfs for plotting and stuff
# Long-format frames for the non-interacting pairs, tagged PPI = 0 below.
df_whole2 = pd.DataFrame(list(zip(list(zip(*bdm_whole_edges))[0], list(zip(*bdm_whole_edges))[1], ['Whole']*len(list(zip(*bdm_whole_edges))[1]))),
columns=['Viral Protein BDM', 'Host Protein BDM', 'BDM Type'])
df_density2 = pd.DataFrame(list(zip(list(zip(*bdm_density_edges))[0], list(zip(*bdm_density_edges))[1], ['Density']*len(list(zip(*bdm_density_edges))[1]))),
columns=['Viral Protein BDM', 'Host Protein BDM', 'BDM Type'])
df_second2 = pd.DataFrame(list(zip(list(zip(*bdm_second_edges))[0], list(zip(*bdm_second_edges))[1], ['Secondary']*len(list(zip(*bdm_second_edges))[1]))),
columns=['Viral Protein BDM', 'Host Protein BDM', 'BDM Type'])
df_third2 = pd.DataFrame(list(zip(list(zip(*bdm_third_edges))[0], list(zip(*bdm_third_edges))[1], ['Tertiary']*len(list(zip(*bdm_third_edges))[1]))),
columns=['Viral Protein BDM', 'Host Protein BDM', 'BDM Type'])
frames = [df_whole2, df_density2, df_second2, df_third2]
bdm_df2 = pd.concat(frames)
bdm_df2.reset_index(drop=True, inplace=True)
bdm_df2['PPI'] = 0
# --------- PUT THEM TOGETHER ---------
# make these into one DF
frames = [bdm_df, bdm_df2]
bdm_df_all = pd.concat(frames)
bdm_df_all = bdm_df_all.sort_values(by='PPI', ascending=True)
# get correlations for all the BDM measures
corr_whole_ppi = stats.spearmanr(df_whole['Viral Protein BDM'], df_whole['Host Protein BDM'])
corr_density_ppi = stats.spearmanr(df_density['Viral Protein BDM'], df_density['Host Protein BDM'])
corr_second_ppi = stats.spearmanr(df_second['Viral Protein BDM'], df_second['Host Protein BDM'])
corr_third_ppi = stats.spearmanr(df_third['Viral Protein BDM'], df_third['Host Protein BDM'])
# and for the non-ppis
corr_whole_non_ppi = stats.spearmanr(df_whole2['Viral Protein BDM'], df_whole2['Host Protein BDM'])
corr_density_non_ppi = stats.spearmanr(df_density2['Viral Protein BDM'], df_density2['Host Protein BDM'])
corr_second_non_ppi = stats.spearmanr(df_second2['Viral Protein BDM'], df_second2['Host Protein BDM'])
corr_third_non_ppi = stats.spearmanr(df_third2['Viral Protein BDM'], df_third2['Host Protein BDM'])
# Do independent t-test for all 4
t_test_whole = stats.ttest_ind(tuple(zip(df_whole['Viral Protein BDM'], df_whole['Host Protein BDM'])),
tuple(zip(df_whole2['Viral Protein BDM'], df_whole2['Host Protein BDM'])))
t_test_density = stats.ttest_ind(tuple(zip(df_density['Viral Protein BDM'], df_density['Host Protein BDM'])),
tuple(zip(df_density2['Viral Protein BDM'], df_density2['Host Protein BDM'])))
t_test_second = stats.ttest_ind(tuple(zip(df_second['Viral Protein BDM'], df_second['Host Protein BDM'])),
tuple(zip(df_second2['Viral Protein BDM'], df_second2['Host Protein BDM'])))
t_test_third = stats.ttest_ind(tuple(zip(df_third['Viral Protein BDM'], df_third['Host Protein BDM'])),
tuple(zip(df_third2['Viral Protein BDM'], df_third2['Host Protein BDM'])))
# BUG FIX: removed a stray bare `t` expression that raised NameError here.
# plot these x y values and show the correlation value
colors = ["#d6d6d6", "#000000"]
sns.set_palette(sns.color_palette(colors))
g = sns.relplot(x='Viral Protein BDM', y='Host Protein BDM', col='BDM Type', hue='PPI',
data=bdm_df_all, height=3, kind="scatter", s=1.5, alpha=0.3, edgecolor=None,
facet_kws={'sharey': False, 'sharex': False}, hue_order=[0, 1])
plt.show()
'''# Make a histogram of the host bdm values
# same with virus
# try to match virus proteins with host ones based on bdm values
# load in the bdm values
bdms_virus = pickle.load(open('bdm_pickles/bdms_proteins_EDSSMat90_data_t7', 'rb'))
bdms_host = pickle.load(open('bdm_pickles/bdms_proteins_EDSSMat90_data_t7_host', 'rb'))
# just get the whole_bdm values
bdms_virus_whole = [i.get('second_order') for i in bdms_virus.values()]
bdms_host_whole = [i.get('second_order') for i in bdms_host.values()]
sns.distplot(bdms_virus_whole)
plt.title('Histogram of Second Order BDM for T7')
plt.xlabel('Second Order BDM')
plt.ylabel('# Occurrences')
#plt.show()
# folder stuff
figure_folder = os.path.join('figures_host_protein')
if not os.path.exists(figure_folder):
os.makedirs(figure_folder)
plt.savefig(os.path.join(figure_folder, 'hist_t7_second_order.png'))'''
| alyssa-adams/bdm_proteins | lambda_ppi.py | lambda_ppi.py | py | 8,354 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "matplotlib.pyplot.style.use",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.style",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 5,
"usage_type": "name"
},
{
"api_name"... |
18287973881 | import numpy as np
import matplotlib.pyplot as plt
from components.tests import (
test_helium, test_helium_x, test_helium_xc, test_helium_xc_Vosko
)
from components.misc import get_n
## Command line arguments
# Module-level switches toggled by main() from argv.
arg_verbose = False
arg_plot = False
## r = radius in spherical coordinates
# Radial grid from 0 up to (but excluding) r_max with step r_step.
r_max = 15
r_step = 0.015
rr = np.arange(0, r_max, r_step)
## Start with wave function for hydrogen
# Hydrogen-like 1s orbital on the radial grid, used as the initial guess.
psi_start = 1 / np.sqrt(np.pi) * np.exp(-rr)
## Conditions for exiting the program
# Self-consistency loop stops below this tolerance or after max_iterations.
tolerance = 1e-7
max_iterations = 12
def main(args):
"""Run the helium DFT calculation selected below; optionally plot convergence.

Args:
args: argv-style list; recognizes -v/--verbose and -p/--plot.

Returns:
0 on success (used as the process exit code).
"""
## Parse command line arguments
global arg_verbose
global arg_plot
for arg in args[1:]:
if arg == "-v" or arg == "verbose" or arg == "--verbose":
arg_verbose = True
if arg == "-p" or arg == "plot" or arg == "--plot":
arg_plot = True
## Calculate energies and wave functions
#psi, E = test_helium(rr, psi_start, tolerance, max_iterations, arg_verbose)
#print("E =", E)
#print()
#psi_x, E_x = test_helium_x(
# rr, psi_start, tolerance, max_iterations, arg_verbose
#)
#print("E_x =", E_x)
#print()
#psi_xc, E_xc = test_helium_xc(
# rr, psi_start, tolerance, max_iterations, arg_verbose
#)
#print("E_xc_Perdew =", E_xc)
#print()
# Only the Vosko XC variant is currently active; others are kept for reference.
psi_xc_Vosko, E_xc_Vosko, E_vec = test_helium_xc_Vosko(
rr, psi_start, tolerance, max_iterations, arg_verbose
)
print("E_xc_vosko =", E_xc_Vosko)
#print()
## Plot
if arg_plot:
## Calculate electron densities
#n = get_n(psi)
#n_x = get_n(psi_x)
#n_xc = get_n(psi_xc)
# NOTE(review): n_xc_Vosko is only used by the commented-out density plot.
n_xc_Vosko = get_n(psi_xc_Vosko)
## Plot electron densities
#plt.plot(rr[1:], n[1:], label='No XC')
#plt.plot(rr[1:], n_x[1:], label='X')
#plt.plot(rr[1:], n_xc[1:], label='XC-Perdew')
#plt.plot(rr[1:], n_xc_Vosko[1:], label='XC-Vosko')
## Plot settings
#xmin = 0
#xmax = 2
#ymin = 0
#ymax = n_xc_Vosko[1] + 0.2
#plt.axis([xmin, xmax, ymin, ymax])
plt.grid()
plt.xlabel("Iteration number", fontsize='x-large')
plt.ylabel("Energy [Ha]", fontsize='x-large')
plt.xticks(fontsize='large')
plt.yticks(fontsize='large')
#plt.legend(loc='upper right', shadow=False, fontsize='x-large')
# Plot the per-iteration total energy to show convergence.
x = np.arange(1, len(E_vec) + 1)
plt.plot(x, E_vec)
## Display the plot
plt.show()
return 0
if __name__ == "__main__":
import sys
# Propagate main()'s return value as the process exit code.
sys.exit(main(sys.argv))
| jdbosser/DFT-Jupyter | Notebooks/heliumdft.py | heliumdft.py | py | 2,534 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.arange",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "numpy.exp",
"line_number": ... |
37752345818 | import sys
# Prevent spurious errors during `python setup.py test`, a la
# http://www.eby-sarna.com/pipermail/peak/2010-May/003357.html:
try:
import multiprocessing
except ImportError:
pass
from setuptools import setup, find_packages
# Extra keyword arguments passed through to setup() depending on the platform.
extra_setup = {}
# On Python 3, ask setuptools to run 2to3 over the sources at build time.
# NOTE(review): use_2to3 was removed in setuptools >= 58; this flag only works
# with an older, pinned setuptools -- confirm the build environment.
if sys.version_info >= (3,):
extra_setup['use_2to3'] = True
setup(
name='verifier_date_utils',
version='1.0',
description='Various utilities for operating on dates and times',
long_description=open('README.rst').read(),
author='Erik Rose',
packages=find_packages(exclude=['ez_setup']),
tests_require=['nose', 'mock'],
test_suite='nose.collector',
url='https://github.com/civiccc/verifier_date_utils',
include_package_data=True,
classifiers=[
'Intended Audience :: Developers',
'Natural Language :: English',
'Development Status :: 5 - Production/Stable',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Topic :: Software Development :: Libraries',
],
keywords=['date', 'parse', 'time'],
**extra_setup
)
| civiccc/verifier_date_utils | setup.py | setup.py | py | 1,161 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sys.version_info",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "setuptools.setup",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "setuptools.find_packages",
"line_number": 23,
"usage_type": "call"
}
] |
36492860982 | #!/usr/bin/env python3
from __future__ import absolute_import, division, print_function, unicode_literals
from . import invoke_rpc_builtin, invoke_rpc_python_udf
from . import ProcessGroupAgent
from .internal_rpc_utils import serialize, PythonUDF
import sys
import torch
from enum import Enum
_agent = None
def _require_initialized(func):
def wrapper(*args, **kwargs):
if _agent is None:
raise RuntimeError("RPC has not been initialized. "
"Call init_rpc(name) first.")
return func(*args, **kwargs)
return wrapper
def join_rpc():
    r"""
    Block until all local and remote RPC processes reach this method, process
    (send and receive) all pending messages, and then destroy local RPC agent.
    Every RPC process must call this method before exit.
    """
    global _agent
    agent = _agent
    if agent:
        # Drain outstanding messages, then drop the module-level handle so a
        # later _init_rpc() can create a fresh agent.
        agent.join()
        _agent = None
@_require_initialized
def sync_rpc():
    r"""
    Block until all local and remote RPC processes reach this method and finish
    sending all pending RPCs. As this method synchronizes at the process
    level, if multiple threads are spawned, only one of them should call this
    method at a time.
    """
    # Delegate the process-level barrier to the active agent.
    _agent.sync()
class RpcBackend(Enum):
# Currently the only supported transport; built on a c10d ProcessGroup.
PROCESS_GROUP = 1
# TODO: add a context manager to wrap _init_rpc and join_rpc
def _init_rpc(name, backend=RpcBackend.PROCESS_GROUP):
"""Create the module-level RPC agent for this worker (internal helper).

Args:
name: unique worker name for this process within the RPC group.
backend: transport to use; only RpcBackend.PROCESS_GROUP is supported.

Raises:
RuntimeError: on Python 2, if RPC is already initialized, or if the
backend is not recognized.
"""
if sys.version_info < (3, 0):
raise RuntimeError("RPC package does not support Python2.")
global _agent
if _agent:
raise RuntimeError("RPC is already initialized")
if backend == RpcBackend.PROCESS_GROUP:
from .distributed_c10d import _get_default_group
# Reuse the default process group created by init_process_group().
group = _get_default_group()
# TODO: add try-except and destroy _agent in all processes if any fails.
_agent = ProcessGroupAgent(name, group)
else:
raise RuntimeError("Unrecognized RPC backend ", backend)
@_require_initialized
def get_worker_id(worker_name=None):
    r"""
    Get worker id of a given worker name. Use this worker id to avoid passing
    an expensive string to ``rpc`` on every invocation.
    Arguments:
        worker_name (str): the string name of a worker. If ``None``, return the
            the id of the current worker. (default ``None``)
    """
    # Look up by name when one is given; otherwise report this worker's own id.
    return (_agent.get_worker_id(worker_name)
            if worker_name
            else _agent.get_worker_id())
@_require_initialized
def rpc(to, func, args=None, kwargs=None, async_call=False):
    r"""
    Make an RPC call to run function ``func`` on worker ``to``. By default, it
    blocks until the return value is locally available. RPC messages are sent
    and received in parallel to execution of Python code. This method is
    thread-safe.
    Arguments:
        to (int or str): id or name of the destination worker.
        func (callable): any callable function. builtin functions (like
                         ``torch.add``) can be sent over RPC more efficiently.
        args (tuple): the argument tuple for the ``func`` invocation.
        kwargs (dict): is a dictionary of keyword arguments for the ``func``
                       invocation.
        async_call (bool): If set to ``True``, this will be an asynchronous RPC,
                           and returns a ``torch.distributed.FutureMessage``
                           object immediately. Otherwise, this RPC will block
                           until the return value is locally available.
                           (default: ``False``)
    Returns:
        If ``async_call`` is ``False``, returns the result of running ``func``
        on ``args`` and ``kwargs``. If ``async_call`` is ``True``, returns a
        ``torch.distributed.FutureMessage`` object that can be waited on. When
        completed, the return value of ``func`` on ``args`` and ``kwargs`` can
        be retrieved from the ``FutureMessage`` object.
    Example::
        Synchronous example:
        On worker 0:
        >>> import torch.distributed as dist
        >>> dist.init_process_group(backend='gloo', rank=0, world_size=2)
        >>> dist.init_model_parallel("worker0")
        >>> ret = dist.rpc("worker1", torch.add, args=(torch.ones(2), 3))
        >>> dist.join_rpc()
        One worker 1:
        >>> import torch.distributed as dist
        >>> dist.init_process_group(backend='gloo', rank=1, world_size=2)
        >>> dist.init_model_parallel("worker1")
        >>> dist.join_rpc()
        Asynchronous example:
        On worker 0:
        >>> import torch.distributed as dist
        >>> dist.init_process_group(backend='gloo', rank=0, world_size=2)
        >>> dist.init_model_parallel("worker0")
        >>> worker1 = dist.get_worker_id("worker1")
        >>> fut1 = dist.rpc(worker1, torch.add, args=(torch.ones(2), 3), async_call=True)
        >>> fut2 = dist.rpc(worker1, min, args=(1, 2), async_call=True)
        >>> result = fut1.wait() + fut2.wait()
        >>> dist.join_rpc()
        One worker 1:
        >>> import torch.distributed as dist
        >>> dist.init_process_group(backend='gloo', rank=1, world_size=2)
        >>> dist.init_model_parallel("worker1")
        >>> dist.join_rpc()
    """
    if not callable(func):
        raise TypeError("function should be callable.")
    # Builtin torch operators have a JIT qualified name and take the fast path
    # below; arbitrary Python callables are serialized as a PythonUDF payload.
    qualified_name = torch.jit._find_builtin(func)
    args = args if args else ()
    kwargs = kwargs if kwargs else {}
    # Accept either a worker name or an id; normalize names to ids.
    if isinstance(to, str):
        to = get_worker_id(to)
    if qualified_name is not None:
        fut = invoke_rpc_builtin(_agent, to, qualified_name, *args, **kwargs)
    else:
        fut = invoke_rpc_python_udf(_agent, to, serialize(PythonUDF(func, args, kwargs)))
    if async_call:
        return fut
    else:
        # Synchronous call: block until the remote result arrives.
        return fut.wait()
| reynoldsm88/pytorch | torch/distributed/rpc.py | rpc.py | py | 5,784 | python | en | code | null | github-code | 36 | [
{
"api_name": "enum.Enum",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "sys.version_info",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "distributed_c10d._get_default_group",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "t... |
4543795483 | import logging
import requests
class Move(object):
    """A Pokemon move whose details are fetched lazily from a web API.

    Attributes:
        url: API URL the move data comes from (may be None).
        full_obj: dict with at least 'name' and 'power' keys.
    """

    def __init__(self, url, full_obj=None):
        # Remember the source URL so the lazy re-fetch in the properties works.
        self.url = url
        self.full_obj = full_obj or self.get_from_api(url)
        self._name = None
        self._power = 0

    @property
    def name(self):
        """Name of the move (None when no URL/data was supplied)."""
        if self.full_obj is None:
            # BUG FIX: the original called the non-existent get_move_from_api(),
            # which raised AttributeError whenever full_obj was reset to None.
            self.full_obj = self.get_from_api(self.url)
        self._name = self.full_obj['name']
        return self._name

    @property
    def power(self):
        """Numeric power of the move; 0.0 when the API reports no power."""
        if self.full_obj is None:
            self.full_obj = self.get_from_api(self.url)
        p = self.full_obj['power']
        if p is None:
            p = 0.0
        self._power = p
        return self._power

    def get_from_api(self, url):
        """Retrieve move information from the web API.

        Returns a name/power placeholder dict when ``url`` is None; otherwise
        GETs the URL and returns the decoded JSON (raising on HTTP errors).
        """
        if url is None:
            return { "name": None, "power": None }
        logging.info("Retrieving move from api url: {}".format(url))
        res = requests.get(url)
        res.raise_for_status()
        info = res.json()
        return info
| yehted/pokemon | src/pokemon/moves.py | moves.py | py | 998 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.info",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 38,
"usage_type": "call"
}
] |
41669892511 | # -*- coding: utf-8 -*-
import os
import tensorflow as tf
from keras.applications.vgg16 import VGG16
from keras.applications.resnet50 import ResNet50,preprocess_input,decode_predictions
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential,Model
from keras.layers import Input,Activation,Dropout,Flatten,Dense
from keras import optimizers
import numpy as np
import time
import glob
from PIL import Image
from pathlib import Path
tf.test.gpu_device_name()
def getdsample(cat, datadir):
    """Count the sample images on disk for every category.

    Args:
        cat: list of category sub-directory names.
        datadir: root directory containing one sub-directory per category.

    Returns:
        Total number of .png files found across all categories.
    """
    allfiles = []
    for i, c in enumerate(cat):
        # BUG FIX: the original concatenated the whole list ``cat`` into the
        # glob pattern (a TypeError) and printed the list instead of ``c``.
        files = glob.glob(datadir + c + "/*.png")
        print("category : {} sampledata : {}".format(c, len(files)))
        for f in files:
            allfiles.append(f)
    return len(allfiles)
def main():
    """Fine-tune ResNet50 on a 5-class household-object dataset and print
    the per-epoch training/validation accuracies."""
    classes=["ドライヤー","電気ケトル","オフィスチェア","ハサミ","コンセント"]
    classNums=len(classes)
    category=["dryer","kettle","officechair","scissors","outlet"]
    img_width,img_height=400,400
    traindatadir="./objdata/train/"
    testdatadir="./objdata/test/"
    trainSmp=getdsample(category,traindatadir)
    testSmp=getdsample(category,testdatadir)
    batchSize=100
    epoch=20
    # Augment the training stream; both generators rescale pixel values.
    trainDataset=ImageDataGenerator(
        rescale=1.0/224,
        zoom_range=0.2,
        horizontal_flip=True
    )
    validationDataset=ImageDataGenerator(rescale=1.0/224)
    traindata=trainDataset.flow_from_directory(
        traindatadir,
        target_size=(img_width,img_height),
        color_mode='rgb',
        classes=classes,
        class_mode='categorical',
        batch_size=batchSize,
        shuffle=True
    )
    validata=validationDataset.flow_from_directory(
        testdatadir,
        target_size=(img_width,img_height),
        color_mode='rgb',
        classes=classes,
        class_mode='categorical',
        batch_size=batchSize,
        shuffle=True
    )
    # NOTE(review): include_top=True keeps the 1000-way ImageNet head; a
    # feature-extraction setup would normally use include_top=False -- confirm.
    model=ResNet50(include_top=True,weights='imagenet',input_tensor=None,pooling=None)
    top_model=Sequential()
    top_model.add(Flatten(input_shape=model.output_shape[1:]))
    # BUG FIX: keyword was misspelled 'actvation', which raises a TypeError.
    top_model.add(Dense(256,activation='relu'))
    top_model.add(Dropout(0.6))
    top_model.add(Dense(classNums,activation='softmax'))
    resnetmodel=Model(input=model.input,output=top_model(model.output))
    # Freeze the first 15 layers so only the upper layers are fine-tuned.
    for layer in resnetmodel.layers[:15]:
        layer.trainable=False
    resnetmodel.compile(
        loss='categorical_crossentropy',
        # BUG FIX: the learning rate must be a float, not the string '1e-3'.
        optimizer=optimizers.SGD(lr=1e-3,momentum=0.9),
        metrics=['accuracy']
    )
    history=resnetmodel.fit_generator(
        traindata,
        samples_per_epoch=trainSmp,
        nb_epoch=epoch,
        validation_data=validata,
        nb_val_samples=testSmp
    )
    print(history.history['acc'])
    print(history.history['val_acc'])
| POD-azlamarhyu/Python_resnet_recognition | src/resnet2.py | resnet2.py | py | 2,806 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "tensorflow.test.gpu_device_name",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "tensorflow.test",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "glob.glob",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "keras.... |
37428719988 | from gl import *
from pyglet import resource
Mat4Floats = GLfloat*4  # ctypes array type for 4-component GL material params
NOTEXTURES = False      # set True to skip texture loading entirely
class MDLdict(object):
    """Materials display lists.

    Parses a Wavefront .mtl file fetched via pyglet.resource and compiles
    each material into a pair of GL display lists: list N sets the material
    parameters, list N+1 binds the material's texture (if any).
    """
    def __init__(self):
        self.mat_dls = {}       # material name -> base display list id
        self.mat_textures = {}  # texture file name -> pyglet texture
        self.mat_trans = {}     # material name -> True if translucent
    def __del__(self):
        # glDeleteLists may already be unloaded during interpreter shutdown
        if glDeleteLists:
            for dl in self.mat_dls.values():
                glDeleteLists(dl,2)
    def get(self,k):
        """Return the base display list id for material *k*, or None."""
        return self.mat_dls.get(k)
    def load(self,fname):
        """Compile every material found in the .mtl file *fname*."""
        mtllines = resource.file(fname,"ru")
        mname = None    # name of the material currently being compiled
        mat_dl = None   # its base display list id
        # .mtl keyword -> GL material parameter it maps onto
        mat_params = {'Ka':GL_AMBIENT, 'Kd': GL_DIFFUSE,
                      'Ks':GL_SPECULAR, 'Ke':GL_EMISSION}
        tname = None    # texture file name, if the material has one
        for line in mtllines:
            tokens = line.split()
            if not tokens or line[0] == '#':
                continue
            if tokens[0] == 'newmtl':
                # close the previous material's list before starting a new one
                if mname:
                    if not tname: glDisable(GL_TEXTURE_2D)
                    glEndList()
                tname = None
                mname = tokens[1]
                mat_dl = self.mat_dls.get(mname)
                if mat_dl is None:
                    mat_dl = self.mat_dls[mname] = glGenLists(2)
                glNewList(mat_dl, GL_COMPILE)
            elif tokens[0] == 'Ns':
                glMaterialf(GL_FRONT, GL_SHININESS, float(tokens[1]))
            elif tokens[0] in mat_params:
                params = map(float,tokens[1:])
                # default to opaque white, then overwrite supplied components
                floats4 = Mat4Floats(1.0,1.0,1.0,1.0)
                for i,f in enumerate(params):
                    floats4[i] = f
                # alpha component < 1.0 marks the material as translucent
                self.mat_trans[mname] = (floats4[3] < 1.0)
                glMaterialfv(GL_FRONT, mat_params[tokens[0]],floats4)
            elif tokens[0] == 'map_Kd' and not NOTEXTURES:
                # need a texture
                glEnable(GL_TEXTURE_2D)
                glCallList(mat_dl+1) # will bind texture
                glEndList()
                tname = tokens[1]
                tex = resource.texture(tokens[1])
                glNewList(mat_dl+1,GL_COMPILE)
                if tex:
                    self.mat_textures[tname] = tex
                    trans = self.mat_trans.get(mname,False)
                    self.mat_trans[mname] = trans
                    glEnable(GL_TEXTURE_2D)
                    glBindTexture(GL_TEXTURE_2D,tex.id)
        # will end list before starting next one, or at end
        if mname:
            if not tname: glDisable(GL_TEXTURE_2D)
            glEndList()
    def select(self,k):
        """Execute material *k*'s display list (no-op if *k* is unknown)."""
        dl = self.get(k)
        if dl:
            glCallList(dl)
    def is_transparent(self,k):
        """Return True if material *k* was flagged translucent when loaded."""
        return self.mat_trans.get(k,False)
# Module-level convenience API delegating to one shared MDLdict instance.
default_mdl_dict = MDLdict()
def load(fname):
    """Load materials from *fname* into the shared dictionary."""
    default_mdl_dict.load(fname)
def get(matname):
    """Return the display list id for *matname*, or None."""
    return default_mdl_dict.get(matname)
def select(matname):
    """Apply material *matname* (execute its display list)."""
    default_mdl_dict.select(matname)
def is_transparent(matname):
    """True if *matname* was flagged translucent when loaded."""
    return default_mdl_dict.is_transparent(matname)
| scavpy/Scav-Team-Pyweek-Aug-2010 | gamelib/tdgl/material.py | material.py | py | 2,940 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "pyglet.resource.file",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "pyglet.resource",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "pyglet.resource.texture",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "pyglet.r... |
24968801455 | from typing import List
from collections import deque
class Solution:
    def nearestExit(self, maze: List[List[str]], entrance: List[int]) -> int:
        """Return the minimum number of steps to the nearest border exit, or -1.

        Standard BFS over empty cells ('.').  Cells are marked visited when
        they are *enqueued* rather than when dequeued (the original marked on
        dequeue, so the same cell could sit in the queue many times).  The
        entrance itself never counts as an exit.
        """
        rows, cols = len(maze), len(maze[0])
        start = (entrance[0], entrance[1])
        seen = {start}
        dq = deque([start])
        steps = 0
        while dq:
            # process one BFS layer per step
            for _ in range(len(dq)):
                i, j = dq.popleft()
                # any border cell other than the entrance is an exit
                if (i == 0 or i == rows - 1 or j == 0 or j == cols - 1) \
                        and (i, j) != start:
                    return steps
                for di, dj in ((-1, 0), (1, 0), (0, -1), (0, 1)):
                    ni, nj = i + di, j + dj
                    if 0 <= ni < rows and 0 <= nj < cols \
                            and maze[ni][nj] == '.' \
                            and (ni, nj) not in seen:
                        seen.add((ni, nj))
                        dq.append((ni, nj))
            steps += 1
        return -1
# Shared Solution instance; manual spot-checks below are kept for reference.
sol = Solution()
# print(sol.nearestExit(maze=[["+", "+", ".", "+"],
#                             [".", ".", ".", "+"],
#                             ["+", "+", "+", "."]],
#                       entrance=[1, 2],
#                       ))
# print(sol.nearestExit(maze=[["+", "+", "+"], [".", ".", "."], ["+", "+", "+"]],
#                       entrance=[1, 0],
#                       ))
# print(sol.nearestExit(maze=[["+", "+", ".", "+"], [".", ".", ".", "+"], ["+", "+", "+", "."]],
#                       entrance=[1, 2],
#                       ))
| inverseTrig/leet_code | 1926_nearest_exit_from_entrance_in_maze.py | 1926_nearest_exit_from_entrance_in_maze.py | py | 1,646 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "typing.List",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "collections.deque",
"line_number": 8,
"usage_type": "call"
}
] |
25686410069 | import csv
import os
from keras.models import load_model
import numpy as np
import scipy.io as sio
# Trained Keras model, loaded once at import time and shared by all helpers.
model = load_model("models/trained_model_h5.h5")
def load_data(record_path, start_t, end_t):
    """Load one 12-lead ECG record and derive per-lead summary features.

    Returns a (1, samples, leads) signal batch and a (1, 60) feature batch
    holding, per lead: max, min, mean, std and max+min over the
    [start_t, end_t) sample window.
    """
    mat = sio.loadmat(record_path)
    window = mat['ECG']['data'][0][0][:, start_t:end_t]
    lead_max = window.max(axis=1)
    lead_min = window.min(axis=1)
    features = np.concatenate([
        lead_max,
        lead_min,
        window.mean(axis=1),
        window.std(axis=1),
        lead_max + lead_min,
    ])
    return window.T[np.newaxis, ...], features[np.newaxis, ...]
def get_data_reference_dict(reference_path) -> dict:
    """Parse REFERENCE.csv into {recording name: sorted list of int labels}.

    The header row (first column == 'Recording') is skipped and empty
    trailing cells are ignored.
    """
    references = {}
    with open(reference_path, "r") as csv_file:
        for record in csv.reader(csv_file):
            name, labels = record[0], record[1:]
            if name == 'Recording':
                continue
            references[name] = sorted(int(cell) for cell in labels if cell)
    return references
def get_data_and_delin_list(data_path):
    """Return the sorted record listing of *data_path* plus the parsed labels.

    REFERENCE.csv and macOS .DS_Store entries are dropped from the listing.
    """
    entries = sorted(os.listdir(data_path))
    references = get_data_reference_dict(os.path.join(data_path, "REFERENCE.csv"))
    for junk in ('REFERENCE.csv', '.DS_Store'):
        if junk in entries:
            entries.remove(junk)
    return entries, references
def load_and_predict_data_for_one_slice(data_path, start_t, end_t):
    """Run the module-level model on one [start_t, end_t) window of a record.

    Returns the raw prediction array and its first row as a plain list.
    """
    signal, features = load_data(data_path, start_t, end_t)
    prediction = model.predict([signal, features])
    result = prediction.tolist()
    return prediction, result[0]
def load_and_predict_data(data_path):
    """Classify a record by sliding a 3000-sample window (stride 500).

    Per-class scores are summed over all windows; the answer is the 1-based
    index of the class with the largest summed score.  Returns the raw
    per-window predictions, the answer, and the summed score list.
    """
    prediction = []
    result = []
    data = sio.loadmat(data_path)
    signal = data['ECG']['data'][0][0]
    max_len = signal.shape[1]
    for start_t in range(0, max_len - 3000, 500):
        pred, res = load_and_predict_data_for_one_slice(data_path, start_t, start_t + 3000)
        prediction.append(pred)
        result.append(res)
    # sum each class's score across all windows
    answer_list = []
    for i in range(len(result[0])):
        sum = 0  # NOTE(review): shadows the builtin sum()
        for j in range(len(result)):
            sum = sum + result[j][i]
        answer_list.append(sum)
    answer = np.argmax(answer_list) + 1  # classes are 1-based
    return prediction, answer, answer_list
def main():
    """Classify every validation-set record and write a results.csv report.

    The CSV lists each recording, its reference label(s), the prediction and
    whether it matched; the final row holds the overall accuracy.
    """
    data_folder_path = '../DATA/validation_set'
    data_list, data_reference_dict = get_data_and_delin_list(data_folder_path)
    print('good data size:', len(data_list))
    results = {}
    for data in data_list:
        data_path = os.path.join(data_folder_path, data)
        prediction, result, result_list = load_and_predict_data(data_path)
        results[data] = result
    with open("results.csv", 'w') as csvfile:
        writer = csv.writer(csvfile)
        writer.writerow(['№', 'Recording', 'Reference', 'Prediction', 'Correct'])
        correct_predictions = 0
        for idx, (key, value) in enumerate(results.items()):
            name = key.split('.')[0]
            reference = ", ".join(str(x) for x in data_reference_dict[name])
            # a prediction counts as correct if it matches any reference label
            if value in data_reference_dict[name]:
                correct_predictions = correct_predictions + 1
            writer.writerow([idx + 1, name, reference, value, value in data_reference_dict[name]])
        writer.writerow([len(results), '', '', '', correct_predictions / len(results)])
    csvfile.close()  # redundant: the with-statement already closed the file
# Script entry point.
if __name__ == "__main__":
    main()
| AlexPetukhov/nnsu | GOOD/6sec/apply_model_no_delin.py | apply_model_no_delin.py | py | 3,501 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "keras.models.load_model",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "scipy.io.loadmat",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "scipy.io",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "numpy.empty",
"l... |
722631652 | """
Реализовать два небольших скрипта:
а) итератор, генерирующий целые числа, начиная с указанного,
б) итератор, повторяющий элементы некоторого списка, определенного заранее.
Подсказка:
Использовать функцию count() и cycle() модуля itertools.
Обратите внимание, что создаваемый цикл не должен быть бесконечным.
Необходимо предусмотреть условие его завершения.
Например, в первом задании выводим целые числа, начиная с 3, а при достижении числа 10 завершаем цикл.
Во втором также необходимо предусмотреть условие, при котором повторение элементов списка будет прекращено.
"""
# Импортируем из модуля functools функции count() и cycle().
from itertools import count, cycle
# Iterator a) generates consecutive integers starting from a given value.
print("-" * 20, "Итератор а")
# Value the number sequence starts from.
start_iterator = 3
# Print numbers from start_iterator onwards; once the value exceeds 10
# the loop stops (count() itself is infinite).
for el in count(start_iterator):
    if el > 10:
        break
    print(el)
# Iterator b) endlessly repeats the elements of a predefined list.
print("-" * 20, "Итератор б")
# Set up the conditions:
# 1. the list whose elements will be cycled,
# 2. the maximum number of repetitions,
# 3. a counter tracking how many elements have been emitted.
cycling_list = [5, 4, 3, 2, 1, 0]
max_iterations = 15
iteration_count = 0
# Run the loop:
# 1. print the current element,
# 2. bump the repetition counter,
# 3. stop once the counter reaches max_iterations (cycle() never ends on its own).
for el in cycle(cycling_list):
    print(el)
    iteration_count += 1
    if iteration_count >= max_iterations:
        break
| Jaidergan/Lesson04 | lesson04/example06.py | example06.py | py | 2,775 | python | ru | code | 0 | github-code | 36 | [
{
"api_name": "itertools.count",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "itertools.cycle",
"line_number": 42,
"usage_type": "call"
}
] |
24722131243 | #-*-coding:utf-8-*-
"""
This is base modoul bank
"""
import imp
import json
import urllib2
import urllib
import os
import time
import sys
from thePath import rootPath
class BANK(object):
    """
    Base class for the bank spiders (Python 2 / urllib2).

    Provides a shared mobile User-Agent, HTML fetching, date-stamped output
    directories under data/primaryData/BANK/, and JSON-lines persistence.
    """
    def __init__(self):
        # pretend to be a mobile Chrome browser
        self.useragent = (
            "Mozilla/5.0 (Linux; Android 6.0;"
            " Nexus 5 Build/MRA58N) AppleWebKit/537.36"
            " (KHTML, like Gecko) "
            "Chrome/60.0.3112.113 Mobile Safari/537.36"
        )
        self.headers={'User-Agent': self.useragent}
        # today's date, used to name the per-day output directory
        self.time = time.strftime("%Y-%m-%d")
    def getHtml(self, url):
        """
        Fetch *url* and return the raw page body, or None on URL errors.
        """
        try:
            request = urllib2.Request(url, headers = self.headers)
            response = urllib2.urlopen(request, timeout = 60)
            pageCode = response.read()
            return pageCode
        except urllib2.URLError as e:
            # NOTE(review): errors without a "reason" attribute fall through
            # and implicitly return None as well
            if hasattr(e, "reason"):
                return None
    def getOutFile(self, fileName):
        """
        Return today's output path for *fileName*, removing any stale copy.
        """
        outFile = self.getDirectory() + fileName
        if os.path.exists(outFile):
            os.system("rm " + outFile)
        return outFile
    def getDirectory(self):
        """
        Return (creating it if needed) today's output directory path.
        """
        backdir = (rootPath + \
        "data/primaryData/BANK/")
        Datedir = backdir + self.time + "/"
        if not os.path.exists(Datedir):
            try:
                os.makedirs(Datedir)
            except OSError as e:
                # lost the creation race -- the directory exists now
                return Datedir
        return Datedir
    def saveFile(self, item, outFile):
        """
        Append *item* to *outFile* as one JSON line, replacing escape
        sequences and backslashes with '-' to keep each record on one line.
        """
        fp = open(outFile, 'a+')
        li = json.dumps(item, ensure_ascii=False) + '\n'
        fp.write(li.encode('utf8').replace("\\r", "-").replace("\\n","-")\
        .replace("\\", "-").replace("\\t","-"))
        fp.close()
| WYL-BruceLong/bank_spider | superBank.py | superBank.py | py | 2,022 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "time.strftime",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "urllib2.Request",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "urllib2.urlopen",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "urllib2.URLError",
... |
2671644316 | import math
import os
import sys
import numpy as np
import torch
from torch import nn
from torch.nn import Conv2d
from torch.nn import functional as F
from torch.nn.utils import remove_weight_norm, spectral_norm, weight_norm
from . import commons, modules
from .commons import get_padding
from .modules import (ConvNext2d, HarmonicEmbedder, IMDCTSymExpHead,
LoRALinear1d, SnakeFilter, WaveBlock)
# make the package parent importable when this file is run directly
parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(parent_dir)
# sample-rate tag -> sample rate in Hz
sr2sr = {
    "24k": 24000,
    "32k": 32000,
    "40k": 40000,
    "48k": 48000,
}
class GeneratorVoras(torch.nn.Module):
    """Waveform generator: decodes content embeddings plus pitch to audio.

    Projections of the content embedding, the harmonic pitch embedding and
    the speaker embedding are summed, refined by a stack of WaveBlocks,
    converted to a waveform by an IMDCT head and post-filtered by a Snake
    filter; the output is squashed with tanh.
    """
    def __init__(
        self,
        emb_channels,
        inter_channels,
        gin_channels,
        n_layers,
        sr,
        hop_length,
    ):
        super(GeneratorVoras, self).__init__()
        self.n_layers = n_layers
        self.emb_pitch = HarmonicEmbedder(768, inter_channels, gin_channels, 16, 15) # # pitch 256
        self.plinear = LoRALinear1d(inter_channels, inter_channels, gin_channels, r=8)
        self.glinear = weight_norm(nn.Conv1d(gin_channels, inter_channels, 1))
        self.resblocks = nn.ModuleList()
        self.init_linear = LoRALinear1d(emb_channels, inter_channels, gin_channels, r=4)
        for _ in range(self.n_layers):
            self.resblocks.append(WaveBlock(inter_channels, gin_channels, [9] * 2, [1] * 2, [1, 9], 2, r=4))
        self.head = IMDCTSymExpHead(inter_channels, gin_channels, hop_length, padding="center", sample_rate=sr)
        self.post = SnakeFilter(4, 8, 9, 2, eps=1e-5)
    def forward(self, x, pitchf, x_mask, g):
        # sum the content, pitch and speaker contributions, then refine
        x = self.init_linear(x, g) + self.plinear(self.emb_pitch(pitchf, g), g) + self.glinear(g)
        for i in range(self.n_layers):
            x = self.resblocks[i](x, x_mask, g)
        x = x * x_mask
        x = self.head(x, g)
        x = self.post(x)
        return torch.tanh(x)
    def remove_weight_norm(self):
        # strip weight-norm wrappers (typically before inference/export)
        self.plinear.remove_weight_norm()
        remove_weight_norm(self.glinear)
        for l in self.resblocks:
            l.remove_weight_norm()
        self.init_linear.remove_weight_norm()
        self.head.remove_weight_norm()
        self.post.remove_weight_norm()
    def fix_speaker(self, g):
        # bake speaker embedding g into the LoRA-conditioned layers
        self.plinear.fix_speaker(g)
        self.init_linear.fix_speaker(g)
        for l in self.resblocks:
            l.fix_speaker(g)
        self.head.fix_speaker(g)
    def unfix_speaker(self, g):
        # undo fix_speaker so another speaker can be baked in
        self.plinear.unfix_speaker(g)
        self.init_linear.unfix_speaker(g)
        for l in self.resblocks:
            l.unfix_speaker(g)
        self.head.unfix_speaker(g)
class Synthesizer(nn.Module):
    """Top-level synthesizer wrapping GeneratorVoras.

    Holds the speaker-embedding table and drives the generator either on
    random training slices (forward) or on full sequences (infer).
    """
    def __init__(
        self,
        segment_size,
        n_fft,
        hop_length,
        inter_channels,
        n_layers,
        spk_embed_dim,
        gin_channels,
        emb_channels,
        sr,
        **kwargs
    ):
        super().__init__()
        # sr may arrive as a tag such as "40k"; translate it to Hz
        if type(sr) == type("strr"):
            sr = sr2sr[sr]
        self.segment_size = segment_size
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.inter_channels = inter_channels
        self.n_layers = n_layers
        self.spk_embed_dim = spk_embed_dim
        self.gin_channels = gin_channels
        self.emb_channels = emb_channels
        self.sr = sr
        self.dec = GeneratorVoras(
            emb_channels,
            inter_channels,
            gin_channels,
            n_layers,
            sr,
            hop_length
        )
        self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels)
        print(
            "gin_channels:",
            gin_channels,
            "self.spk_embed_dim:",
            self.spk_embed_dim,
            "emb_channels:",
            emb_channels,
        )
        # speaker id currently baked into the decoder, if any
        self.speaker = None
    def remove_weight_norm(self):
        self.dec.remove_weight_norm()
    def change_speaker(self, sid: int):
        """Re-bake the decoder's LoRA layers for speaker *sid*."""
        if self.speaker is not None:
            # first undo the previously fixed speaker
            g = self.emb_g(torch.from_numpy(np.array(self.speaker))).unsqueeze(-1)
            self.dec.unfix_speaker(g)
        g = self.emb_g(torch.from_numpy(np.array(sid))).unsqueeze(-1)
        self.dec.fix_speaker(g)
        self.speaker = sid
    def forward(
        self, phone, phone_lengths, pitch, pitchf, ds
    ):
        """Training pass: decode a random segment of each sequence."""
        g = self.emb_g(ds).unsqueeze(-1)
        x = torch.transpose(phone, 1, -1)
        x_mask = torch.unsqueeze(commons.sequence_mask(phone_lengths, x.size(2)), 1).to(phone.dtype)
        x_slice, ids_slice = commons.rand_slice_segments(
            x, phone_lengths, self.segment_size
        )
        pitchf_slice = commons.slice_segments2(pitchf, ids_slice, self.segment_size)
        mask_slice = commons.slice_segments(x_mask, ids_slice, self.segment_size)
        o = self.dec(x_slice, pitchf_slice, mask_slice, g)
        return o, ids_slice, x_mask, g
    def infer(self, phone, phone_lengths, pitch, nsff0, sid, max_len=None):
        """Inference pass: decode the (optionally truncated) full sequence."""
        g = self.emb_g(sid).unsqueeze(-1)
        x = torch.transpose(phone, 1, -1)
        x_mask = torch.unsqueeze(commons.sequence_mask(phone_lengths, x.size(2)), 1).to(phone.dtype)
        o = self.dec((x * x_mask)[:, :, :max_len], nsff0, x_mask, g)
        return o, x_mask, (None, None, None, None)
class DiscriminatorP(torch.nn.Module):
    """Single-period discriminator.

    Folds the 1-D waveform into a 2-D map of shape (time/period, period)
    and scores it with an initial strided conv followed by a stack of
    speaker-conditioned ConvNext2d blocks.
    """
    def __init__(self, period, gin_channels, upsample_rates, final_dim=256, use_spectral_norm=False):
        super(DiscriminatorP, self).__init__()
        self.period = period
        self.use_spectral_norm = use_spectral_norm
        self.init_kernel_size = upsample_rates[-1] * 3
        norm_f = weight_norm if use_spectral_norm == False else spectral_norm
        N = len(upsample_rates)
        self.init_conv = norm_f(Conv2d(1, final_dim // (2 ** (N - 1)), (self.init_kernel_size, 1), (upsample_rates[-1], 1)))
        self.convs = nn.ModuleList()
        # channel width doubles at each stage until final_dim is reached
        for i, u in enumerate(upsample_rates[::-1][1:], start=1):
            self.convs.append(
                ConvNext2d(
                    final_dim // (2 ** (N - i)),
                    final_dim // (2 ** (N - i - 1)),
                    gin_channels,
                    (u*3, 1),
                    (u, 1),
                    4,
                    r=2 + i//2
                )
            )
        self.conv_post = weight_norm(Conv2d(final_dim, 1, (3, 1), (1, 1)))
    def forward(self, x, g):
        """Return (flattened score map, per-stage feature maps) for x given speaker g."""
        fmap = []
        # 1d to 2d
        b, c, t = x.shape
        if t % self.period != 0: # pad first
            n_pad = self.period - (t % self.period)
            x = F.pad(x, (n_pad, 0), "reflect")
            t = t + n_pad
        x = x.view(b, c, t // self.period, self.period)
        x = torch.flip(x, dims=[2])
        x = F.pad(x, [0, 0, 0, self.init_kernel_size - 1], mode="constant")
        x = self.init_conv(x)
        x = F.leaky_relu(x, modules.LRELU_SLOPE)
        x = torch.flip(x, dims=[2])
        fmap.append(x)
        for i, l in enumerate(self.convs):
            x = l(x, g)
            fmap.append(x)
        x = F.pad(x, [0, 0, 2, 0], mode="constant")
        x = self.conv_post(x)
        x = torch.flatten(x, 1, -1)
        return x, fmap
class MultiPeriodDiscriminator(torch.nn.Module):
    """Ensemble of DiscriminatorP instances over a set of prime periods."""
    def __init__(self, upsample_rates, gin_channels, periods=[2, 3, 5, 7, 11, 17], **kwargs):
        super(MultiPeriodDiscriminator, self).__init__()
        discs = [
            DiscriminatorP(i, gin_channels, upsample_rates, use_spectral_norm=False) for i in periods
        ]
        # total upsampling factor of the generator
        self.ups = np.prod(upsample_rates)
        self.discriminators = nn.ModuleList(discs)
    def forward(self, y, y_hat, g):
        """Score real (y) and generated (y_hat) audio with every sub-discriminator."""
        fmap_rs = []
        fmap_gs = []
        y_d_rs = []
        y_d_gs = []
        for d in self.discriminators:
            y_d_r, fmap_r = d(y, g)
            y_d_g, fmap_g = d(y_hat, g)
            y_d_rs.append(y_d_r)
            y_d_gs.append(y_d_g)
            fmap_rs.append(fmap_r)
            fmap_gs.append(fmap_g)
        return y_d_rs, y_d_gs, fmap_rs, fmap_gs
| w-okada/voice-changer | server/voice_changer/RVC/inferencer/voras_beta/models.py | models.py | py | 7,992 | python | en | code | 12,673 | github-code | 36 | [
{
"api_name": "os.path.dirname",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "sys.path.append",
"l... |
10863291429 | import pytest
from cleverwrap import CleverWrap
from cleverwrap.errors import UnknownAPIError
def test_init():
    """The API key passed to the constructor is stored on the wrapper."""
    cw = CleverWrap("API_KEY")
    assert cw.key == "API_KEY"
def test_say(mock_requests):
    """say() returns the mocked reply and reset() clears the conversation id.

    The getreply endpoint is stubbed via the mock_requests fixture.
    """
    mock_requests.add(
        mock_requests.GET,
        'https://www.cleverbot.com/getreply?input=Hello&key=API_KEY&cs=&conversation_id=&wrapper=CleverWrap.py',
        match_querystring=True,
        json={
            'cs': 'AAAABBCCDD',
            'output': 'Hello.',
            'interaction_count': "2",
            'conversation_id': "foobar",
            'time_taken': "15",
            'time_elapsed': "13",
        },
    )
    cw = CleverWrap("API_KEY")
    assert cw.say("Hello") == "Hello."
    assert cw.default_conversation.convo_id
    cw.reset()
    assert not cw.default_conversation.convo_id
def test_api_error(mock_requests):
    """With no mocked endpoint registered, say() raises UnknownAPIError."""
    cw = CleverWrap("API_KEY")
    with pytest.raises(UnknownAPIError):
        cw.say("Hello")
| TotallyNotRobots/cleverwrap.py | tests/test_cleverwrap.py | test_cleverwrap.py | py | 959 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "cleverwrap.CleverWrap",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "cleverwrap.CleverWrap",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "cleverwrap.CleverWrap",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "pyte... |
7137523065 | import mysql.connector as connector
import datetime as date
import logging
class User:
    """Data-access layer for the RKFINANCE ``customers`` table.

    SECURITY FIX: every query now uses driver-side parameter binding
    (``%s`` placeholders) instead of ``str.format``, closing the
    SQL-injection holes in the original implementation.  The public
    interface and printed messages are unchanged.
    """
    # timestamp captured at class-creation time, echoed in user-facing messages
    dates1 = date.datetime.now()
    name = "RKFINANCE"

    def __init__(self):
        # one shared connection per User instance
        self.con = connector.connect(host='localhost',
                                     user='root',
                                     password='root',
                                     database='RKFINANCE')

    def create(self, name, phone, DOB, Address, LOAN, interest, password, total):
        """Insert a new customer row and commit."""
        query = ("insert into customers"
                 "(name,phone,DOB,Address,LOAN,Interest,password,total)"
                 "values(%s,%s,%s,%s,%s,%s,%s,%s)")
        curr = self.con.cursor()
        curr.execute(query, (name, phone, DOB, Address, LOAN, interest, password, total))
        self.con.commit()
        print("Account created successfully at ", self.dates1)

    def find_customer(self, account_no):
        """Print every column of the account, or an error if it is unknown."""
        cur = self.con.cursor()
        cur.execute("select * from customers where userid = %s", (account_no,))
        res = cur.fetchall()
        if res:
            for row in res:
                print("Account number : ", row[0])
                print("name : ", row[1])
                print("phone number : ", row[2])
                print("DOB : ", row[3])
                print("Address : ", row[4])
                print(" balance : ", row[5])
                print(" balance : ", row[6])
                print(" balance : ", row[7])
                print(" balance : ", row[8])
                print()
                print()
        else:
            print("USER DOESNT EXIST ! PLEASE TRY WITH VALID ACCOUNT ")

    def delete_customer(self, account_no):
        """Delete the account if it exists; otherwise print an error."""
        curr = self.con.cursor()
        curr.execute("select * from customers where userid = %s", (account_no,))
        if curr.fetchall():
            cur = self.con.cursor()
            cur.execute("delete from customers where userid = %s", (account_no,))
            self.con.commit()
            print("Your Account is deleted Successfully at ", self.dates1)
            print()
        else:
            print("ACCOUNT DOESNT EXIST ! PLEASE TRY WITH VALID ACCOUNT")

    def update_customer(self, account_no):
        """Interactively update name, phone and address for an account."""
        curr = self.con.cursor()
        curr.execute("select * from customers where userid = %s", (account_no,))
        if curr.fetchall():
            newName = input("enter newname : ").capitalize()
            while True:
                newphone = int(input('enter your phone number : '))
                # exactly 10 digits are accepted
                if len(str(newphone)) != 10:
                    print("Invalid phone number ! please enter 10 digit only ")
                else:
                    break
            newaddress = input("enter newaddress : ").capitalize()
            cur = self.con.cursor()
            cur.execute(
                "update customers set name = %s , phone = %s , Address = %s where userid = %s",
                (newName, newphone, newaddress, account_no))
            self.con.commit()
            print("Account details updated successfully at ", self.dates1)
        else:
            print("ACCOUNT DOESNT EXIST ! PLEASE TRY WITH VALID ACCOUNT")

    def returnmoney(self, acc_no):
        """Subtract a repayment entered by the user from the stored total."""
        cur = self.con.cursor()
        cur.execute("select * from customers where userid = %s", (acc_no,))
        res = cur.fetchone()
        if res:
            curr = self.con.cursor()
            curr.execute("select total from customers where userid = %s", (acc_no,))
            res1 = curr.fetchone()
            print("your balance is : ", res[8])
            return_amt = int(input("enter amount you want to return "))
            remaining_amt = int(res1[0]) - return_amt
            currr = self.con.cursor()
            currr.execute("update customers set total = %s where userid = %s",
                          (remaining_amt, acc_no))
            self.con.commit()
            print("money returned successfully and updated ")
        else:
            print("user not found")
| Kitturamkrishna/RKFINANCE | User.py | User.py | py | 4,361 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "datetime.datetime.now",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "mysql.connector.connect",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "mys... |
41493530569 | # -*- coding: utf-8 -*-
"""
Created on Fri Jan 29 10:51:07 2021
@author: REMI DECOUTY, DAMIEN LU
"""
import pandas
import glob, os
import igraph as ig
import math
# récupération de la liste de tous les fichiers à étudier
os.chdir("data")
allCsvFiles = glob.glob("*")
os.chdir("..")
# coloration d'une arête, en fonction du type de liaison
def color_edge(node, index):
    """Map a Leontis-Westhof pair type to its display colour.

    *node* is either a single pair-type string (when *index* == -1) or a
    comma-separated list of pair types, in which case the colour of the
    *index*-th entry is returned.  Unknown types yield None.
    """
    pair_colors = {
        "cWW": "blue",
        "tWW": "red",
        "cWH": "orange",
        "cHW": "orange",
        "tWH": "yellow",
        "tHW": "yellow",
        "cWS": "brown",
        "cSW": "brown",
        "tWS": "magenta",
        "tSW": "magenta",
        "cHH": "cyan",
        "tHH": "purple",
        "cHS": "pink",
        "cSH": "pink",
        "tHS": "green",
        "tSH": "green",
        "cSS": "maroon",
        "tSS": "gray",
    }
    pair_type = node if index == -1 else node.split(',')[index]
    return pair_colors.get(pair_type)
# fonction d'affichage du graphe, à partir du nom d'un fichier au format CSV
def draw_graph_from_csv(file):
    """Build the igraph graph of an RNA chain from a CSV file under data/.

    Nodes are nucleotides (index_chain, expected to be numbered 1..N);
    consecutive nucleotides get backbone edges, and paired nucleotides get
    edges coloured by their pair_type_LW interaction.  Publishes the graph
    through the module-level globals ``g`` and ``filename``.
    """
    global filename
    filename = file
    df = pandas.read_csv("data/" + filename)[['index_chain','paired','pair_type_LW']]
    global g # module-level graph consumed by find_subgraph
    g = ig.Graph() # initialise the graph
    if (df['index_chain'][0] != 1): # index_chain not in expected format (nucleotides numbered 1..N)
        print ("\nLe fichier " + file + " n'est pas au bon format ! \n\n")
        return
    for node in df.iterrows():
        g.add_vertices(1) # add the nodes (1 nucleotide = 1 node)
    for i in range(len(g.vs)):
        g.vs[i]["label"]= str(i+1) # node label = its index_chain value
    for i in range(len(g.vs)):
        currentNode = df['index_chain'][i] # node currently being processed
        pairedNode = str(df['paired'][i]) # nucleotides interacting with the current node
        # phosphodiester (backbone) bonds between consecutive nucleotides
        if i != len(g.vs)-1:
            g.add_edges([(i,i+1)])
        # canonical interactions between paired nucleotides
        if pairedNode.find(",") != -1: # the "paired" list holds more than one element
            arrayPairedNodes = pairedNode.split(',') # collect each nucleotide of the list
            for node in arrayPairedNodes:
                if (int(node) in df['index_chain'].tolist()): # the paired nucleotide exists in the chain
                    if not g.are_connected(int(node)-1,currentNode-1): # only create the edge if the reverse one does not already exist
                        g.add_edges([(currentNode-1,int(node)-1)]) # add the canonical bond to the graph
                        res_edge = g.es.find(_source=currentNode-1, _target=int(node)-1)
                        res_edge["color"] = color_edge(df['pair_type_LW'][i], arrayPairedNodes.index(node))
        else:
            pairedNode = pandas.to_numeric(df['paired'][i]) # cast string -> number, to detect an unpaired nucleotide (NaN)
            if not math.isnan(pairedNode) and int(pairedNode) in df['index_chain'].tolist():
                if not g.are_connected(int(pairedNode)-1,currentNode-1):
                    g.add_edges([(currentNode-1,int(pairedNode)-1)])
                    res_edge = g.es.find(_source=currentNode-1, _target=int(pairedNode)-1)
                    res_edge["color"] = color_edge(df['pair_type_LW'][i],-1)
    visual_style = {}
    visual_style["edge_width"] = 3
    g.vs["color"] = "white"
# find_subgraph : fonction de recherches des sous-graphes, à partir d'un motif issu de carnaval
# Le principe est le suivant : la fonction get_subisomorphisms_vf2 de la librairie igraph renvoie
# la liste des sous-graphes isomorphes entre deux graphes. De plus, la fonction de comparaison compare_edges
# va vérifier pour chaque arête 2 à 2 si leur couleur est identique ou non.
# Si tel est le cas, alors on aura trouvé un sous-graphe correspondant au motif
# Enfin, subgraph_list, qui contient la liste de tous les sous-graphes trouvés, peut contenir des doublons
# Ex: [0,2,4,1] et [4,2,0,1]
# Pour éviter cela, on va filter subgraph_list dans un set, pour supprimer tous ces doublons
def find_subgraph(graph, motif, motif_name):
    """Count and print the occurrences of *motif* inside *graph*.

    Uses igraph's VF2 subgraph-isomorphism search with an edge-compatibility
    callback requiring matching edge colours (i.e. identical pair types).
    Matches covering the same vertex set are de-duplicated via frozensets.
    Returns the number of distinct matches.

    BUG FIX: the search now runs on the *graph* argument instead of silently
    using the module-level global ``g``.  Existing call sites always passed
    ``g``, so their behaviour is unchanged.
    """
    def compare_edges(g1, g2, i1, i2):
        # edges are compatible only when their colours (pair types) match
        try:
            return g1.es[i1]['color'] == g2.es[i2]['color']
        except Exception:
            return False
    subgraph_list = graph.get_subisomorphisms_vf2(motif, edge_compat_fn=compare_edges)
    if len(subgraph_list) >= 1:
        # collapse duplicate matches that cover the same vertex set
        results = [tuple(x) for x in set(map(frozenset, subgraph_list))]
        print ("Le motif " + motif_name + " est présent " + str(len(results)) + " fois dans la chaîne d'ARN " + filename + "\n")
        for result in sorted(results):
            print (result)
        print ("\n\n")
        return len(results)
    else:
        print ("Le motif " + motif_name + " est absent de la chaîne d'ARN " + filename + "\n\n")
        return 0
# initialisation des motifs RIN issus de Carnaval, sous forme de graphe
# la couleur des arêtes permet d'identiier le type de liaison pair_type_LW
def transorm_RIN_to_graph():
    """Build the Carnaval RIN motifs (rin_23, rin_129) as coloured igraphs.

    Edge colours identify the pair_type_LW interaction type (matching
    color_edge's mapping); the motifs are published through the
    module-level globals ``rin_23`` and ``rin_129``.
    """
    global rin_23, rin_129
    rin_23 = ig.Graph()
    rin_129 = ig.Graph()
    rin_129.add_vertices(4)
    rin_129.add_edges([(0,1),(1,2),(2,3),(0,3)])
    rin_129.vs["color"] = "white"
    for i in range(len(rin_129.vs)):
        rin_129.vs[i]["label"]= str(i+1)
    res_edge = rin_129.es.find(_source=1, _target=2)
    res_edge["label"] = "tWW" # tWW bond
    res_edge["color"] = "red"
    res_edge = rin_129.es.find(_source=0, _target=3)
    res_edge["label"] = "tHS" # tHS bond (per the official nomenclature)
    res_edge["color"] = "green"
    rin_23.add_vertices(8)
    rin_23.add_edges([(0,1),(1,2),(2,3),(3,4),(4,5),(5,7),(1,6),(6,7),(1,4),(0,5)])
    rin_23.vs["color"] = "white"
    for i in range(len(rin_23.vs)):
        rin_23.vs[i]["label"]= str(i+1)
    res_edge = rin_23.es.find(_source=0, _target=5)
    res_edge["label"] = "cWW"
    res_edge["color"] = "blue"
    res_edge = rin_23.es.find(_source=1, _target=4)
    res_edge["label"] = "cWW"
    res_edge["color"] = "blue"
    res_edge = rin_23.es.find(_source=2, _target=3)
    res_edge["label"] = "cWW"
    res_edge["color"] = "blue"
    res_edge = rin_23.es.find(_source=1, _target=6)
    res_edge["label"] = "cSS"
    res_edge["color"] = "maroon"
    res_edge = rin_23.es.find(_source=5, _target=7)
    res_edge["label"] = "tSS"
    res_edge["color"] = "gray"
# Build the RIN motif graphs once at import time.
transorm_RIN_to_graph()
# Le code ci-dessous permet de parcourir tous les fichiers pour compter le nombre total de motif.
# Il est déconseillé d'exécuter le programme ci-dessous si votre machine n'est pas très puissante,
# le programme mettra plusieurs dizaines de minutes à s'éxécuter.
# Il faudra alors privilégier l'analyse fichier par fichier, en utilisant la version projet.py.
# En effet, on crée ici un pool de threads, égal au maximum des capacités de notre machine
# (cpu_count = nombre max de threads par coeur), on va ainsi utiliser au maximum les capacités du processeur.
# Les threads crées vont exécuter de manière parallèle la fonction count_nb_occurences, qui renvoie le nombre
# de motif rin_23 et rin_129 pour un fichier CSV donné.
# Le gain de performance en terme de temps d'exécution est assez énorme.
# Par exemple, sur une machine assez ancienne disposant de 4 coeurs, avec 2 threads par coeur,
# le temps d'exécution multithreadé est de 20 minutes, contre plus d'1 heure et 20 minutes en monothread.
# Sur une machine plus récente et plus puissante, le programme devrait probablement s'éxécuter plus rapidement.
# Pour réussir à diminuer le temps d'éxécution, nous avons enlevé les affichages des graphes et les messages
# dans la console, qui auraient monopolisé les ressources systèmes (17120 graphes à afficher !)
import multiprocessing
def count_nb_occurences(csvFile):
    """Worker: build the graph for one CSV file and count both RIN motifs.

    Returns [rin_23 count, rin_129 count] for that file.
    """
    results = []
    draw_graph_from_csv(csvFile)
    results.append(find_subgraph(g,rin_23,"rin_23"))
    results.append(find_subgraph(g,rin_129,"rin_129"))
    return results
if __name__ == '__main__':
    # Make the Pool of workers
    pool = multiprocessing.Pool(processes=multiprocessing.cpu_count())
    count_rins = ((pool.map(count_nb_occurences, allCsvFiles)))
    # Close the pool and wait for the work to finish
    pool.close()
    pool.join()
    # element-wise sum across the per-file [rin_23, rin_129] count pairs
    result = [sum(x) for x in zip(*count_rins)]
    print("Nombre d'occurences de RIN 23 : " + str(result[0]))
    print("Nombre d'occurences de RIN 129 : " + str(result[1]))
| dlu02/projet-bioinfo | projet_multithread.py | projet_multithread.py | py | 9,281 | python | fr | code | 0 | github-code | 36 | [
{
"api_name": "os.chdir",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.chdir",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 73,
... |
21902537705 | import subprocess
import time
from wifi import Cell
import pathlib
script_path = pathlib.Path(__file__).parent.resolve()
import sys
sys.path.append('../')
from common import mesh_utils
def scan_wifi(interface):
    '''
    Scan for access points whose SSID matches the AuthAP naming pattern.

    Returns the SSID of the best-quality match, or False when none is
    found.  (Fixed: locals no longer shadow the builtin max(), and the
    quality string is split only once per access point.)
    '''
    killall(interface)
    print('Scanning APs')
    best_ssid = False
    best_quality = 0
    for ap in Cell.all(interface):
        if ap.ssid is not None and 'AuthAP' in ap.ssid:
            # quality is reported as "got/out_of", e.g. "54/70"
            parts = ap.quality.split('/')
            quality = float(parts[0]) / float(parts[1])
            if quality > best_quality:
                best_ssid = ap.ssid
                best_quality = quality
    return best_ssid
def connect_wifi(candidate, interface):
    '''
    Connect *interface* to the AP named *candidate* (chosen by scan_wifi()).
    Delegates the actual association to the apmanager.sh helper script.
    '''
    killall(interface)
    command = [str(script_path) + '/apmanager.sh', '-ap_connect', candidate, interface]
    subprocess.call(command, shell=False)
def killall(interface):
    '''
    Reset wireless state: stop avahi, kill wpa_supplicant/hostapd when the
    mesh is down, then bounce *interface* with ifconfig.
    '''
    subprocess.call(['/etc/init.d/S50avahi-daemon', 'stop'], shell=False)
    if not mesh_utils.verify_mesh_status(): # only kill the AP daemons when the mesh is not running
        subprocess.call(['killall', 'wpa_supplicant'], shell=False)
        subprocess.call(['killall', 'hostapd'], shell=False)
    subprocess.call(['ifconfig', interface, 'down'], shell=False)
    subprocess.call(['ifconfig', interface, 'up'], shell=False)
def create_ap(ID, interface):
    '''
    If no AuthAP is available, create a new one via apmanager.sh.
    '''
    killall(interface)
    time.sleep(2)
    subprocess.call(
        [str(script_path) + '/apmanager.sh', '-ap_create', ID, interface],
        shell=False,
    )
def clean_ap(ID, interface):
    '''
    Remove the previously created AuthAP.
    Using apmanager.sh
    (The original docstring was copy-pasted from create_ap and wrongly
    described this as creating an AP.)
    '''
    killall(interface)
    time.sleep(2)
    command = [str(script_path) + '/apmanager.sh', '-ap_remove', ID, interface]
    subprocess.call(command, shell=False)
| tiiuae/mesh_com | modules/sc-mesh-secure-deployment/src/1_5/features/mutual/utils/wifi_ssrc.py | wifi_ssrc.py | py | 2,045 | python | en | code | 12 | github-code | 36 | [
{
"api_name": "pathlib.Path",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "sys.path.append",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "wifi.Cell.all",
"line_num... |
2538475868 | import uuid
from django.db import models
from django.db.models.deletion import CASCADE, SET_NULL
# Create your models here.
"""
Crear un nuevo proyecto Django, con una app llamada PRODUCTOS, que sirva para manejar un catálogo de productos representado por las siguientes entidades:
Producto(nombre, descripcion, url_imagen, precio_unidad, categoria)
Categoria(nombre, categoria_padre)
Algunas notas:
Un producto pertenece a una y solo una categoría
Una categoría no tiene porqué tener una categoría padre.
"""
class Categoria(models.Model):
    """Product category; categories can be nested via ``categoria_padre``."""
    # random, non-editable UUID primary key (ids are not guessable/sequential)
    id=models.UUIDField(primary_key=True,default=uuid.uuid4, editable=False)
    nombre=models.CharField(max_length=20)
    # optional parent category; deleting the parent cascades to its children
    categoria_padre=models.ForeignKey('Categoria',on_delete=CASCADE,blank=True,null=True)
    class Meta:
        # default queryset ordering: alphabetical by name
        ordering=["nombre"]
    def __str__(self):
        return self.nombre
class Producto(models.Model):
id=models.UUIDField(primary_key=True,default=uuid.uuid4, editable=False)
nombre=models.CharField(max_length=20)
descripcion=models.TextField('Descripcion',max_length=100)
url_imagen=models.URLField('Imagen')
precio_unidad=models.DecimalField('Precio',max_digits=6, decimal_places=2)
categoria=models.ForeignKey(Categoria,on_delete=SET_NULL,null=True)
class Meta:
ordering=["nombre"]
def __str__(self):
return self.nombre+' | '+self.precio_unidad+'€ | '+self.categoria | EsperanzaMacarena/ejerciciosDjango | ejercicio1/ejercicio1/productos/models.py | models.py | py | 1,429 | python | es | code | 0 | github-code | 36 | [
{
"api_name": "django.db.models.Model",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "django.db.models.UUIDField",
"line_number": 20,
"usage_type": "call"
},
{
"api_name"... |
75113361384 | import arcade
import random
SCREEN_WIDTH = 800
SCREEN_HEIGHT = 600
COLORS = [arcade.color.BLUE, arcade.color.FANDANGO_PINK,arcade.color.GOLDEN_POPPY, arcade.color.TURQUOISE_BLUE,arcade.color.SPRING_GREEN,arcade.color.RED,arcade.color.LAVENDER_INDIGO]
class Cercle():
    """A filled circle: radius, centre coordinates and fill colour."""
    def __init__(self,rayon,x,y,color):
        self.rayon = rayon
        self.centre_x = x
        self.centre_y = y
        self.color = color
    def draw(self):
        # Render the circle on the current arcade frame.
        arcade.draw_circle_filled(self.centre_x, self.centre_y, self.rayon, self.color)
class MyGame(arcade.Window):
    """Window with 20 random circles.

    Left-click deletes every circle under the cursor; right-click gives
    them a new random colour.
    """

    def __init__(self):
        super().__init__(SCREEN_WIDTH, SCREEN_HEIGHT, "Exercice #1")
        self.liste_cercles = []

    def setup(self):
        """Create 20 randomly placed/sized/coloured circles, fully on screen."""
        for _ in range(20):
            rayon = random.randint(10, 50)
            centre_x = random.randint(0 + rayon, SCREEN_WIDTH - rayon)
            centre_y = random.randint(0 + rayon, SCREEN_HEIGHT - rayon)
            color = random.choice(COLORS)
            self.liste_cercles.append(Cercle(rayon, centre_x, centre_y, color))

    def on_draw(self):
        arcade.start_render()
        for cercle in self.liste_cercles:
            cercle.draw()

    @staticmethod
    def _contient(cercle, x, y):
        # Bounding-box hit test (same test as the original implementation).
        return (cercle.centre_x - cercle.rayon < x < cercle.centre_x + cercle.rayon
                and cercle.centre_y - cercle.rayon < y < cercle.centre_y + cercle.rayon)

    def on_mouse_press(self, x: int, y: int, button: int, modifiers: int):
        if button == arcade.MOUSE_BUTTON_LEFT:
            # Bug fix: the original called list.remove() while iterating the
            # same list, which skips the element after each removal; rebuild
            # the list instead.
            self.liste_cercles = [c for c in self.liste_cercles
                                  if not self._contient(c, x, y)]
        elif button == arcade.MOUSE_BUTTON_RIGHT:
            for cercle in self.liste_cercles:
                if self._contient(cercle, x, y):
                    cercle.color = random.choice(COLORS)
def main():
    """Entry point: build the window, populate it and start the event loop."""
    game = MyGame()
    game.setup()
    arcade.run()
main() | Mousavir/main.py | Exercies_arcade.py | Exercies_arcade.py | py | 1,984 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "arcade.color",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "arcade.draw_circle_filled",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "arcade.Window",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "random.... |
39309304527 | #!/usr/bin/python3
#
# MEG sensor space analysis for visual LTP (vertical & horizontal gratings)
#
# Authors: Paul Sowman, Judy Zhu
#######################################################################################
import os
import mne
import meegkit # for TSPCA
import glob
import matplotlib.pyplot as plt
import numpy as np
import copy
from mne.preprocessing import find_bad_channels_maxwell
#from autoreject import get_rejection_threshold # noqa
#from autoreject import Ransac # noqa
#from autoreject.utils import interpolate_bads # noqa
from scipy import stats
import my_preprocessing
# Make plots interactive when running in interactive window in vscode
#plt.switch_backend('TkAgg') You can use this backend if needed
#plt.ion()
# %matplotlib qt
# set up file and folder paths here
exp_dir = "/mnt/d/Work/analysis_ME197/"
subject_MEG = '230426_72956_S2' #'220112_p003'
meg_tasks = ['_ltp1', '_ltp2', '_ltp3'] #'_oddball' #''
# the paths below should be automatic
data_dir = exp_dir + "data/"
processing_dir = exp_dir + "processing/"
meg_dir = data_dir + subject_MEG + "/meg/"
save_dir = processing_dir + "meg/" + subject_MEG + "/"
os.system('mkdir -p ' + save_dir) # create the folder if needed
fname_elp = glob.glob(meg_dir + "*.elp")
fname_hsp = glob.glob(meg_dir + "*.hsp")
fname_mrk = glob.glob(meg_dir + "*.mrk")
#%% Loop over tasks: pre, 2min post, 30min post
# One pass per recording: pre (_ltp1), 2 min post (_ltp2), 30 min post
# (_ltp3).  Each pass: load raw KIT data, TSPCA denoise, filter+ICA,
# re-time triggers to the photodetector (PD), epoch, and save/plot ERFs.
# NOTE(review): indentation reconstructed; the evoked-saving section is
# placed at loop level so the -ave.fif is refreshed on every run -- confirm.
for counter, task in enumerate(meg_tasks):
    fname_raw = glob.glob(meg_dir + "*" + task + ".con")
    ica_fname = save_dir + subject_MEG + task + "-ica.fif"
    epochs_fname = save_dir + subject_MEG + task + "-epo.fif"
    ERFs_fname = save_dir + subject_MEG + task + "-ave.fif"
    ERFs_figure_fname = save_dir + subject_MEG + task + ".png"
    raw = mne.io.read_raw_kit(
        fname_raw[0],
        mrk=fname_mrk[0],
        elp=fname_elp[0],
        hsp=fname_hsp[0],
        stim=[*range(177, 179)],  # these triggers (177 and 178) indicate vertical or horizontal
        slope="+",
        stim_code="channel",
        stimthresh=1,  # 2 for adults
        preload=True,
        allow_unknown_format=False,
        verbose=True,
    )
    # Apply TSPCA for noise reduction (channels 160-162 are the references)
    noisy_data = raw.get_data(picks="meg").transpose()
    noisy_ref = raw.get_data(picks=[160,161,162]).transpose()
    data_after_tspca, idx = meegkit.tspca.tsr(noisy_data, noisy_ref)[0:2]
    raw._data[0:160] = data_after_tspca.transpose()
    # browse data to identify bad sections & bad channels
    raw.plot()
    # Filtering & ICA (0.1-40 Hz band-pass; ICA solution cached in ica_fname)
    raw = my_preprocessing.reject_artefact(raw, 0.1, 40, True, ica_fname)
    #%% ch misc 23-29 are trigger channels
    # MISC 18 and 19 == horizontal / vertical gratings
    # MISC 6 or 10 == Photodetector
    #%% Finding events
    events = mne.find_events(
        raw,
        output="onset",
        consecutive=False,
        min_duration=0,
        shortest_event=1,  # 5 for adults
        mask=None,
        uint_cast=False,
        mask_type="and",
        initial_event=False,
        verbose=None,
    )
    # recode the two grating trigger channels into small event ids
    for index, event in enumerate(events):
        if event[2] == 177:  # ch177 == MISC 18
            events[index, 2] = 2  # horizontal
        elif event[2] == 178:  # ch178 == MISC 19
            events[index, 2] = 3  # vertical
    # Find times of PD triggers
    # Ensure correct PD channel is entered here, might sometimes be 165
    events_PD = mne.find_events(
        raw, stim_channel=[raw.info["ch_names"][x] for x in [169]], output="onset"
    )
    # merge stimulus and PD events, sorted by sample time
    combined_events = np.concatenate([events, events_PD])
    combined_events = combined_events[np.argsort(combined_events[:, 0])]
    #%% find the difference between PD time and trigger time
    pd_delta = []
    for index, event in enumerate(combined_events):
        if (
            index > 0  # PD can't be first event
            and combined_events[index, 2] == 1  # current trigger is PD trigger
            and combined_events[index - 1, 2] != 1  # previous trigger is not PD trigger
        ):
            pd_delta.append(
                combined_events[index, 0] - combined_events[index - 1, 0]  # find the time difference
            )
    # show histogram of PD delays
    n, bins, patches = plt.hist(
        x=pd_delta, bins="auto", color="#0504aa", alpha=0.7, rwidth=0.85
    )
    plt.grid(axis="y", alpha=0.75)
    plt.xlabel("Delay (ms)")
    plt.ylabel("Frequency")
    plt.title("Photo Detector Delays")
    plt.text(
        70,
        50,
        r"$mean="
        + str(round(np.mean(pd_delta)))
        + ", std="
        + str(round(np.std(pd_delta)))
        + "$",
    )
    maxfreq = n.max()
    # Set a clean upper y-axis limit.
    plt.ylim(ymax=np.ceil(maxfreq / 10) * 10 if maxfreq % 10 else maxfreq + 10)
    # Use target events to align triggers & avoid outliers using z of 3
    z = np.abs(stats.zscore(pd_delta))
    #TODO: check this part works correctly when we do have outliers!
    if [pd_delta[i] for i in np.where(z > 3)[0]]:
        tmax = -max([pd_delta[i] for i in np.where(z > 3)[0]]) / 1000
    else:
        tmax = 0
    events_to_find = [2, 3]  # target events
    sfreq = raw.info["sfreq"]  # sampling rate
    tmin = -0.4  # PD occurs after trigger, hence negative
    fill_na = None  # the fill value for non-target
    reference_id = 1  # PD
    # loop through events and replace PD events with event class identifier i.e. trigger number
    events_target = {}
    for event in events_to_find:
        new_id = 20 + event
        events_target["event" + str(event)], lag = mne.event.define_target_events(
            combined_events,
            reference_id,
            event,
            sfreq,
            tmin,
            tmax,
            new_id,
            fill_na,
        )
    events = np.concatenate((events_target["event2"], events_target["event3"]))
    event_ids = {
        "horizontal": 22,
        "vertical": 23,
    }
    #%% === Epoching === #
    if os.path.exists(epochs_fname):
        # epochs already computed on a previous run; reuse them
        epochs_resampled = mne.read_epochs(epochs_fname)
    else:
        epochs = mne.Epochs(
            raw, events, event_id=event_ids, tmin=-0.1, tmax=0.5, preload=True
        )
        conds_we_care_about = ["horizontal", "vertical"]
        epochs.equalize_event_counts(conds_we_care_about)
        # sanity check - PD triggers occur at 0ms
        mne.viz.plot_evoked(
            epochs.average(picks="MISC 010")
        )
        # downsample to 100Hz
        print("Original sampling rate:", epochs.info["sfreq"], "Hz")
        epochs_resampled = epochs.copy().resample(100, npad="auto")
        print("New sampling rate:", epochs_resampled.info["sfreq"], "Hz")
        # save the epochs to file
        epochs_resampled.save(epochs_fname)
    # alternatively, save the evoked (i.e. ERF) here instead of epochs.
    # That will save quite a bit of space. We prob won't need to
    # read the actual epochs again as we are not doing source analysis.
    # Note though: write_evokeds() only accept a list, not a dict, of evokeds,
    # so when read back in, you won't be able to do evokeds[cond], instead you
    # need to enumerate the evokeds and find out the cond via evoked.comment
    evokeds = []
    for cond in epochs_resampled.event_id:
        evokeds.append(epochs_resampled[cond].average())
    mne.write_evokeds(ERFs_fname, evokeds)
    # plot ERFs
    mne.viz.plot_evoked(epochs_resampled.average(), gfp="only")
    fig = mne.viz.plot_compare_evokeds(
        [
            epochs_resampled["horizontal"].average(),
            epochs_resampled["vertical"].average(),
        ]
    )
    fig[0].savefig(ERFs_figure_fname)
    # report = mne.Report(title=fname_raw[0])
    # report.add_evokeds(
    #     evokeds=evoked, titles=["VEP"], n_time_points=25 # Manually specify titles
    # )
    # report.save(fname_raw[0] + "_report_evoked.html", overwrite=True)
| Macquarie-MEG-Research/MEG_analysis_mne | Vince_MEG_MD_VEP.py | Vince_MEG_MD_VEP.py | py | 7,910 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.system",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 48,
... |
617619433 | # monitor
import subprocess
import os
import time
import datetime
import threading
import shutil
import argparse
import Dashboard
BROWSER_PATH = ''
METHOD = None
URL = "http://127.0.0.1:8080/flag?"
MODE1 = "--incognito" # 시크릿 모드
MODE2 = "--no-sandbox" # 샌드박스 비활성화
TIMEOUT = 300 # 5min
p = None
RUN_FLAG = False
def main():
    """Launch the target browser in a loop and harvest ASan crash reports.

    Spins until RUN_FLAG is cleared (by the timeout watchdog in __main__),
    starts the browser on the fuzzing URL, then watches stderr for
    AddressSanitizer output.  On a crash, the current testcase and the full
    ASan log are copied into ./log/ and the dashboard counters are updated.
    """
    global RUN_FLAG, METHOD, DASHBOARD, p
    while(1):
        if RUN_FLAG:
            # Another browser run is still being handled; sleep briefly so
            # we do not busy-wait at 100% CPU (the original spun hot here).
            time.sleep(0.1)
            continue
        RUN_FLAG = True
        # NOTE(review): shell=True with a list argument is unusual; it works
        # on Windows (list2cmdline) but would misbehave on POSIX.
        cmd = [BROWSER_PATH, URL, MODE1, MODE2]
        p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=1, close_fds=True, shell=True)
        DASHBOARD.Chrome_PID = p.pid
        while p.poll() is None:
            line = p.stderr.readline()
            if b"AddressSanitizer" in line:
                now_time = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
                # preserve the crashing testcase next to its log
                testcase_copy = './log/crash_%s_'+now_time+'.ttf'
                shutil.copy2('./templates/glitch_testcase.ttf', testcase_copy % METHOD)
                # dashboard bookkeeping
                DASHBOARD.CRSAH_COUNT += 1
                DASHBOARD.LASTEST_CRASH_TIME = now_time
                # dump the ASan report: first matching line + rest of stderr
                log_path = './log/crash_%s_'+now_time+'.log'
                with open(log_path % METHOD, "wb") as fp:
                    fp.write(line)
                    for line in p.stderr:
                        fp.write(line)
        # browser exited (or crashed): kill leftovers and rearm the loop
        subprocess.check_output('taskkill /f /im chrome_asan.exe', shell=True)
        p.stderr.close()
        p.stdout.close()
        DASHBOARD.Chrome_COUNT += 1
        time.sleep(1)
        RUN_FLAG = False
def argparse_init():
    """Build and return the command-line parser for the Glitch monitor."""
    cli = argparse.ArgumentParser(description='Glitch Monitor')
    cli.add_argument('--method', '-m', help='METHOD : normal : use Glitch', default='normal')
    return cli
def set_fuzzing_type(parser):
    """Map the --method argument onto the global fuzzing URL and METHOD."""
    global URL, METHOD
    args = parser.parse_args()
    # both 'normal' and 'freetype' target the freetype endpoint
    if args.method in ('normal', 'freetype'):
        URL += 'freetype_test?ttf=png'
        METHOD = 'freetype'
    else:
        parser.print_help()
        os._exit(1)
if __name__ == '__main__':
    # refuse to start until the browser binary has been configured
    if(BROWSER_PATH == ''):
        print("[!] Please set the BROWSER_PATH.")
        exit(1)
    parser = argparse_init()
    set_fuzzing_type(parser)
    try:
        DASHBOARD = Dashboard.Dashboard()
        DASHBOARD.run_dashboard('./templates/glitch_testcase.ttf')
        while True:
            # run main() in a worker thread so a hung browser can be reaped
            browser_run_thread = threading.Thread(target=main)
            browser_run_thread.start()
            browser_run_thread.join(TIMEOUT)  # wait up to TIMEOUT (5 min)
            if browser_run_thread.is_alive():
                # timeout: force-kill the browser and rearm the run loop
                subprocess.check_output('taskkill /f /im chrome_asan.exe', shell=True)
                p.stderr.close()
                p.stdout.close()
                DASHBOARD.Chrome_COUNT += 1
                print("Restart!")
                time.sleep(1)
                RUN_FLAG = False
    except KeyboardInterrupt:
        os._exit(0)
| BOB-Jour/Glitch_Fuzzer | run_fuzz_windows10.py | run_fuzz_windows10.py | py | 3,327 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "subprocess.Popen",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "subprocess.PIPE",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.now",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "datetime.... |
2795908861 | from flask import Flask, render_template, redirect
from flask_pymongo import PyMongo
from .settings import Config
from .search import Searcher
from .forms import SearchForm
def create_app():
    """Application factory: configure Flask + Mongo and register the routes."""
    app = Flask(__name__)
    app.config.from_object(Config)
    mongo = PyMongo(app, connect = True)
    # database handles (currently unused in this factory; kept for parity)
    webIndex = mongo.cx.webIndex
    pageIndex = mongo.cx.pageIndex

    @app.route('/')
    def index():
        return render_template('index.html', form = SearchForm())

    @app.route('/search', methods=('GET', 'POST'))
    def search():
        form = SearchForm()
        if not form.validate_on_submit():
            return redirect('/')
        query = form.query.data
        searcher = Searcher()
        if query is None:
            return str(form)
        results, searchTime = searcher.search(query)
        return render_template('search.html', query = query, results = results, searchTime = searchTime)

    return app
| Patrick-Bender/wholesearchcatalog.com | flaskapp-Backup/flaskapp/__init__.py | __init__.py | py | 965 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "settings.Config",
"line_number": 10,
"usage_type": "argument"
},
{
"api_name": "flask_pymongo.PyMongo",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "forms.SearchForm... |
41042866801 | import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import itertools
from PIL import Image
from scipy.cluster.hierarchy import dendrogram
from sklearn.datasets import load_iris
import numpy as np
from sklearn.cluster import AgglomerativeClustering
from pandas.core.common import flatten
from sklearn.cluster import DBSCAN
import pickle
def load_pickle(file_name):
    """Deserialise and return the object stored in *file_name*."""
    with open(file_name, 'rb') as reader:
        return pickle.load(reader)
from sklearn.mixture import GaussianMixture
import langid
from matplotlib.colors import ListedColormap
from sklearn.cluster import DBSCAN#
from sklearn.cluster import KMeans
import pytesseract
from PIL import Image
import pickle
import os
import numpy as np
import pandas as pd
import xlsxwriter
def load_pickle(file_name):
    # NOTE(review): exact duplicate of the load_pickle defined earlier in
    # this module; this re-definition shadows the first one. Consider
    # removing one of them.
    with open( file_name, 'rb') as reader:
        data = pickle.load(reader)
    return data
def create_local_path(path):
    """Create directory *path* (including parents) if it does not exist.

    Failures are deliberately swallowed so callers can treat this as
    best-effort, matching the original behaviour.
    """
    try:
        # exist_ok avoids the check-then-create race of the original
        # "if not os.path.exists(...)" pattern.
        os.makedirs(path, exist_ok=True)
    except OSError:
        # best-effort: ignore e.g. permission errors
        pass
def plot_dendrogram(model, **kwargs):
    """Plot a dendrogram for a fitted sklearn AgglomerativeClustering model.

    Builds a scipy-style linkage matrix from the model's merge tree
    (``children_`` / ``distances_``) and forwards *kwargs* to scipy's
    ``dendrogram``.
    """
    n_samples = len(model.labels_)
    counts = np.zeros(model.children_.shape[0])
    # number of original samples under each internal node of the merge tree
    for node, (left, right) in enumerate(model.children_):
        total = 0
        for child in (left, right):
            # leaf nodes have ids < n_samples; internal nodes are offset
            total += 1 if child < n_samples else counts[child - n_samples]
        counts[node] = total
    linkage_matrix = np.column_stack(
        [model.children_, model.distances_, counts]
    ).astype(float)
    # Plot the corresponding dendrogram
    dendrogram(linkage_matrix, **kwargs)
def plot_cells_by_clusters(path_to_image,df_coordinates, output_filename = 'filename.png', label_column = 'labels'):
    """
    Draw the source image with one rectangle per detected cell, coloured by
    cluster label, then save the figure to *output_filename* and show it.

    :params df_coordinates: dataframe where cells are represented as (x1,y1,x2,y2)
    """
    # NOTE(review): the image is read from the hard-coded 'input/' folder.
    im = Image.open(r'input/'+path_to_image)
    # Create figure and axes
    fig, ax = plt.subplots(figsize=(15,15))
    # Display the image
    ax.imshow(im)
    # one distinct colour per cluster label, cycled in cluster order
    palette = itertools.cycle(sns.color_palette('bright', df_coordinates[label_column].nunique()))
    for cluster in df_coordinates[label_column].unique():
        color =next(palette)
        for idx,row in df_coordinates.loc[df_coordinates[label_column]==cluster].iterrows():
            left_top = (row.x1,row.y1)
            x_delta = row.x2-row.x1
            y_delta = row.y2-row.y1
            # Create a Rectangle patch
            rect = patches.Rectangle(left_top, x_delta, y_delta, linewidth=2, edgecolor=color, facecolor='none')
            # Add the patch to the Axes
            ax.add_patch(rect)
    plt.savefig(output_filename, dpi=300)
    plt.show()
def generate_small_cropped_cell_images(df_coordinates,original_filename,original_image):
    """
    Crop each cell (with a 4px margin on every side) out of the original
    image and save it as output/cropped_cells_images/<file>/<idx>.png.

    :param df_coordinates: dataframe with one row per cell (x1,y1,x2,y2)
    :param original_filename: name used for the output sub-folder
    :param original_image: PIL image the cells are cropped from
    :return: None (images are written to disk)
    """
    create_local_path(r'output/cropped_cells_images/' + original_filename)
    for idx, cell in df_coordinates.iterrows():
        # pad the bounding box by 4px so cell borders are included
        cut_cell = cell.copy(deep=True)
        cut_cell.x1 -= 4
        cut_cell.y1 -= 4
        cut_cell.x2 += 4
        cut_cell.y2 += 4
        cropped_cell = original_image.crop(cut_cell)
        name = r'output/cropped_cells_images/' + original_filename + '/' + f'{idx}' + '.png'
        cropped_cell.save(name, quality=95)
def run_tessaract_for_cut_cells(df_coordinates,original_filename):
    """OCR every cropped cell image and return the list of parsed strings.

    Each cell is OCRed twice (Russian and English); langid picks the more
    plausible of the two results, defaulting to the Russian parse.
    """
    parsed_text = []
    for idx, row in df_coordinates.iterrows():
        name = r'output/cropped_cells_images/' + original_filename + '/' + f'{idx}' + '.png'
        cropped_image = Image.open(name)
        parsed_cell_rus = pytesseract.image_to_string(cropped_image, lang='rus',
                                                      config='--tessdata-dir "C:\\Program Files\\Tesseract-OCR\\tessdata"')
        parsed_cell_eng = pytesseract.image_to_string(cropped_image, lang='eng',
                                                      config='--tessdata-dir "C:\\Program Files\\Tesseract-OCR\\tessdata"')
        # we need to determine what text is in this cell (English or Russian)
        try:
            lang_r, score_r = langid.classify(parsed_cell_rus)
            lang_e, score_e = langid.classify(parsed_cell_eng)
            # Bug fix: langid.classify returns a (code, score) tuple whose
            # code is a plain string; the original accessed a non-existent
            # ``.lang`` attribute, which raised AttributeError into the
            # except-branch and silently forced the Russian parse.  The
            # original also left parsed_cell unassigned when
            # score_e <= score_r; default to Russian in that case.
            if score_e > score_r and lang_e == 'en':
                parsed_cell = parsed_cell_eng
            else:
                parsed_cell = parsed_cell_rus
        except Exception:
            # no features in text, most likely a number
            parsed_cell = parsed_cell_rus
        if parsed_cell.count('\n') == 1:
            # successful parse: drop the single trailing newline
            parsed_cell = parsed_cell.split('\n')[0]
        parsed_text.append(parsed_cell)
    return parsed_text
def create_parsed_tabular_dataset(df_coordinates):
    """Arrange parsed cells into a 2-D table using their cluster labels.

    Rows/columns are ordered by the mean y/x centre of each label cluster.
    Returns two DataFrames of identical shape: one holding the source cell
    indices and one holding the parsed text.  When several cells map to the
    same slot, indices are merged into a flat list and texts joined with
    ' // '.
    """
    x_centroids = df_coordinates.groupby(by='x_labels').mean()[['x_center']].sort_values(by='x_center').reset_index(drop=True)
    y_centroids = df_coordinates.groupby(by='y_labels').mean()[['y_center']].sort_values(by='y_center').reset_index(drop=True)
    table_idx = pd.DataFrame(index=y_centroids.index, columns=x_centroids.index)
    table_text = pd.DataFrame(index=y_centroids.index, columns=x_centroids.index)
    for idx, cell in df_coordinates.iterrows():
        # nearest column / row centroid for this cell centre
        col = x_centroids.sub(cell.x_center).abs().idxmin().values[0]
        row = y_centroids.sub(cell.y_center).abs().idxmin().values[0]
        if table_idx.iloc[row, col] is np.nan:
            table_idx.iloc[row, col] = idx
            table_text.iloc[row, col] = cell.parsed_text
        else:
            # slot already occupied: merge indices and concatenate texts
            table_idx.iloc[row, col] = list(flatten([table_idx.iloc[row, col], idx]))
            table_text.iloc[row, col] = table_text.iloc[row, col] + ' // ' + cell.parsed_text
    return table_idx, table_text
| pletnev-aleksandr/nsd_hackathon | libb.py | libb.py | py | 6,556 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pickle.load",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 3... |
17581792452 | from typing import Any, Dict, Tuple, Optional
from urllib.parse import urlparse
from starwhale.utils import config
from starwhale.base.uri.exceptions import NoMatchException
def _get_instances() -> Dict[str, Dict]:
    """Return the ``instances`` section of the local swcli config."""
    return config.load_swcli_config().get("instances", {})  # type: ignore[no-any-return]
def _get_default_instance_alias() -> str:
    """Return the alias of the currently selected swcli instance ('' if unset)."""
    return config.load_swcli_config().get("current_instance", "")  # type: ignore[no-any-return]
def _find_alias_by_url(url: str, token: Optional[str] = None) -> Tuple[str, str]:
    """Parse *url* and return ``(instance alias, path)``.

    Accepts an empty url (falls back to the current instance), a bare
    alias, a ``cloud://<alias>/<path>`` uri, a scheme-less
    ``<alias>/<path>`` string, or a full url matching a configured
    instance uri.  When *token* is given, returns the ad-hoc alias "tmp"
    with the whole url as the path.

    Raises NoMatchException when the url cannot be mapped to exactly one
    configured instance.
    """
    if not url:
        return _get_default_instance_alias(), ""

    inst_uri_map = {name: conf["uri"] for name, conf in _get_instances().items()}
    inst_names = list(inst_uri_map.keys())

    # fast path if url is alias
    if url in inst_names:
        return url, ""

    p = urlparse(url)
    ins_url = "://".join([p.scheme, p.netloc])
    if token is not None:
        # ad-hoc instance: keep the whole url as the path
        return "tmp", url

    # use host as alias when url starts with cloud or non-scheme
    if p.scheme == "cloud":
        if p.netloc not in inst_uri_map:
            raise NoMatchException(p.netloc, inst_names)
        return p.netloc, p.path
    elif p.scheme == "":
        # scheme-less "<alias>/<path>"; partition tolerates a missing "/"
        # (the original str.split("/", 1) raised ValueError in that case
        # instead of the NoMatchException callers expect)
        netloc, _, path = url.partition("/")
        if netloc not in inst_uri_map:
            raise NoMatchException(netloc, inst_names)
        return netloc, path
    else:
        hits = [name for name, uri in inst_uri_map.items() if uri == ins_url]
        if len(hits) == 1:
            return hits[0], p.path
        raise NoMatchException(url, hits)
def _check_alias_exists(alias: str) -> None:
    """Raise NoMatchException when *alias* is not a configured instance."""
    if alias not in _get_instances():
        raise NoMatchException(alias)
class Instance:
    """
    Data structure for Instance info.

    Resolves a Starwhale instance either from the local swcli config
    (by alias or matching uri) or ad-hoc from a uri + token pair.
    """

    # alias: config key of the instance; path: remainder of the parsed
    # uri (e.g. project path) with surrounding "/" stripped
    alias: str
    path: str = ""

    def __init__(
        self,
        uri: str = "",
        instance_alias: Optional[str] = None,
        token: Optional[str] = None,
    ) -> None:
        # _info caches ad-hoc (token-based) instance data; for configured
        # instances it stays empty and lookups fall through to the config.
        self._info: Dict[str, Any] = {}
        if instance_alias and uri:
            raise Exception("alias and uri can not both set")
        if not instance_alias:
            instance_alias, path = _find_alias_by_url(uri, token)
            self.path = path.strip("/")
            if token is None:
                _check_alias_exists(instance_alias)
            else:
                # ad-hoc cloud instance described entirely by uri + token
                self._info = {"sw_token": token, "uri": path, "type": "cloud"}
        self.alias = instance_alias

    @property
    def info(self) -> Dict[str, str]:
        """Get current instance info"""
        return self._info or _get_instances().get(self.alias, {})

    @property
    def url(self) -> str:
        # configured instance uri, e.g. "local" or an http(s) address
        return self.info.get("uri", "")

    @property
    def type(self) -> str:
        # instance type as declared in the config (e.g. "cloud")
        return self.info.get("type", "")

    @property
    def username(self) -> str:
        return self.info.get("username", "")

    @property
    def token(self) -> str:
        return self.info.get("sw_token", "")

    @property
    def is_local(self) -> bool:
        # the standalone instance is configured with uri == "local"
        return self.url == "local"

    @property
    def is_cloud(self) -> bool:
        return not self.is_local

    def __str__(self) -> str:
        if self.is_local:
            return self.url
        return f"cloud://{self.alias}"
| star-whale/starwhale | client/starwhale/base/uri/instance.py | instance.py | py | 3,238 | python | en | code | 171 | github-code | 36 | [
{
"api_name": "starwhale.utils.config.load_swcli_config",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "starwhale.utils.config",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 8,
"usage_type": "name"
},
{
"api_name... |
5456383430 | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
#
# @name : PhoneInfoga - Phone numbers OSINT tool
# @url : https://github.com/sundowndev
# @author : Raphael Cerveaux (sundowndev)
from bs4 import BeautifulSoup
import hashlib
import json
from lib.output import *
from lib.request import send
'''
Scanning phone number
return:
0: Success
-1: phone number format error. Error: Please specify a valid phone number. Example: +6464806649
-2: connection error to https://numverify.com/
'''
def scan(number, proxy):
    """Look up *number* via the numverify.com hidden phone API.

    return:
        (0, data)             on success; data is the numverify JSON payload
        (-2, {'valid':False}) when numverify.com is unreachable or responds
                              with an error / unauthorized status
    """
    #test('Running Numverify.com scan...')
    try:
        requestSecret = ''
        res = send('GET', 'https://numverify.com/', {}, proxy)
        soup = BeautifulSoup(res.text, "html5lib")
    except Exception as e:
        error('Numverify.com is not available')
        #error(e)
        return -2, {'valid':False}

    # the page embeds a per-request secret used to derive the API key below
    for tag in soup.find_all("input", type="hidden"):
        if tag['name'] == "scl_request_secret":
            requestSecret = tag['value']
            break

    apiKey = hashlib.md5((number + requestSecret).encode('utf-8')).hexdigest()

    headers = {
        'Host': 'numverify.com',
        'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:64.0) Gecko/20100101 Firefox/64.0',
        'Accept': 'application/json, text/javascript, */*; q=0.01',
        'Accept-Language': 'fr,fr-FR;q=0.8,en-US;q=0.5,en;q=0.3',
        'Accept-Encoding': 'gzip, deflate, br',
        'Referer': 'https://numverify.com/',
        'X-Requested-With': 'XMLHttpRequest',
        'DNT': '1',
        'Connection': 'keep-alive',
        'Pragma': 'no-cache',
        'Cache-Control': 'no-cache'
    }

    try:
        res = send("GET", "https://numverify.com/php_helper_scripts/phone_api.php?secret_key={}&number={}".format(apiKey, number), headers, proxy)
        data = json.loads(res.content.decode('utf-8'))
    except Exception as e:
        #error('Numverify.com is not available')
        return -2, {'valid':False}

    # Bug fix: res.content is bytes, so comparing it to the str
    # "Unauthorized" was always False under Python 3; compare bytes.
    if res.content == b"Unauthorized" or res.status_code != 200:
        #error(("An error occured while calling the API (bad request or wrong api key)."))
        return -2, {'valid':False}

    if 'error' in data:
        #error('Numverify.com is not available: ' + data['error'])
        return -2, {'valid':False}

    return 0, data
| wagdev1919/BulkPhonenumberVerifier | BulkPhoneNumberVerifier/scanners/numverify.py | numverify.py | py | 2,294 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "lib.request.send",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "hashlib.md5",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "lib.request.send",
... |
21625949314 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import time
import argparse
import numpy as np
import networkx as nx
import subprocess
import os
from tulip import tlp
import pandas as pd
from evalne.utils import preprocess as pp
def parse_args():
    """Parse Fruchterman-Reingold command-line arguments.

    Returns the parsed argparse.Namespace (inputgraph, output, dimension,
    epochs, delimiter, mode, exec).
    """
    parser = argparse.ArgumentParser(
        description="Run force-based Fruchterman-Reingold algorithm."
    )
    parser.add_argument("--inputgraph", nargs="?", help="Input graph path")
    parser.add_argument(
        "--output",
        nargs="?",
        default=None,
        help="Path where the embeddings will be stored.",
    )
    parser.add_argument(
        "--dimension", type=int, default=2, help="Embedding dimension. Default is 2."
    )
    parser.add_argument(
        "--epochs", type=int, default=100, help="Training epochs. Default is 100."
    )
    parser.add_argument(
        "--delimiter", default=",", help="The delimiter used to separate the edgelist."
    )
    parser.add_argument(
        "--mode",
        default="rtx",
        type=str,
        choices=["networkx", "rtx", "naive", "lbvh"],
        help="Choose FR implementation. Networkx = networkx.spring_layout and rtx as \
            well as lbvh and naive use the owl-graph-drawing implementation",
    )
    parser.add_argument(
        "--exec",
        type=str,
        default="methods/frrtx/build/gd",
        help="Path where the executable 'gd' can be found.",
    )
    return parser.parse_args()
def tulip2txt(input, output, delimiter=","):
"""
https://tulip.labri.fr/Documentation/current/tulip-python/html/tulippluginsdocumentation.html#csv-export
"""
tmp_tlp_output = os.path.join(os.path.split(output)[0], "tmp_tlp_output.csv")
# read tlp file
params = tlp.getDefaultPluginParameters("TLP Import")
params["filename"] = input
graph = tlp.importGraph("TLP Import", params)
# export layout properties to csv
params = tlp.getDefaultPluginParameters("CSV Export", graph)
params["Field separator"] = "Custom"
params["Custom separator"] = ";"
params["Type of elements"] = "nodes"
params["Export id"] = True
params["Export visual properties"] = True
tlp.exportGraph("CSV Export", graph, tmp_tlp_output, params)
# read tmp export and extract coordinates
tlp_graph = pd.read_table(tmp_tlp_output, delimiter=";")
coordinates = tlp_graph["viewLayout"].values
coordinates = [
[float(tp.strip("()").split(",")[0]), float(tp.strip("()").split(",")[1])]
for tp in coordinates
]
np.savetxt(output, coordinates, delimiter=delimiter)
# delete temporary file
if os.path.isfile(tmp_tlp_output):
os.remove(tmp_tlp_output)
def tulip2edgestxt(input, output, delimiter=","):
"""
https://tulip.labri.fr/Documentation/current/tulip-python/html/tulippluginsdocumentation.html#csv-export
"""
tmp_tlp_output = os.path.join(os.path.split(output)[0], "tmp_tlp_output.csv")
# read tlp file
params = tlp.getDefaultPluginParameters("TLP Import")
params["filename"] = input
graph = tlp.importGraph("TLP Import", params)
# export layout properties to csv
params = tlp.getDefaultPluginParameters("CSV Export", graph)
params["Field separator"] = "Custom"
params["Custom separator"] = ";"
params["Type of elements"] = "edges"
params["Export id"] = True
params["Export visual properties"] = False
tlp.exportGraph("CSV Export", graph, tmp_tlp_output, params)
# read tmp export and extract coordinates
tlp_graph = pd.read_table(tmp_tlp_output, delimiter=";")
src = tlp_graph["src id"].values
target = tlp_graph["tgt id"].values
np.savetxt(output, np.transpose(np.array([src, target])), delimiter=delimiter, fmt="%i")
# delete temporary file
if os.path.isfile(tmp_tlp_output):
os.remove(tmp_tlp_output)
def run_RT_FR(input_file, output_file, delimiter, epochs, exec, mode):
    """Run the compiled owl-graph-drawing FR binary and collect its layout.

    Writes the edgelist (with the header the binary expects), launches the
    executable, converts its Tulip output into a coordinate text file and
    re-orders rows when the binary relabelled the nodes.

    Returns the embedding wall-clock time in seconds.
    """
    # the implementation needs a header for the edgelist input file
    edges = np.loadtxt(input_file, delimiter=",", dtype=int)
    # transform file into csv with header
    tmp_rtx_edgefle = os.path.join(os.path.split(input_file)[0], "tmp_rtx_edgefile.csv")
    np.savetxt(tmp_rtx_edgefle, edges, delimiter=",", fmt="%i", header="edgelist")
    tmp_tulip_output = os.path.join(os.path.split(output_file)[0], "tmp_tulip.tlp")
    start = time.time()
    process = subprocess.Popen(
        [
            exec,
            "-bench=true",
            "-dt=file",
            "-mode=" + mode,
            "-n=" + str(epochs),
            "-o=" + tmp_tulip_output,
            tmp_rtx_edgefle,
        ],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        encoding="utf8"
    )
    # watch stdout until the binary reports "done" (or exits on its own)
    while True:
        if process.poll() is not None:
            break
        output = process.stdout.readline()
        if output:
            dec_output = output.strip()
            if "done" in dec_output:
                break
    runtime = time.time() - start
    print(f"Embedding time: {runtime:.4f}")
    assert os.path.isfile(tmp_tulip_output), f"FR-RTX did not produce the output file {tmp_tulip_output}."
    process.terminate()
    # convert tlp into txt embedding
    tulip2txt(tmp_tulip_output, output_file, delimiter)
    # write out edges from tulip
    tmp_tulip_edgefile = "tmp_tlp_edges.txt"
    tulip2edgestxt(tmp_tulip_output, tmp_tulip_edgefile, delimiter)
    tlp_edges = np.loadtxt(tmp_tulip_edgefile, delimiter=",", dtype=int)
    embedding = np.loadtxt(output_file, delimiter=delimiter)
    if not (edges == tlp_edges).all():
        print("Nodes are relabeled by FR-RTX. Computing node mapping.")
        # edgelists are not the same -- so node IDs should be in different order
        # recover the input's node order from first appearance in the edgelist
        flat_edges = edges.flatten()
        indexes = np.unique(edges.flatten(), return_index=True)[1]
        unique_edges = [flat_edges[index] for index in sorted(indexes)]
        node_order = np.argsort(unique_edges)
        embedding = embedding[node_order]
    # clean up all temporary files
    if os.path.isfile(tmp_tulip_output):
        os.remove(tmp_tulip_output)
    if os.path.isfile(tmp_tulip_edgefile):
        os.remove(tmp_tulip_edgefile)
    if os.path.isfile(tmp_rtx_edgefle):
        os.remove(tmp_rtx_edgefle)
    np.savetxt(output_file, embedding, delimiter=delimiter)
    return float(runtime)
def spring_layout(input_file, output_file, delimiter, epochs):
    """Compute a NetworkX Fruchterman-Reingold (spring) layout and save it.

    Loads the graph from input_file, runs nx.spring_layout for `epochs`
    iterations, and writes the node positions (sorted by node id) to
    output_file using `delimiter`.
    """
    graph = pp.load_graph(input_file, delimiter=delimiter)
    graph, _ = pp.prep_graph(graph)
    # Compute Fruchterman-Reingold layout and time it.
    t0 = time.time()
    positions = nx.spring_layout(graph, iterations=epochs)
    elapsed = time.time() - t0
    print(f"Embedding time: {elapsed:.4f}")
    # Order rows by node id so row i corresponds to node i.
    ordered = sorted(positions.items(), key=lambda kv: kv[0])
    coords = np.array([pos for _, pos in ordered])
    # Store the embedding in the output file.
    np.savetxt(output_file, coords, delimiter=delimiter)
def main(args):
    """Dispatch to the requested Fruchterman-Reingold implementation."""
    use_networkx = args.mode == "networkx" or not os.path.isfile(args.exec)
    if use_networkx:
        # Naive NetworkX spring-layout fallback.
        spring_layout(args.inputgraph, args.output, args.delimiter, args.epochs)
    else:
        # Faster owl-graph-drawing implementation.
        run_RT_FR(
            args.inputgraph,
            args.output,
            args.delimiter,
            args.epochs,
            args.exec,
            args.mode,
        )
# Script entry point: parse the CLI arguments and run the selected method.
if __name__ == "__main__":
    args = parse_args()
main(args) | aida-ugent/graph-vis-eval | methods/fruchterman_reingold.py | fruchterman_reingold.py | py | 7,489 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 65,
"usage_type": "attribute"
},
{
"api_name": "os.path.split",
... |
15615165732 | from sys import argv, exit
from collections import OrderedDict
from functools import reduce
import re
import httplib2
# Google API
from apiclient import discovery
from apiclient.errors import HttpError
from oauth2client import client, tools
from oauth2client.file import Storage
# Reddit API
import praw
import OAuth2Util
#
# Configuration.
#
# The ID of the spreadsheet, which can be found in the URL. This example is the
# ID for https://docs.google.com/spreadsheets/d/1v8SVnzrGaupBKm6CbtmPjMqOrkcl0_sfEW0eEcujkys/edit
SPREADSHEET_ID = "1v8SVnzrGaupBKm6CbtmPjMqOrkcl0_sfEW0eEcujkys"
# The flair sheet is the sheet where the username, mail, snail mail and flair
# assignment columns are. The bot assumes these columns are given in this order
# starting in column A and the actual values start in row 2.
FLAIR_SHEET = "Sheet1"
# This bot will store the timestamp of the last comment it handled. This way it
# will know which comments have already been handled. This reference will
# be stored in the cell indicated here. See
# https://developers.google.com/sheets/guides/concepts#a1_notation to know how
# to reference cells. You can always hide these in a hidden sheet if you prefer
# them not being visible on the main sheet.
PREV_COMMENT_TIME_CELL = FLAIR_SHEET+"!J2"
# The bot will also store a reference to the previous thread it processed, this
# way it can handle any new comments in the old sticky before going to the new
# one. It will also make it easy for the bot to know when the "prev comment"
# value is no longer relevant, as the sticky thread changed.
PREV_THREAD_ID_CELL = FLAIR_SHEET+"!I2"
# if a user tries to enter a high combined verification count for a single user,
# the bot will not automatically update the count, but send an email to the
# moderators instead. E.g. with a threshold of 30, "/u/user 20 20" will not
# automatically be added, but will cause a mail to be sent to the mods instead.
COUNT_THRESHOLD_USER = 10
# The same as before, except it will look if the total verification count of a
# comment is high, instead of just the verified count for a single user.
COUNT_THRESHOLD_COMMENT = 30
# A list of mods that will be mailed if the bot encounters something fishy. If
# the list is empty, no mods will be mailed. Usernames are always written
# without the /u/ part. To send to all moderators of a subreddit, write
# /r/subreddit_name
#MODS = ["wmeacham", ]
#MODS = ["/r/penpals", ]
MODS = ["/r/BitwiseShiftTest"]
# A definition of all ranks in descending order of prestige. The first value
# must be the flair CSS class, the second is a function determining whether
# a user has met the conditions for this flair. I guessed these, change them
# to their actual values! The requirements function gets passed a row from the
# datasheet, whose values are all strings.
RANKS = OrderedDict([
("combogold", lambda row: int(row[1]) >= 300 and int(row[2]) >= 100),
("goldsnail", lambda row: int(row[1]) >= 300 or int(row[2]) >= 100),
("combosilver", lambda row: int(row[1]) >= 30 and int(row[2]) >= 10),
("silversnail", lambda row: int(row[1]) >= 30 or int(row[2]) >= 10),
("combobronze", lambda row: int(row[1]) >= 3 and int(row[2]) >= 1),
("bronzeemail", lambda row: int(row[1]) >= 3 or int(row[2]) >= 1),
("", lambda row: True), # Catch-all, for people who have not met any requirements yet.
])
# Wether to allow users to verify themselves.
ALLOW_SELF_VERIFICATION = False
#
# Actual bot code
#
# Read configuration file if one is given.
if len(argv) == 2:
    try:
        # SECURITY NOTE: the config file is executed as arbitrary Python code
        # (it overrides the constants above) — only run trusted config files.
        with open(argv[1], "r") as f:
            exec(f.read())
    except FileNotFoundError as e:
        print("[ERROR] The config file could not be found.")
        raise e
    except Exception as e:
        # Any error raised while executing the config (syntax or runtime).
        print("[ERROR] The config file contains error.")
        raise e
elif len(argv) > 2:
    # More than one argument is a usage error.
    print("[Error] Correct syntax: {} [config_file]".format(argv[0]))
    exit()
class colors:
    """ANSI escape sequences for colored, bold terminal output."""
    # Adapted from https://stackoverflow.com/questions/287871/print-in-terminal-with-colors-using-python
    OK = "\033[92m\033[1m"       # bold green
    WARNING = "\033[93m\033[1m"  # bold yellow
    ERROR = "\033[91m\033[1m"    # bold red
    INPUT = "\033[94m\033[1m"    # bold blue
    ENDC = "\033[0m"             # reset all attributes
def get_credentials():
    """ Gets valid user credentials from storage.

    If nothing has been stored, or if the stored credentials are invalid,
    the OAuth2 flow is completed to obtain the new credentials.

    Returns:
        Credentials, the obtained credential.
    """
    token_store = Storage("sheets.googleapis.com-python-penpalsbot.json")
    creds = token_store.get()
    if creds and not creds.invalid:
        return creds
    # No (valid) cached credentials — run the interactive OAuth2 flow.
    flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)
    flow.user_agent = APPLICATION_NAME
    return tools.run_flow(flow, token_store)
def get_verification_thread():
    """Return the current verification sticky, or None if neither sticky's
    title contains "verification post"."""
    print("Looking for new verification sticky...")
    print("Checking top sticky...", end="")
    candidate = r.get_sticky(SUBREDDIT, True)
    if "verification post" in candidate.title.lower():
        print(colors.OK+" DONE"+colors.ENDC)
        return candidate
    # Top sticky didn't match; fall back to the bottom sticky.
    print("\nChecking bottom sticky...", end="")
    candidate = r.get_sticky(SUBREDDIT, False)
    if "verification post" not in candidate.title.lower():
        candidate = None
    print(colors.OK+" DONE"+colors.ENDC)
    return candidate
def message_mods(subject, message):
    """ Lazy loaded send mods a message. """
    # NOTE(review): this unconditional return disables the function entirely —
    # everything below is unreachable, so moderators are never actually
    # messaged. Presumably a kill-switch left over from testing (MODS points
    # at a test subreddit above); confirm before removing.
    return
    # `global` is unnecessary here (MODS is only read, never rebound).
    global MODS
    for mod in MODS:
        try:
            # from_sr sends the message on behalf of the subreddit.
            r.send_message(mod, subject, message, from_sr=SUBREDDIT)
        except praw.errors.Forbidden:
            # Not allowed to message this recipient; warn and continue.
            print(colors.WARNING+"[WARNING]"+colors.ENDC+" Unable to send a message to /u/{}".format(mod))
def expand_comments(thread):
    """ Expand top-level comments to the point where there are no more comments.

    Repeatedly calls thread.replace_more_comments() until the number of
    top-level comments stops growing.

    Returned comments are sorted descendingly by time (newest first).
    """
    current = list(thread.comments)
    previous_size = 0
    while len(current) != previous_size:
        previous_size = len(current)
        thread.replace_more_comments()
        current = list(thread.comments)
    current.sort(key=lambda c: int(c.created_utc), reverse=True)
    return current
def total_verification_count(verifications):
    """Sum the mail and letter counts over all verification dicts
    (counts may be ints or numeric strings)."""
    return sum(int(v["mail_count"]) + int(v["letter_count"]) for v in verifications)
def process_comments(thread, prev_comment_time=0):
    """ Process a thread's comments for verification strings.

    Every paragraph matching RE_VERIFICATION_SYNTAX credits mail/letter
    counts to a user in the global added_count dict, unless the comment
    self-verifies or exceeds a count threshold, in which case the whole
    comment is skipped and the mods are messaged.

    Returns the UTC timestamp of the newest comment seen so the caller can
    persist it and skip already-handled comments on the next run. Returns
    None if no new comments were found.
    """
    comments = expand_comments(thread)  # sorted newest first
    if not comments or int(comments[0].created_utc) <= prev_comment_time:
        print("No new comments found.")
        return None
    for comment in comments:
        # Newest-first order: stop at the first already-handled comment.
        if int(comment.created_utc) <= prev_comment_time:
            break
        print("+ Handling new comment. ID={}".format(comment.id))
        verifications = []  # Stores all verifications of a comment until it is processed.
        error_occurred = False
        # Start looking for verification count strings.
        for paragraph in comment.body.splitlines():
            match = RE_VERIFICATION_SYNTAX.match(paragraph)
            if not match:
                continue
            print("... Verification count string found: "+paragraph)
            data = match.groupdict()
            if not ALLOW_SELF_VERIFICATION and comment.author.name == data["username"]:
                print("... "+colors.WARNING+"[WARNING]"+colors.ENDC+" Trying to verify himself. Ignoring and messaging mods.")
                message_mods("Self-verification", """
                    It appears [a user]({}) is attempting to verify themselves.
                    This comment has been ignored and will have to be manually
                    verified.
                    """.format(comment.permalink))
                error_occurred = True
                break
            data["mail_count"] = int(data["mail_count"])
            data["letter_count"] = int(data["letter_count"])
            # Check if the COUNT_THRESHOLD_USER hasn't been exceeded.
            if data["mail_count"] + data["letter_count"] >= COUNT_THRESHOLD_USER:
                print("... "+colors.WARNING+"[WARNING]"+colors.ENDC+" High verification count for a single user. Ignoring and messaging mods.")
                message_mods("Verification count threshold exceeded", """
                    It appears [a comment]({}) is attempting to verify a large
                    email and/or letter count for a single user. This comment
                    has been ignored and will have to be manually verified.
                    """.format(comment.permalink))
                error_occurred = True
                break
            verifications.append(data)
        # Only verify the comment threshold if the user threshold wasn't exceeded.
        if not error_occurred:
            if total_verification_count(verifications) > COUNT_THRESHOLD_COMMENT:
                # BUG FIX: the message wording said "single user" and — worse —
                # .format(comment.permalink) was missing, so mods received a
                # literal "{}" placeholder instead of the comment link.
                print("... "+colors.WARNING+"[WARNING]"+colors.ENDC+" High total verification count for a single comment. Ignoring and messaging mods.")
                message_mods("Verification count threshold exceeded", """
                    It appears [a comment]({}) is attempting to verify a large
                    total email and/or letter count. This comment has been
                    ignored and will have to be manually verified.
                    """.format(comment.permalink))
            else:
                # No errors, apply the verification counts.
                for data in verifications:
                    counts = added_count.setdefault(
                        data["username"], {"mail_count": 0, "letter_count": 0})
                    counts["mail_count"] += data["mail_count"]
                    counts["letter_count"] += data["letter_count"]
    # BUG FIX: was int(comments[-1].created_utc), i.e. the *oldest* comment.
    # Persisting the oldest timestamp made later runs re-process — and
    # double-count — every comment newer than it. The newest handled comment
    # is comments[0] (list is sorted newest first).
    return int(comments[0].created_utc)
def get_flair_css_class(spreadsheet_row):
    """Return the CSS class of the highest rank whose requirements the row
    meets (RANKS is ordered from most to least prestigious)."""
    matching = (css for css, meets in RANKS.items() if meets(spreadsheet_row))
    return next(matching, None)
def recompute_spreadsheet_data(spreadsheet_data, added_count):
    """ Recomputes all values in the spreadsheet.

    spreadsheet_data rows are [username ("/u/name"), mail count, letter count,
    flair css class] as strings, with a header in row 0. added_count maps a
    *bare* username (no "/u/") to {"mail_count": int, "letter_count": int}
    and is consumed (entries deleted) by this call.

    Returns a dict {bare_username: flair_css_class} of all flairs that
    changed during the recompute.
    """
    changed_flairs = {}  # Will contain values {username: flairstring}
    # Update rows already present in the sheet (skip the header row).
    for i, (username, mail_count, letter_count, flair_class) in enumerate(spreadsheet_data[1:], 1):
        bare_name = username[3:]  # strip the "/u/" prefix
        if bare_name in added_count:
            spreadsheet_data[i][1] = str(int(spreadsheet_data[i][1]) + added_count[bare_name]["mail_count"])
            spreadsheet_data[i][2] = str(int(spreadsheet_data[i][2]) + added_count[bare_name]["letter_count"])
            new_flair_class = get_flair_css_class(spreadsheet_data[i])
            # Only record flairs that changed, paying special attention to "n/a"
            # which stands for a non-existent flair class.
            if new_flair_class != flair_class and not (not new_flair_class and flair_class == "n/a"):
                spreadsheet_data[i][3] = new_flair_class
                changed_flairs[bare_name] = new_flair_class
            del added_count[bare_name]
    # Append all users that haven't been handled yet to the end of the spreadsheet.
    # BUG FIX: this loop previously sliced the (already bare) username with
    # [3:], corrupting the key, and computed the flair from the *last existing*
    # row instead of the new user's own counts (NameError on an empty sheet).
    for bare_name, counts in added_count.items():
        new_row = ["/u/" + bare_name, str(counts["mail_count"]), str(counts["letter_count"]), ""]
        new_flair_class = get_flair_css_class(new_row)
        new_row[3] = new_flair_class
        changed_flairs[bare_name] = new_flair_class
        spreadsheet_data.append(new_row)
    return changed_flairs
def update_flairs(changed_flairs):
    """Apply new flair CSS classes on Reddit for every user in changed_flairs.

    changed_flairs -- dict {username (no "/u/"): flair_css_class}.
    Existing flair text is preserved; flairs are uploaded in batches of 100.
    Exits the program if the bot lacks mod privileges.
    """
    print("Updating flairs...")
    flairs = []    # Flair dicts queued for upload.
    unknowns = []  # Unrecognized existing CSS classes (warn once per class).
    # Update existing flairs.
    for username, flair_css_class in changed_flairs.items():
        print("... /u/{} <- {}".format(username, flair_css_class))
        try:
            flair = r.get_flair(SUBREDDIT, username)
        except praw.errors.Forbidden:
            print(colors.ERROR+"[ERROR]"+colors.ENDC+" Could not retrieve flair information! Does the bot have mod privileges?".format(username))
            exit()
        if flair is None:
            print(colors.WARNING+"[WARNING]"+colors.ENDC+" /u/{} does not exist!".format(username))
            # TODO: do something more with the knowledge that the user doesn't exists?
            continue
        # The empty flair texts (None) need to be converted to empty strings.
        if flair["flair_css_class"] is None:
            flair["flair_css_class"] = ""
        if flair["flair_text"] is None:
            flair["flair_text"] = ""
        # Validate the existing flair class against those defined in RANKS.
        if flair["flair_css_class"] not in RANKS and flair["flair_css_class"] not in unknowns:
            # BUG FIX: the *new* class was appended instead of the unknown
            # existing one, so the "warn once per class" de-duplication never
            # worked and the wrong value was remembered.
            unknowns.append(flair["flair_css_class"])
            print(colors.ERROR+"[ERROR]"+colors.ENDC+' Encountered an unknown CSS flair class "{}".'.format(flair["flair_css_class"]))
        print("... Queuing flair {} for /u/{}".format(flair_css_class, flair["user"]))
        flair["flair_css_class"] = flair_css_class
        flairs.append(flair)
    # Only give a warning the first time an unknown class is encountered.
    if unknowns:
        input(colors.INPUT+"[INPUT NEEDED]"+colors.ENDC+" There were some unknown CSS flair classes!\n"
              " This probably means you forgot to define that class.\n"
              " Close the progam now and fix the error, or press ENTER\n"
              " to continue, ignoring any errors.")
    while flairs:
        # Upload in chunks of at most 100 flairs per request.
        # BUG FIX: was `flairs.len` — lists have no .len attribute
        # (AttributeError the first time any flair actually changed).
        to_i = min(len(flairs), 100)
        r.set_flair_csv(SUBREDDIT, flairs[:to_i])
        print("Succesfully changed a section of the user's flairs")
        flairs = flairs[to_i:]
    print(colors.OK+"[SUCCESS]"+colors.ENDC+" Updated all user's flairs")
# Google Sheets API
SCOPES = "https://www.googleapis.com/auth/spreadsheets"  # read/write scope
CLIENT_SECRET_FILE = "client_secret.json"  # OAuth2 client secret downloaded from Google
APPLICATION_NAME = "Google Sheets API Python Quickstart"
# Columns A-D of the flair sheet: username, mail count, letter count, flair class.
DATA_RANGE = FLAIR_SHEET+"!A:D"
# Global variables
SUBREDDIT = "penpals"
# Regular expression describing the syntax of a verification command:
#   [/u/]username <mail_count> <letter_count>
# e.g. "/u/some-user 3 5" or "u/some-user, 3, 5".
RE_VERIFICATION_SYNTAX = re.compile(
    r"\A\s*"  # Any leading whitespace.
    # Reddit username, based on the Reddit source code.
    # First / is optional, as people seemed to forget it occasionally.
    # Entire /u/ part can be left out as well.
    r"(/?u/)?(?P<username>[\w\-]+)"
    r"[\s,_\-+]+"  # Delimiters.
    r"(?P<mail_count>[0-9]+)"
    r"[\s,_\-+]+"  # Delimiters.
    # BUG FIX: was [0-9+]+ — the stray "+" inside the class let strings like
    # "5+" match, which later crashed int(data["letter_count"]).
    r"(?P<letter_count>[0-9]+)"
    r"\s*\Z",  # Any trailing whitespace.
    re.UNICODE)
# --- Authenticate with the Google Sheets API (v4). ---
print("Authenticating with Docs API...", end="")
credentials = get_credentials()
http = credentials.authorize(httplib2.Http())
discoveryUrl = ("https://sheets.googleapis.com/$discovery/rest?version=v4")
service = discovery.build("sheets", "v4", http=http,
                          discoveryServiceUrl=discoveryUrl)
print(colors.OK+" DONE"+colors.ENDC)
# --- Authenticate with Reddit (PRAW 3 + OAuth2Util). ---
print("Authenticating with Reddit...", end="")
r = praw.Reddit("Python:PenpalsVerification by /u/BitwiseShift")
# Be gentle with the Reddit API: at most one request per second.
r.config.api_request_delay = 1.0
o = OAuth2Util.OAuth2Util(r)
o.refresh()
print(colors.OK+" DONE"+colors.ENDC)
# Keeps tracks of added counts per user.
added_count = {}  # Will contain count in the form {username: {mail_count: int, letter_count: int}}.
# Fetch persistence cells and the flair data range in one batched request.
print("Getting spreadsheet data... ", end="")
prev_thread_id, prev_comment_time, spreadsheet_data = service.spreadsheets().values().batchGet(
    spreadsheetId=SPREADSHEET_ID, ranges=[PREV_THREAD_ID_CELL, PREV_COMMENT_TIME_CELL, DATA_RANGE]).execute()["valueRanges"]
# Set default values.
prev_thread_id = prev_thread_id.get("values", [[None]])[0][0]
prev_comment_time = int(prev_comment_time.get("values", [[0]])[0][0])
spreadsheet_data = spreadsheet_data.get("values", [])
# Pad rows, as Google leaves out empty rows at the end, i.e. empty css class strings.
spreadsheet_data = [row + [""]*(4-len(row)) for row in spreadsheet_data]
if prev_thread_id == None:
    # First ever run: nothing to resume from.
    print("No previous thread run found.")
    prev_comment_time = 0
    comment_time1 = None
else:
    # Catch up on comments posted in the previously processed sticky.
    print("Checking for new comments in previous thread run...")
    prev_thread = r.get_submission(submission_id=prev_thread_id)
    comment_time1 = process_comments(prev_thread, prev_comment_time)
    prev_comment_time = comment_time1 if comment_time1 else prev_comment_time
# Find the current verification sticky and process its new comments.
sticky = get_verification_thread()
# Check if the sticky was found.
if sticky:
    print("Verification thread found!")
else:
    # BUG FIX: execution previously fell through and crashed on `sticky.id`
    # below (AttributeError on None). Bail out cleanly instead.
    print("Could not find the new verification thread!")
    exit()
# Check if the sticky is different from the previous thread we processed.
if sticky.id == prev_thread_id:
    print("The verification thread hasn't changed!")
    comment_time2 = None
else:
    comment_time2 = process_comments(sticky, prev_comment_time)
    prev_comment_time = comment_time2 if comment_time2 else prev_comment_time
    prev_thread_id = sticky.id
# Only continue further if some new comments were found.
if comment_time1 or comment_time2:
    print("Handled all comments!")
    print("Computing new counts and flairs...", end="")
    # Fold added_count into the sheet rows; returns the flairs that changed.
    changed_flairs = recompute_spreadsheet_data(spreadsheet_data, added_count)
    print(colors.OK+" DONE"+colors.ENDC)
    if changed_flairs:
        try:
            update_flairs(changed_flairs)
            print(colors.OK+" DONE"+colors.ENDC)
        except praw.errors.Forbidden:
            print(colors.ERROR+"\n[ERROR]"+colors.ENDC+" Unable to update the user's flairs! Does the bot have mod privileges?")
            exit()
    else:
        print("No flairs changed!")
    # Persist the new thread id / comment timestamp and the updated sheet data.
    print("Writing persistence values to spreadsheet...", end="")
    data = [
        {"range": PREV_THREAD_ID_CELL, "values": [[prev_thread_id]]},
        {"range": PREV_COMMENT_TIME_CELL, "values": [[str(prev_comment_time)]]},
        {"range": DATA_RANGE, "values": spreadsheet_data}
    ]
    service.spreadsheets().values().batchUpdate(
        body={"valueInputOption": "USER_ENTERED", "data": data},
        spreadsheetId=SPREADSHEET_ID).execute()
    print(colors.OK+" DONE"+colors.ENDC)
else:
    print("No new comments!")
print(colors.OK+"[SUCCESS]"+colors.ENDC+" Script finished!")
| JohnnyDeuss/reddit-bots | PenpalsVerification/PenpalsVerification.py | PenpalsVerification.py | py | 16,850 | python | en | code | 5 | github-code | 36 | [
{
"api_name": "collections.OrderedDict",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 79,
"usage_type": "argument"
},
{
"api_name": "sys.argv",
"line_number": 81,
"usage_type": "name"
},
{
"api_name": "sys.argv",
"line_nu... |
30019222748 | from flask import Flask, render_template, redirect
import pymongo
from flask_pymongo import PyMongo
import scrape_mars
#Spin up the Flask App
app = Flask(__name__, template_folder='templates')
# Connection URI for the local MongoDB "mars_app" database (used by flask_pymongo).
app.config["MONGO_URI"] = "mongodb://localhost:27017/mars_app"
mongo = PyMongo(app)
#Or set inline
#mongo = PyMongo(app, uri="mongodb://localhost:27017/mars_app")
#Creation of connection variable. Connection to local host for local Mongo.
#conn = "mongodb://localhost:27017"
#Pass connection to the pymongo instance"
#client = pymongo.MongoClient(conn)
#Connect to a database. Will create one if not already available
#db = client.redplanet_db
#mars = db.mars_dict
#Drops collection if available to remove dublicates
#db.mars_dict.drop()
#Mongo Creates Collection automatically
#db.mars_dict.inset_many()#insert list of dict
#App Route
@app.route("/")
def index():
    """Render the homepage with the most recently scraped Mars document."""
    # NOTE(review): on a fresh database find_one() returns None — confirm the
    # template tolerates data=None.
    return render_template("index.html", data=mongo.db.mars.find_one())
@app.route("/scrape")
def scraper():
    """Run the Mars scraper and upsert the result as the single mars document,
    then redirect back to the homepage."""
    mars = mongo.db.mars
    mars_data = scrape_mars.scrape()
    # BUG FIX: Collection.update() is deprecated and removed in PyMongo 4;
    # replace_one() performs the same whole-document upsert.
    mars.replace_one({}, mars_data, upsert=True)
    #Redirect to the homepage
    return redirect("/", code=302)
if __name__ == "__main__":
app.run(debug=True) | fabzum/web-scraping-challenge | Missions_to_Mars/app.py | app.py | py | 1,218 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "flask_pymongo.PyMongo",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "scrape_mars.sc... |
10763879949 |
import numpy as np
from scipy.optimize import minimize
import scipy.io
def sigmoid(z):
    """Logistic function 1 / (1 + e^-z), vectorized over arrays.

    Uses np.exp (idiomatic and faster than the np.e ** (-z) power form).
    """
    return 1.0 / (1.0 + np.exp(-z))


def lrCostFunction(theta, X, y, reg_param):
    """Regularized logistic-regression cost and gradient.

    theta     -- (n,) parameter vector (theta[0] is the bias term)
    X         -- (m, n) design matrix
    y         -- (m,) binary labels
    reg_param -- L2 regularization strength (lambda)

    Returns (J, grad) for use with scipy.optimize.minimize(jac=True).
    """
    m = len(y)
    h = sigmoid(np.dot(X, theta))
    # Unregularized cross-entropy cost.
    J = np.sum(-y * np.log(h) - (1 - y) * np.log(1 - h)) / m
    # BUG FIX: was (reg/m) * sum(theta**2) — missing the 1/2 factor and
    # regularizing the bias theta[0], which made the cost inconsistent with
    # the gradient below (whose bias component is left unregularized).
    J += (reg_param / (2 * m)) * np.sum(theta[1:] ** 2)
    # Gradient: unregularized part, then add lambda/m * theta except theta[0].
    grad_0 = np.sum((h - y)[:, None] * X, axis=0) / m
    grad_reg = grad_0 + (reg_param / m) * theta
    grad_reg[0] = grad_0[0]
    return (J, grad_reg)
def oneVsAll(X, y, num_labels, reg_param):
    """Train one-vs-all regularized logistic regression.

    X          -- (m, n) design matrix (bias column already added)
    y          -- (m,) integer class labels in range(num_labels)
    num_labels -- number of classes
    reg_param  -- L2 regularization strength

    Returns an (n, num_labels) matrix whose column k holds the parameters
    of the classifier for class k.
    """
    n = np.size(X, 1)
    theta = np.zeros((n, num_labels))

    def findOptParam(p_num):
        # One-vs-rest: 1 for samples of class p_num, 0 otherwise.
        outcome = np.array(y == p_num).astype(int)
        initial_theta = theta[:, p_num]
        results = minimize(lrCostFunction,
                           initial_theta,
                           method='Newton-CG',
                           args=(X, outcome, reg_param),
                           jac=True,  # lrCostFunction returns (cost, grad)
                           tol=1e-6,
                           options={'maxiter': 400,
                                    'disp': True})
        theta[:, p_num] = results.x

    # BUG FIX: the loop was hard-coded to range(10), silently ignoring the
    # num_labels parameter for any other class count.
    for digit in range(num_labels):
        findOptParam(digit)
    return theta
def predictOneVsAllAccuracy(est_theta, X):
    """Return the predicted class index (argmax of the linear scores) for
    each row of X under the one-vs-all parameters est_theta."""
    scores = np.dot(X, est_theta)
    return np.argmax(scores, axis=1)
def predict(theta1, theta2, X):
    """Forward-pass prediction through a two-layer neural network.

    theta1 -- weights of the hidden layer, theta2 -- weights of the output
    layer (both expect a prepended bias unit). Returns the argmax output
    class index for each sample in X.
    """
    m = len(X)
    if np.ndim(X) == 1:
        X = X.reshape((-1, 1))
    # Input layer with bias column.
    a1 = np.hstack((np.ones((m, 1)), X))
    # Hidden layer activations, then prepend the bias column.
    a2 = sigmoid(np.dot(a1, theta1.T))
    a2 = np.hstack((np.ones((len(a2), 1)), a2))
    # Output layer activations.
    a3 = sigmoid(np.dot(a2, theta2.T))
    return np.argmax(a3, axis=1)
# Handwritten-digit data: 20x20 pixel images, 10 classes.
input_layer_size = 400
num_labels = 10
raw_mat = scipy.io.loadmat("ex3data1.mat")
X = raw_mat.get("X")
y = raw_mat.get("y").flatten()
# The dataset encodes digit 0 as label 10; remap it back to 0.
y[y== 10] = 0
m = np.hstack((np.ones((len(y),1)),X))# add column of ones
# NOTE(review): rand_indices/sel look like a leftover display sample — unused below.
rand_indices = np.random.randint(0,len(m),100)
sel = X[rand_indices,:]
reg_param = 1.0
# NOTE(review): hard-coded 10 here — should presumably pass num_labels.
theta = oneVsAll(m,y,10,reg_param)
predictions = predictOneVsAllAccuracy(theta,m)
# Training-set accuracy in percent.
accuracy = np.mean(y == predictions) * 100
print(accuracy)
| kumudlakara/MRM | multiclass_logistic_regression.py | multiclass_logistic_regression.py | py | 2,225 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.e",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "numpy.sum",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.log",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.dot",
"line_number": 12,
... |
16926338982 | import dash
from dash import dcc
from dash import html
import dash_daq as daq
from dash.dependencies import Input, Output
import pandas as pd
import numpy as np
import plotly.graph_objs as go
import plotly.express as px
import umap
from umap import UMAP
import dash_bootstrap_components as dbc
# Launch the application
app = dash.Dash(external_stylesheets=[dbc.themes.BOOTSTRAP])
# Step 2. Import the dataset
#df = pd.read_csv('finance-charts-apple.csv')
# NOTE(review): absolute, machine-specific path — consider making this configurable.
with open('/mnt/c/Users/Ebru-as-user/Downloads/Leukemia_GSE9476.csv', 'r') as f:
    df = pd.read_csv(f)
# saving the string version of the type column to use for labelling
df_type_copy = df['type']
# Replace the string labels with 1-based integer codes (used for marker colors).
df['type'] = pd.factorize(df['type'])[0] + 1
# UMAP projection of data
# Fixed random_state keeps the 3-D embedding reproducible between runs.
reducer = UMAP(n_neighbors=3, min_dist=0.6, n_components=3,
               init='random', random_state=0)
embedding = reducer.fit_transform(df)
# Labels for axes of each plot
x_col = 'UMAP X'
y_col = 'UMAP Y'
z_col = 'UMAP Z'
# Gene names shown on the second plot's axes.
x_col2 = 'FLT3'
y_col2 = 'NPM1'
z_col2 = 'IDH1'
# 3D arrays for the first and second plot
plot1_axes = embedding
# Expression values of three probes -- presumably mapping to FLT3/NPM1/IDH1
# above; TODO confirm the probe-to-gene mapping.
plot2_axes = np.array(
    df[['206674_at', '221923_s_at', '201193_at']].values.tolist())
#boxes = ['214983_at', '206082_at', '210794_s_at', '203591_s_at']
# default columns to use for the box-and-whisker plot if the user doesn't select anything
boxes = ['205780_at', '211725_s_at', '208478_s_at', '203728_at']
# storing the max and mins of each dataset for later use
# (used to fix the axis ranges and position the selection cube sliders)
max_x1 = max(plot1_axes[:, 0])
min_x1 = min(plot1_axes[:, 0])
max_y1 = max(plot1_axes[:, 1])
min_y1 = min(plot1_axes[:, 1])
max_z1 = max(plot1_axes[:, 2])
min_z1 = min(plot1_axes[:, 2])
max_x2 = max(plot2_axes[:, 0])
min_x2 = min(plot2_axes[:, 0])
max_y2 = max(plot2_axes[:, 1])
min_y2 = min(plot2_axes[:, 1])
max_z2 = max(plot2_axes[:, 2])
min_z2 = min(plot2_axes[:, 2])
box_plots = go.Figure()
#for col in boxes:
# box_plots.add_trace(go.Box(y=df[col], name=col))
#box_plots.add_trace(go.Box(y=df['petal_width']))
#box_plots.add_trace(go.Box(y=df['petal_length']))
# Creating the figures: each have one selection cube and whatever the plot will be
fig = go.Figure()
fig.add_trace(go.Mesh3d(name="cube",
# 8 vertices of a cube
x=[0, 0, 1, 1, 0, 0, 1, 1],
y=[0, 1, 1, 0, 0, 1, 1, 0],
z=[0, 0, 0, 0, 1, 1, 1, 1],
# i, j and k give the vertices of triangles
i=[7, 0, 0, 0, 4, 4, 6, 6, 4, 0, 3, 2],
j=[3, 4, 1, 2, 5, 6, 5, 2, 0, 1, 6, 3],
k=[0, 7, 2, 3, 6, 7, 1, 1, 5, 5, 7, 6], opacity=0.20
))
fig2 = go.Figure()
fig2.add_trace(go.Mesh3d(name="cube",
# 8 vertices of a cube
x=[0 + min_x2, 0 + min_x2, 1 + min_x2, 1 + min_x2,
0 + min_x2, 0 + min_x2, 1 + min_x2, 1 + min_x2],
y=[0 + min_y2, 1 + min_y2, 1 + min_y2, 0 +
min_y2, 0 + min_y2, 1 + min_y2, 1 + min_y2, 0 + min_y2],
z=[0 + min_z2, 0 + min_z2, 0 + min_z2, 0 + min_z2, 1 +
min_z2, 1 + min_z2, 1 + min_z2, 1 + min_z2],
# i, j and k give the vertices of triangles
i=[7, 0, 0, 0, 4, 4, 6, 6, 4, 0, 3, 2],
j=[3, 4, 1, 2, 5, 6, 5, 2, 0, 1, 6, 3],
k=[0, 7, 2, 3, 6, 7, 1, 1, 5, 5, 7, 6], opacity=0.20
))
#fig.add_trace(go.Scatter3d(
# name="Bone_Marrow_CD34", x=embedding[:, 0][0:8], y=embedding[:, 1][0:8], z=embedding[:, 2][0:8]))
#fig.add_trace(go.Scatter3d(
# name="Bone_Marrow", x=embedding[:, 0][9:18], y=embedding[:, 1][9:18], z=embedding[:, 2][9:18]))
#fig.add_trace(go.Scatter3d(
# name="AML", x=embedding[:, 0][19:44], y=embedding[:, 1][19:44], z=embedding[:, 2][19:44]))
#fig.add_trace(go.Scatter3d(
# name="PB", x=embedding[:, 0][45:53], y=embedding[:, 1][45:53], z=embedding[:, 2][45:53]))
#fig.add_trace(go.Scatter3d(
# name="PBSC_CD34", x=embedding[:, 0][54:63], y=embedding[:, 1][54:63], z=embedding[:, 2][54:63]))
fig.add_trace(go.Scatter3d(
name="selected", x=plot1_axes[:, 0], y=plot1_axes[:, 1], z=plot1_axes[:, 2], customdata=df_type_copy, hovertemplate="Type: %{customdata}", marker=dict(opacity=1, color=df['type'])))
fig.add_trace(go.Scatter3d(
name="unselected", x=plot1_axes[:, 0], y=plot1_axes[:, 1], z=plot1_axes[:, 2], customdata=df_type_copy, hovertemplate="Type: %{customdata}", marker=dict(opacity=0.5, color=df['type'])))
#fig2.add_trace(go.Scatter3d(
# name="unselected", x=embedding[:, 0], y=embedding[:, 1], z=embedding[:, 2], customdata=df_type_copy, hovertemplate="Type: %{customdata}", marker=dict(opacity=0.5, color=df['type'])))
#fig2.add_trace(go.Scatter3d(
# name="selected", x=embedding[:, 0], y=embedding[:, 1], z=embedding[:, 2], customdata=df_type_copy, hovertemplate="Type: %{customdata}", marker=dict(opacity=1, color=df['type'])))
fig2.add_trace(go.Scatter3d(
name="selected", x=plot2_axes[:, 0], y=plot2_axes[:, 1], z=plot2_axes[:, 2], customdata=df_type_copy, hovertemplate="Type: %{customdata}", marker=dict(opacity=1, color=df['type'])))
fig2.add_trace(go.Scatter3d(
name="unselected", x=plot2_axes[:, 0], y=plot2_axes[:, 1], z=plot2_axes[:, 2], customdata=df_type_copy, hovertemplate="Type: %{customdata}", marker=dict(opacity=0.5, color=df['type'])))
fig.update_layout(showlegend = True)
#fig.update_xaxes(range=[1.5, 4.5])
#fig.update_yaxes(range=[3, 9])
#fig.update_yaxes(range=[3, 9])
# Organizing the app layout
app.layout = html.Div([dbc.Row([html.H1("Dash for 3D Data Exploration", style={'textAlign': 'center', 'color': '#7FDBFF'}),
html.H4(f'Select Variable for Propagation', style={
'color': '#7FDBFF'}),
dcc.Dropdown(list(df.columns),
id="box_dropdown", multi=True),
html.H5("Note: Hover over the points to see their color-coded classifications")]),
html.Br(),
dbc.Row([
dbc.Col([
dbc.Row(
html.H2("UMAP Projection of Leukemia gene expression")),
html.Br(),
dbc.Row(html.Div(children=[html.H4('Select on first graph:', style={
'color': '#7FDBFF', 'display': 'inline-block'}), daq.BooleanSwitch(id='include1', on=True, style={'display': 'inline-block'})])),
html.H4('Exploration Box Size:',
style={'color': '#7FDBFF'}),
dcc.Slider(1, 15, value=4, marks=None, id='size1',
tooltip={"placement": "bottom", "always_visible": True}),
dbc.Row(dcc.Graph(id='plot1', figure=fig)),
dbc.Row([
dbc.Col([html.H4(f'X axis: {x_col}', style={'color': '#7FDBFF'}),
dcc.Slider(min_x1, max_x1, marks=None, value=0, id='xslider1',
tooltip={"placement": "bottom", "always_visible": True})]),
dbc.Col([html.H4(f'Y axis: {y_col}', style={'color': '#7FDBFF'}),
dcc.Slider(min_y1, max_y1, marks=None, value=0, id='yslider1',
tooltip={"placement": "bottom", "always_visible": True})]),
dbc.Col([html.H4(f'Z axis: {z_col}', style={'color': '#7FDBFF'}),
dcc.Slider(min_z1, max_z1, marks=None, value=0, id='zslider1',
tooltip={"placement": "bottom", "always_visible": True})])
])], style={'border': '3px solid black'}
),
dbc.Col([
dbc.Row(
html.H2("Expression of top markers according to article below")),
html.A(
"Link to Paper", href="https://arupconsult.com/ati/acute-myeloid-leukemia-molecular-genetic-testing"),
html.Br(),
dbc.Row(html.Div(children=[html.H4('Select on second graph:', style={
'color': '#7FDBFF', 'display': 'inline-block'}), daq.BooleanSwitch(id='include2', on=True, style={'display': 'inline-block'})])),
html.H4('Exploration Box Size:',
style={'color': '#7FDBFF'}),
dcc.Slider(1, 15, marks=None, value=0, id='size2',
tooltip={"placement": "bottom", "always_visible": True}),
dbc.Row(dcc.Graph(id='plot2', figure=fig2)),
dbc.Row([
dbc.Col([
html.H4(f'X axis: {x_col2}', style={
'color': '#7FDBFF'}),
dcc.Slider(min_x2, max_x2, marks=None, value=min_x2, id='xslider2',
tooltip={"placement": "bottom", "always_visible": True})
]),
dbc.Col([html.H4(f'Y axis: {y_col2}', style={'color': '#7FDBFF'}),
dcc.Slider(min_y2, max_y2, marks=None, value=min_y2, id='yslider2',
tooltip={"placement": "bottom", "always_visible": True})
]),
dbc.Col([html.H4(f'Z axis: {z_col2}', style={'color': '#7FDBFF'}),
dcc.Slider(min_z2, max_z2, marks=None, value=min_z2, id='zslider2',
tooltip={"placement": "bottom", "always_visible": True})
])
])], style={'border': '3px solid black'}
),
dbc.Col(dcc.Graph(id='box', figure=box_plots))]
)])
# Step 4. Create a Dash layout
#list(df.columns)
# Step 5. Add callback functions
@app.callback(Output('plot1', 'figure'),
              [Input('xslider1', 'value'),
               Input('yslider1', 'value'),
               Input('zslider1', 'value'),
               Input('size1', 'value'),
               Input('include1', 'on')])
def update_figure1(X, Y, Z, size, on):
    """Reposition/resize the exploration cube on the UMAP plot and highlight
    the points it encloses.

    X, Y, Z -- cube origin (slider values); size -- cube edge length;
    on -- whether selection on this plot is enabled.
    """
    if on:
        # Redraw the cube spanning [X, X+size+1] x [Y, Y+size+1] x [Z, Z+size+1].
        new_x = [X, X, X + size + 1, X + size + 1, X, X, X + size + 1, X + size + 1]
        new_y = [Y, Y + size + 1, Y + size + 1, Y, Y, Y + size + 1, Y + size + 1, Y]
        new_z = [Z, Z, Z, Z, Z + size + 1, Z + size + 1, Z + size + 1, Z + size + 1]
        fig.update_traces(x=new_x, y=new_y, z=new_z,
                          # i, j and k give the vertices of the cube's triangles
                          i=[7, 0, 0, 0, 4, 4, 6, 6, 4, 0, 3, 2],
                          j=[3, 4, 1, 2, 5, 6, 5, 2, 0, 1, 6, 3],
                          k=[0, 7, 2, 3, 6, 7, 1, 1, 5, 5, 7, 6],
                          opacity=0.2, selector=dict(name="cube"))
        # BUG FIX: update_xaxes/update_yaxes only target 2-D axes (no-ops on a
        # 3-D scatter), and the second update_yaxes call — pasted in for Z —
        # overwrote the Y range. A 3-D plot's ranges live on the scene axes;
        # pin them so the view doesn't rescale while the cube moves.
        fig.update_scenes(xaxis=dict(range=[min_x1, max_x1]),
                          yaxis=dict(range=[min_y1, max_y1]),
                          zaxis=dict(range=[min_z1, max_z1]))
        # Collect the points whose coordinates fall inside the cube.
        # NOTE(review): the filter tests [X, X+size] while the drawn cube spans
        # [X, X+size+1]; points in the outer 1-unit shell look enclosed but are
        # not selected — confirm which extent is intended.
        em_df = pd.DataFrame(plot1_axes, columns=['0', '1', '2'])
        selected_idx = em_df[(((em_df['0'] >= X) & (em_df['0'] <= X + size))
                              & ((em_df['1'] >= Y) & (em_df['1'] <= Y + size))
                              & ((em_df['2'] >= Z) & (em_df['2'] <= Z + size)))].index.to_list()
        df_filtered = em_df.filter(items=selected_idx, axis=0)
        if not df_filtered.empty:
            # Show the enclosed points at full opacity on the "selected" trace.
            fig.update_traces(x=df_filtered.iloc[:, 0], y=df_filtered.iloc[:, 1],
                              z=df_filtered.iloc[:, 2], marker=dict(opacity=1),
                              selector=dict(name="selected"))
        else:
            fig.update_traces(marker=dict(opacity=0.5), selector=dict(name="selected"))
    else:
        # Selection disabled: hide the cube and render everything as unselected.
        fig.update_traces(marker=dict(opacity=0.5), selector=dict(name="selected"))
        fig.update_traces(opacity=0, selector=dict(name="cube"))
    return fig
# same as the above callback, but to update the second figure
@app.callback(Output('plot2', 'figure'),
              [Input('xslider2', 'value'),
               Input('yslider2', 'value'),
               Input('zslider2', 'value'),
               Input('size2', 'value'),
               Input('include2', 'on')])
def update_figure2(X, Y, Z, size, on):
    """Reposition/resize the exploration cube on the marker-expression plot
    and highlight the points it encloses (mirror of update_figure1)."""
    if on:
        # Redraw the cube spanning [X, X+size+1] x [Y, Y+size+1] x [Z, Z+size+1].
        new_x = [X, X, X + size + 1, X + size + 1, X, X, X + size + 1, X + size + 1]
        new_y = [Y, Y + size + 1, Y + size + 1, Y, Y, Y + size + 1, Y + size + 1, Y]
        new_z = [Z, Z, Z, Z, Z + size + 1, Z + size + 1, Z + size + 1, Z + size + 1]
        fig2.update_traces(x=new_x, y=new_y, z=new_z,
                           # i, j and k give the vertices of the cube's triangles
                           i=[7, 0, 0, 0, 4, 4, 6, 6, 4, 0, 3, 2],
                           j=[3, 4, 1, 2, 5, 6, 5, 2, 0, 1, 6, 3],
                           k=[0, 7, 2, 3, 6, 7, 1, 1, 5, 5, 7, 6],
                           opacity=0.2, selector=dict(name="cube"))
        # BUG FIX: as in update_figure1, the 2-D axis calls were no-ops and the
        # duplicated update_yaxes clobbered Y; set the 3-D scene ranges instead.
        fig2.update_scenes(xaxis=dict(range=[min_x2, max_x2]),
                           yaxis=dict(range=[min_y2, max_y2]),
                           zaxis=dict(range=[min_z2, max_z2]))
        # Collect the points whose coordinates fall inside the cube.
        em_df = pd.DataFrame(plot2_axes, columns=['0', '1', '2'])
        selected_idx = em_df[(((em_df['0'] >= X) & (em_df['0'] <= X + size))
                              & ((em_df['1'] >= Y) & (em_df['1'] <= Y + size))
                              & ((em_df['2'] >= Z) & (em_df['2'] <= Z + size)))].index.to_list()
        df_filtered = em_df.filter(items=selected_idx, axis=0)
        if not df_filtered.empty:
            fig2.update_traces(x=df_filtered.iloc[:, 0], y=df_filtered.iloc[:, 1],
                               z=df_filtered.iloc[:, 2], marker=dict(opacity=1),
                               selector=dict(name="selected"))
        else:
            fig2.update_traces(marker=dict(opacity=0.5), selector=dict(name="selected"))
    else:
        # Selection disabled: hide the cube and render everything as unselected.
        fig2.update_traces(marker=dict(opacity=0.5), selector=dict(name="selected"))
        fig2.update_traces(opacity=0, selector=dict(name="cube"))
    return fig2
#df_filtered = df['petal_length'].between(
# X-1, X, inclusive=True)
# add or delete any boxes based on the dropdown selections
# filter the dataframe based on the selection
#df_filtered = df[(((df[x_col] >= X) & (df[x_col] <= X+size+1))
# & ((df[y_col] >= Y) & (df[y_col] <= Y+size+1))
# & ((df[z_col] >= Z) & (df[z_col] <= Z+size+1)))]
# make sure this applies to all box plot traces in the figure
# Callback to update the box-and-whisker plot with what has been selected
@app.callback(Output('box', 'figure'), [Input('xslider1', 'value'),
                                        Input('yslider1', 'value'),
                                        Input('zslider1', 'value'),
                                        Input('xslider2', 'value'),
                                        Input('yslider2', 'value'),
                                        Input('zslider2', 'value'),
                                        Input('size1', 'value'),
                                        Input('size2', 'value'),
                                        Input('include1', 'on'),
                                        Input('include2', 'on'),
                                        Input('box_dropdown', 'value')])
def update_box(X1, Y1, Z1, X2, Y2, Z2, size1, size2, on1, on2, values):
    """Update the box-and-whisker plot with the points currently selected.

    Selection rule: indices inside box 1 (when enabled), inside box 2 (when
    enabled), or the intersection of both when both boxes are enabled.
    Globals used: plot1_axes/plot2_axes (point clouds), df (full data),
    box_plots (the figure) and boxes (default column list).
    """
    em_df = pd.DataFrame(plot1_axes, columns=['0', '1', '2'])
    em_df2 = pd.DataFrame(plot2_axes, columns=['0', '1', '2'])
    if on1 == True:
        em_filtered1 = em_df[(((em_df['0'] >= X1) & (em_df['0'] <= X1+size1))
                              & ((em_df['1'] >= Y1) & (em_df['1'] <= Y1+size1))
                              & ((em_df['2'] >= Z1) & (em_df['2'] <= Z1+size1)))].index.to_list()
    else:
        em_filtered1 = []
    if on2 == True:
        em_filtered2 = em_df2[(((em_df2['0'] >= X2) & (em_df2['0'] <= X2+size2))
                               & ((em_df2['1'] >= Y2) & (em_df2['1'] <= Y2+size2))
                               & ((em_df2['2'] >= Z2) & (em_df2['2'] <= Z2+size2)))].index.to_list()
    else:
        # BUG FIX: em_filtered2 was left undefined when box 2 was disabled,
        # so turning both boxes off raised a NameError in the else branch below.
        em_filtered2 = []
    if (on1 == True) and (on2 == True):
        # NOTE(review): the two index lists come from different point sets
        # (plot1_axes vs plot2_axes); confirm intersecting them is intended.
        em_filtered = np.intersect1d(em_filtered1, em_filtered2)
    elif (on1 == True) and (on2 == False):
        em_filtered = em_filtered1
    else:
        em_filtered = em_filtered2
    df_filtered = df.filter(items=em_filtered, axis=0)
    # names of the box traces already present in the figure
    current_boxes = []
    for part in box_plots.data:
        if part.name is not None:
            current_boxes.append(part.name)
    # dropdown selection wins; fall back to the default column list
    copy_boxes = values if values is not None else boxes
    for col in copy_boxes:
        if col not in current_boxes:
            box_plots.add_trace(go.Box(y=df_filtered[col], name=col))
        else:
            box_plots.update_traces(
                y=df_filtered[col], selector=dict(name=col))
    return box_plots
def generate_box():
    """Placeholder hook for box generation; currently does nothing."""
    return None
# Step 6. Add the server clause
# Entry point: start the Dash development server only when run as a script.
if __name__ == '__main__':
    app.run_server()
| EbruAyyorgun/3Dexplore | leukemia_dash_final.py | leukemia_dash_final.py | py | 17,987 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "dash.Dash",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "dash_bootstrap_components.themes",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "pandas.read_csv",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "panda... |
11367854731 | import torch
from torchvision import datasets, transforms
# Define paths for the data
# Define paths for the data (flowers/train, flowers/valid, flowers/test)
data_dir = 'flowers'
train_dir = data_dir + '/train'
valid_dir = data_dir + '/valid'
test_dir = data_dir + '/test'
dirs = {
    'train': train_dir,
    'val': valid_dir,
    'test': test_dir
}
# Define transforms for the training, validation, and testing sets.
# Training gets random augmentation; val/test are deterministic
# resize+centre-crop.  The mean/std triples are the usual ImageNet
# normalisation constants.
data_transforms = {
    'train': transforms.Compose([
        transforms.RandomRotation(30),
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]),
    'val': transforms.Compose([
        transforms.Resize(255),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]),
    'test': transforms.Compose([
        transforms.Resize(255),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])
}
# Load the datasets with ImageFolder (class = sub-directory name)
image_datasets = {
    x: datasets.ImageFolder(dirs[x], transform=data_transforms[x])
    for x in ['train', 'val', 'test']
}
# Using the image datasets and the transforms, define the dataloaders.
# NOTE(review): shuffle=True is applied to val/test as well -- harmless for
# accuracy metrics but confirm it is intended.
dataloaders = {
    x: torch.utils.data.DataLoader(image_datasets[x], batch_size=64, shuffle=True)
    for x in ['train', 'val', 'test']
}
{
"api_name": "torchvision.transforms.Compose",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.RandomRotation",
"line_number": 20,
"usage_type": "call"
}... |
463033195 | from base64 import decodebytes
import discord
from discord import embeds
from discord.ext import commands
import random
import datetime
from requests.models import Response
class Events(commands.Cog):
    """Cog with guild event listeners: join/leave announcement embeds and a
    global command-error handler."""
    def __init__(self, bot):
        self.bot = bot
    @commands.Cog.listener()
    async def on_ready(self):
        # Fired once the bot connection is ready.
        print("Events Cog has been loaded....\n")
    @commands.Cog.listener()
    async def on_member_join(self, member):
        """Send a randomly chosen welcome embed for the joining member."""
        # NOTE(review): channel id is hard-coded -- presumably the guild's
        # welcome channel; confirm before reusing this cog elsewhere.
        channel = self.bot.get_channel(585411891447463956)
        response = [
            f"Hopefully you wont regret joining {member.name}",
            f"this is pretty much a really dead ass server but welcome {member.name}",
            f"Enjoy the stay {member.name}",
            f"This isnt a server for you if you easily can get offended, but welcome {member.name}",
            f"I am not the admin bot, im just very based, hope you enjoy the stay {member.name}",
            f"Retarded server but ok, have some fun {member.name}",
            f"Oh i didnt notice you there, {member.name}, JK i did, dw im not your dad",
        ]
        embed = discord.Embed(description=(random.choice(response)), colour=0xBF8040)
        embed.set_thumbnail(url=member.avatar_url)
        embed.set_author(name=member.name, icon_url=member.avatar_url)
        embed.set_footer(text=member.guild, icon_url=member.guild.icon_url)
        embed.timestamp = datetime.datetime.utcnow()
        await channel.send(embed=embed)
    @commands.Cog.listener()
    async def on_member_remove(self, member):
        """Send a randomly chosen leave embed for the departing member."""
        channel = self.bot.get_channel(585411891447463956)
        response = [
            f"lol, pussy {member.name}",
            f"no one really wanted you to stay here anyway {member.name}",
            f"got offended and left to cry?, {member.name}",
            f"aw you didnt get personal suck jobs from saam? lgbtq+ ass",
            f"some corny dude aka, {member.name} left",
            f"{member.name} was a pain in the asshole anyway",
        ]
        embed = discord.Embed(
            description=(random.choice(response)),
            colour=0xBF8040,
        )
        embed.set_thumbnail(url=member.avatar_url)
        embed.set_author(name=member.name, icon_url=member.avatar_url)
        embed.set_footer(text=member.guild, icon_url=member.guild.icon_url)
        embed.timestamp = datetime.datetime.utcnow()
        await channel.send(embed=embed)
    @commands.Cog.listener()
    async def on_command_error(self, ctx, error):
        """Reply with a friendly message for cooldown/check failures; ignore
        unknown commands and bad user input."""
        ignored = (commands.CommandNotFound, commands.UserInputError)
        if isinstance(error, ignored):
            return
        if isinstance(error, commands.CommandOnCooldown):
            # split the remaining cooldown into h/m/s for the message
            m, s = divmod(error.retry_after, 60)
            h, m = divmod(m, 60)
            if int(h) == 0 and int(m) == 0:
                await ctx.send(
                    f"```you must wait {int(s)} seconds to use this command!```"
                )
            elif int(h) == 0 and int(m) != 0:
                await ctx.send(
                    f"```you must wait {int(m)} minutes, {int(s)} seconds to use this command!```"
                )
            else:
                await ctx.send(
                    f"```you must wait {int(h)}hours, {int(m)} minutes, {int(s)} seconds to use this command!```"
                )
        elif isinstance(error, commands.CheckAnyFailure):
            await ctx.send(
                "```I don't understand what you mean, can you refer to the help command```"
            )
        # NOTE(review): this re-raises even after a reply was sent above
        # (so handled errors still reach the console) -- confirm intended.
        raise error
def setup(bot):
    """discord.py extension hook: register the Events cog on the bot."""
    cog = Events(bot)
    bot.add_cog(cog)
| Oreofreakshake/fodifulhu-discord-bot | cogs/events.py | events.py | py | 3,572 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "discord.ext.commands.Cog",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.Cog.listener",
"line_number": 15,
"usage_type": "call"
},
{
... |
14359245018 | from classifiers.NCC import NCC
from classifiers.NBC import NBC
from classifiers.GNB import GNB
from sklearn.naive_bayes import GaussianNB
from sklearn import metrics
from sklearn import datasets
import MNIST
import numpy as np
def modify_data(data):
    """Bucket every feature value of each sample into one of three bins.

    Values below 5 map to 0, values in [5, 10) map to 1 and values of 10 or
    more map to 2 (the sklearn digit pixels span 0..16, so this coarsens each
    pixel to a ternary feature).

    :param data: iterable of per-sample value sequences (e.g. digits.data)
    :return: numpy array of the binned samples
    """
    # Comprehension replaces the original nested loop-and-append.
    binned = [
        np.array([0 if value < 5 else 1 if value < 10 else 2 for value in row])
        for row in data
    ]
    return np.array(binned)
def modify_target(target):
    """Collapse the ten digit labels into three coarse classes.

    Labels 0-2 become 0, labels 3-5 become 1, labels 6-9 become 2.
    """
    coarse = [0 if label < 3 else 1 if label < 6 else 2 for label in target]
    return np.array(coarse)
def main():
    """Train and evaluate the NCC classifier on three datasets: raw sklearn
    digits (2.1), the coarsened digits (2.2) and MNIST_Light (2.3)."""
    # SciKitLearn digits (2.1) -- 70/30 split, taken in order (no shuffle)
    digits = datasets.load_digits()
    split = int(0.7 * digits.data.shape[0])
    train_feature = digits.data[:split]
    train_label = digits.target[:split]
    test_feature = digits.data[split:]
    test_labels = digits.target[split:]
    # SciKitLearn digits summarised (2.2) -- pixels bucketed to {0, 1, 2}
    modified_digits_data = modify_data(digits.data)
    modified_digits_labels = digits.target
    train_feature_mod = modified_digits_data[:split]
    train_label_mod = modified_digits_labels[:split]
    test_feature_mod = modified_digits_data[split:]
    test_label_mod = modified_digits_labels[split:]
    # MNIST_Light (2.3) -- loaded from PNGs by the project's MNIST helper
    mnist = MNIST.MNISTData('MNIST_Light/*/*.png')
    train_features_mnist, test_features_mnist, train_labels_mnist, test_labels_mnist = mnist.get_data()
    # gnb = GNB()
    # gnb.fit(train_feature, train_label)
    # y_pred = gnb.predict(test_feature)
    # gnb = GaussianNB()
    # gnb.fit(train_feature, train_label)
    # y_pred = gnb.predict(test_feature)
    #
    # print("Classification report SKLearn GNB:\n%s\n"
    #       % (metrics.classification_report(test_labels, y_pred)))
    # print("Confusion matrix SKLearn GNB:\n%s" % metrics.confusion_matrix(test_labels, y_pred))
    # NCC on the raw digits (2.1)
    ncc = NCC()
    ncc.fit(train_feature, train_label)
    y_pred = ncc.predict(test_feature)
    print("Classification report NCC (dataset 2.1):\n%s\n"
          % (metrics.classification_report(test_labels, y_pred)))
    print("Confusion matrix NCC:\n%s" % metrics.confusion_matrix(test_labels, y_pred))
    # NCC on the coarsened digits (2.2)
    ncc2 = NCC()
    ncc2.fit(train_feature_mod, train_label_mod)
    y_pred_mod = ncc2.predict(test_feature_mod)
    print("Classification report NCC (dataset 2.2):\n%s\n"
          % (metrics.classification_report(test_label_mod, y_pred_mod)))
    print("Confusion matrix NCC:\n%s" % metrics.confusion_matrix(test_label_mod, y_pred_mod))
    # NCC on MNIST_Light (2.3)
    ncc3 = NCC()
    ncc3.fit(train_features_mnist, train_labels_mnist)
    y_pred_mnist = ncc3.predict(test_features_mnist)
    print("Classification report NCC (dataset 2.3):\n%s\n"
          % (metrics.classification_report(test_labels_mnist, y_pred_mnist)))
    print("Confusion matrix NCC:\n%s" % metrics.confusion_matrix(test_labels_mnist, y_pred_mnist))
# Run the full evaluation when executed as a script.
if __name__ == "__main__":
    main()
{
"api_name": "numpy.array",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "sklearn.datasets.load_digits",
... |
28896311683 | #!/usr/bin/env python
# coding: utf-8
import numpy as np
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
import os
from os import path
import re
class WeatherMax():
    '''
    Convert a directory of DLG weather CSV files into a partitioned parquet
    dataset and report details of the hottest observation.

    Pass the directory path with forward slashes.  Usage: csvtodataset()
    (chdirs into the directory), builddataset() (writes the parquet files),
    then the weather_* accessors or prntresult().
    '''
    def __init__(self, directorypath):
        self.directorypath = directorypath
        self.df = None
    def csvtodataset(self):
        # Make the source directory the cwd; patable() lists files from cwd.
        os.chdir(self.directorypath)
    # read csv files in the directory and write them out as parquet
    def patable(self):
        for file in os.listdir():
            # BUG FIX: the body used to run for every directory entry with
            # the last *.csv seen (and raised NameError if the first entry
            # was not a csv); only csv files are processed now.
            if not file.endswith(".csv"):
                continue
            df = pd.read_csv(os.path.abspath(file))
            # select required columns and enrich data with partition attributes
            df = df[['ForecastSiteCode', 'ObservationTime', 'ObservationDate',
                     'ScreenTemperature', 'SiteName', 'Region']]
            df = df.sort_values(by=['ForecastSiteCode', 'ObservationDate', 'ObservationTime'])
            df = df.reset_index(drop=True)
            df['ObsYear'] = pd.DatetimeIndex(df['ObservationDate']).year
            df['ObsMonth'] = pd.DatetimeIndex(df['ObservationDate']).month
            df['ObsDay'] = pd.DatetimeIndex(df['ObservationDate']).day
            table = pa.Table.from_pandas(df)
            # standalone snappy parquet file per csv, kept for testing
            pq.write_table(table, file.replace(".csv", ".parquet.snappy"),
                           compression='snappy')
            # folder schema partitioned by year/month/region
            pq.write_to_dataset(table, root_path='weather_results',
                                partition_cols=['ObsYear', 'ObsMonth', 'Region'])
    def builddataset(self):
        self.patable()
    def weathertable(self):
        """Load the whole partitioned dataset back into a pandas DataFrame."""
        weather_data = pq.ParquetDataset('weather_results/')
        table = weather_data.read()
        weather_table_df = table.to_pandas()
        return weather_table_df
    def weathermax_row(self):
        """Return the observation row with the highest ScreenTemperature."""
        weather_table_df = self.weathertable()
        weather_result = weather_table_df.loc[weather_table_df['ScreenTemperature'].idxmax()]
        return weather_result
    # return max temp date
    def weather_date(self):
        weather_result = self.weathermax_row()
        # BUG FIX: was [:9], which dropped the final digit of the day for
        # 'YYYY-MM-DD...' timestamps (e.g. '2016-03-1' instead of '2016-03-17').
        date = weather_result['ObservationDate'][:10]
        return date
    # return max temp
    def weather_temp(self):
        weather_result = self.weathermax_row()
        temp = weather_result['ScreenTemperature']
        return temp
    # return max temp region
    def weather_region(self):
        weather_result = self.weathermax_row()
        region = weather_result['Region']
        return region
    # print questions and answers
    def prntresult(self):
        a = "Which date was the hottest day? " + str(self.weather_date()) + '\n'
        b = "What was the (max) temperature on that day? " + str(self.weather_temp()) + '\n'
        c = "In which region was the hottest day? " + str(self.weather_region())
        result = print(a, b, c)  # print() returns None; kept for interface parity
        return result
| bigdubya/Data_Engineer_DLG | Main_folder/weathermax.py | weathermax.py | py | 3,239 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.chdir",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "os.path.abspath",
"line_numbe... |
2216377889 | import cv2
import numpy as np
from PIL import Image
def main():
    """Show the default camera feed, decode any visible QR code, print its
    payload and display the rectified code image until 'q' is pressed.

    Changes vs the original: the QRCodeDetector is created once instead of
    per frame, the unused grayscale/PIL conversion was removed, failed frame
    grabs break the loop instead of crashing, and the capture/windows are
    released on exit.
    """
    capture = cv2.VideoCapture(0)
    # The detector is reusable; re-instantiating it every frame was wasted work.
    qr_detector = cv2.QRCodeDetector()
    while True:
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
        ret, frame = capture.read()
        if not ret:
            # Camera gone / no frame: previously this crashed inside imshow.
            break
        cv2.imshow('Current', frame)
        retval, points, straight_qrcode = qr_detector.detectAndDecode(frame)
        if len(retval) > 0:
            print(retval)
            straight_qrcode = np.uint8(straight_qrcode)
            cv2.imshow('rectified qr code', straight_qrcode)
    # Release the camera handle and close the preview windows.
    capture.release()
    cv2.destroyAllWindows()
if __name__ == "__main__":
    # main() returns None; the assignment is kept unchanged for parity.
    code = main()
| jpbel65/Robot-D3-E9-H2019 | scripts/QRTest.py | QRTest.py | py | 680 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "cv2.VideoCapture",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number... |
75216111782 | import time
#import img2txt
import os
import json
from PIL import Image
def load_file(fpath):  # fpath is a concrete .odgt file; turns it into a list
    """Read an .odgt file (one JSON record per line) into a list of dicts."""
    assert os.path.exists(fpath)  # assert() raise-if-not
    records = []
    with open(fpath, 'r') as fid:
        for raw_line in fid:
            records.append(json.loads(raw_line.strip('\n')))
    return records
def img2txt(odgtpath, respath):
    """Write one '/Image/<ID>.jpg' line per odgt record into respath."""
    records = load_file(odgtpath)  # parse the odgt annotation file
    print(os.getcwd())
    with open(respath, 'w') as txt:
        for record in records:
            image_name = "/Image/" + record['ID'] + '.jpg'
            txt.write(image_name + '\n')
def tonormlabel(odgtpath, storepath):
    """Convert CrowdHuman-style odgt annotations into YOLO-format txt labels.

    For every record, '<storepath><ID>.txt' is written with one line per
    ground-truth full-body box: 'class cx cy w h', where the centre/size are
    normalised by the image dimensions read from ./Image/<ID>.jpg.

    BUG FIX: the per-record label file and the PIL image were previously
    opened but never closed; both now use context managers.
    """
    records = load_file(odgtpath)
    record_list = len(records)
    print(record_list)
    categories = {}  # tag -> 1-based id, assigned in order of first appearance
    for i in range(record_list):
        txt_name = storepath + records[i]['ID'] + '.txt'
        file_name = records[i]['ID'] + '.jpg'
        with Image.open("./Image/" + file_name) as im:
            height = im.size[1]
            width = im.size[0]
        gt_box = records[i]['gtboxes']
        gt_box_len = len(gt_box)
        with open(txt_name, 'w') as file:
            for j in range(gt_box_len):
                category = gt_box[j]['tag']
                if category not in categories:  # first time this tag is seen
                    new_id = len(categories) + 1
                    categories[category] = new_id
                category_id = categories[category]
                fbox = gt_box[j]['fbox']  # full-body box: [x, y, w, h] in pixels
                norm_w = fbox[2] / width
                norm_h = fbox[3] / height
                # centre coordinates normalised to the image (YOLO convention)
                norm_x = fbox[0] / width + 0.5 * norm_w
                norm_y = fbox[1] / height + 0.5 * norm_h
                blank = ' '
                line = (str(category_id - 1) + blank + '{:.6f}'.format(norm_x) + blank
                        + '{:.6f}'.format(norm_y) + blank + '{:.6f}'.format(norm_w)
                        + blank + '{:.6f}'.format(norm_h))
                # no trailing newline after the last box (matches original output)
                if j == gt_box_len - 1:
                    file.write(line)
                else:
                    file.write(line + '\n')
if __name__ == '__main__':
    # Input odgt annotations and output locations (cluster paths kept as reference).
    odgtpath = "./annotation_val.odgt" #"/datasets/crowdhuman/annotation_train.odgt"
    # NOTE(review): respath is unused here -- img2txt() is never called.
    respath = "./annotation_name.txt" #"/datasets/crowdhuman/train_name.txt"
    storepath = "./annotation/" #"/datasets/crowdhuman/labels/train_all/Image/"
    print(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))) # print formatted start time
    start = time.time()
    tonormlabel(odgtpath, storepath)
    end = time.time()
    print(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())))
    print('已完成轉換,共耗時{:.5f}s'.format(end - start))
{
"api_name": "os.path.exists",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "json.loads",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.getcwd",
"line_number": 1... |
24955171162 | from fastapi import APIRouter
from BusinessLayer.PersonaNatural import *
from EntityLayer.PersonaNaturalEntity import *
from fastapi.encoders import jsonable_encoder
from Utilidades.Entidades.ResponseAPI import ResponseAPI, ResponseAPIError
# Router and route-prefix name for the PersonaNatural CRUD API.
PersonaNaturalRouter = APIRouter()
ApiName = "PersonaNatural"
# Every handler follows the same pattern: delegate to the business layer,
# wrap the result in ResponseAPI on success, print the exception and return
# a generic error payload on any failure.
@PersonaNaturalRouter.post(f"/api/{ApiName}/Save", tags=[ApiName])
def Save(Ent: PersonaNaturalSaveModel):
    """Create or update a PersonaNatural record."""
    try:
        Ent = PersonaNatural.Save(Ent)
        return jsonable_encoder(ResponseAPI.Response(Ent))
    except Exception as e:
        print(e)
        return jsonable_encoder(ResponseAPIError.Error())
@PersonaNaturalRouter.get(f"/api/{ApiName}/GetItems/", tags=[ApiName])
def GetItems():
    """List all PersonaNatural records."""
    try:
        jsonData = PersonaNatural.GetItems()
        return jsonable_encoder(ResponseAPI.Response(jsonData))
    except Exception as e:
        print(e)
        return jsonable_encoder(ResponseAPIError.Error())
@PersonaNaturalRouter.get(f"/api/{ApiName}/GetItem/{{Id}}/", tags=[ApiName])
def GetItem(Id: int):
    """Fetch a single record by numeric id."""
    try:
        jsonData = PersonaNatural.GetItem(Id)
        return jsonable_encoder(ResponseAPI.Response(jsonData))
    except Exception as e:
        print(e)
        return jsonable_encoder(ResponseAPIError.Error())
@PersonaNaturalRouter.delete(f"/api/{ApiName}/Delete/{{Id}}", tags=[ApiName])
def Delete(Id: int):
    """Delete a record by numeric id."""
    try:
        jsonData = PersonaNatural.Delete(Id)
        return jsonable_encoder(ResponseAPI.Response(jsonData))
    except Exception as e:
        print(e)
        return jsonable_encoder(ResponseAPIError.Error())
@PersonaNaturalRouter.get(f"/api/{ApiName}/GetMainItems", tags=[ApiName])
def GetMainItems():
    """List the main (summary) records."""
    try:
        jsonData = PersonaNatural.GetMainItems()
        return jsonable_encoder(ResponseAPI.Response(jsonData))
    except Exception as e:
        print(e)
        return jsonable_encoder(ResponseAPIError.Error())
@PersonaNaturalRouter.get(f"/api/{ApiName}/GetCabeceraItem/{{Id}}/", tags=[ApiName])
def GetCabeceraItem(Id: int):
    """Fetch the header ('cabecera') record by numeric id."""
    try:
        jsonData = PersonaNatural.GetCabeceraItem(Id)
        return jsonable_encoder(ResponseAPI.Response(jsonData))
    except Exception as e:
        print(e)
        return jsonable_encoder(ResponseAPIError.Error())
| josedtl/AlmacenLogistico | Proyecto/server/routes/PersonaNaturalRoute.py | PersonaNaturalRoute.py | py | 2,243 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "fastapi.APIRouter",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "fastapi.encoders.jsonable_encoder",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "Utilidades.Entidades.ResponseAPI.ResponseAPI.Response",
"line_number": 15,
"usage_type... |
14049558832 | from . import views
from django.urls import path
urlpatterns = [
path('admin_dashboard', views.admin_dashboard, name="admin_dashboard"),
path('admin_packages', views.admin_packages, name="admin_packages"),
path('update_package/<package_id>', views.update_package, name="update_package"),
path('delete_package/<package_id>', views.delete_package, name="delete_package"),
] | aniatki/pro-dad | admin_dashboard/urls.py | urls.py | py | 389 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.urls.path",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
... |
7570177447 | from tkinter import Frame, DoubleVar
from tkinter.ttk import Scale
from typing import Dict, Tuple, List
from ui.misc import NoneTypeCheck
from ui.Components import Component
class Range(Component):
    """
    Tkinter range (slider) component: a ttk.Scale bound to a DoubleVar.
    """
    def __init__(self, parent: Frame, style: Dict, geometry: Tuple[int, ...] | List[int]):
        super().__init__(parent, style, geometry)
        # Slider bounds and initial value read from the style dict
        # (NoneTypeCheck presumably returns the fallback when the styled
        # value is None -- confirm in ui.misc).
        self.start = NoneTypeCheck(style.get("start"), 0)
        self.end = NoneTypeCheck(style.get("end"), 10)
        self.defaultValue = NoneTypeCheck(style.get("default"), 1)
        self.value = DoubleVar()  # live slider value
        self.fetchData = self.get  # expose get() under the generic fetchData name
        self.setComponent()
    def setComponent(self) -> None:
        # Build the ttk.Scale inside the frame provided by Component.
        self.component = Scale(self.componentFrame,
                               from_=self.start,
                               to=self.end,
                               variable=self.value,
                               value=self.defaultValue)
    def get(self) -> float:
        """Return the current slider value."""
        return self.value.get()
| TwoSails/maze-generator | ui/Components/range.py | range.py | py | 987 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "ui.Components.Component",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "tkinter.Frame",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
... |
31564159525 | #!/usr/bin/env python3
"""
1 Aug 2022 Taeho Choi created for longhorn API python client
KUBECONFIG needs to be imported properly before running this script
"""
import time
import longhorn
import os
import json
#Warning msg to user
# Warning msg to user
print("#"*40)
print("!! WARNING: This script can cause catastrophic consequences to your infrastructure !! ")
print("!! Please make sure you understand what you are trying to achieve clearly !! ")
print("#"*40)
print("Please make sure you have imported k8s config file correctly")
time.sleep(1)
print("Running kube proxy to redirect svc port to local")
# Background port-forward of the longhorn-frontend service to localhost:8080.
os.system("kubectl port-forward services/longhorn-frontend 8080:http -n longhorn-system & ")
time.sleep(3)
# not using proxy since it will be internal network
# NOTE(review): os.system runs these exports in a child shell, so they do not
# affect this Python process or later os.system calls -- confirm intent.
os.system("export http_proxy= ; export https_proxy= ")
# If automation/scripting tool is inside the same cluster in which Longhorn is installed
longhorn_url = 'http://longhorn-frontend.longhorn-system/v1'
# If forwarding `longhorn-frontend` service to localhost
# (this assignment overrides the in-cluster URL above).
longhorn_url = 'http://localhost:8080/v1'
# Check to see if port is listening
os.system("curl http://localhost:8080/v1")
# Create longhorn client object with given URL
client = longhorn.Client(url=longhorn_url)
def print_menu():
    """Print the numbered menu of available operations."""
    print("\n")
    # Iterate key/value pairs directly instead of indexing per key.
    for key, description in menu_options.items():
        print(key, '--', description)
    print("\n")
#Volume Ops
# Volume operations (options 1-7); all use the module-level longhorn client.
def option1():
    """List all volumes with state, controller node, size and workload info."""
    print('Handle option \'Option 1\'')
    volumes = client.list_volume()
    # Round-trip through json to turn the client objects into plain dicts.
    vol_json = json.loads(json.dumps(volumes,default=vars))
    print("#"*200)
    print("These are the longhorn volumes")
    print("#"*200+"\n")
    for _ijson in vol_json["data"]:
        try:
            _pod_name = _ijson["kubernetesStatus"]['workloadsStatus'][0].get('podName')
        except TypeError:
            # workloadsStatus is None/empty for unattached volumes
            _pod_name = "N/A"
        print("ID:{} STATE:{} CONTROLLER:{:<15} SIZE:{:<4}GB NS:{:<15} POD_NAME:{:<15}".format(_ijson["id"], _ijson["state"], _ijson["replicas"][0].get("hostId"),int( _ijson["size"])/1024/1024/1024,_ijson["kubernetesStatus"].get("namespace"), _pod_name))
def option2():
    """Dump a single volume (by name/id) as formatted JSON."""
    print('Handle option \'Option 2\'')
    vol_id = input("what is the volume name or id to check: ")
    output = client.by_id_volume(id=vol_id)
    print(json.dumps(output, indent=4, default=vars))
def option3():
    """Attach a volume to a node."""
    print('Handle option \'Option 3\'')
    vol_id = input("what is the volume name or id to attach: ")
    node_id = input("what is the node id to attach: ")
    testvol1 = client.by_id_volume(id=vol_id)
    output = testvol1.attach(hostId=node_id)
    print(json.dumps(output, indent=4, default=vars))
def option4():
    """Detach a volume from its current node."""
    print('Handle option \'Option 4\'')
    vol_id = input("what is the volume name or id to detach: ")
    testvol1 = client.by_id_volume(id=vol_id)
    print(type(testvol1))
    print(testvol1)
    output = testvol1.detach()
    print(json.dumps(output, indent=4, default=vars))
def option5():
    """Create a named snapshot of a volume."""
    print('Handle option \'Option 5\'')
    vol_id = input("what is the volume name or id for snapshot: ")
    snapshot = input("what is the snapshot name to create: ")
    vol_client = client.by_id_volume(id=vol_id)
    output = vol_client.snapshotCreate(name=snapshot)
    print(json.dumps(output, indent=4, default=vars))
def option6():
    """Create a backup from an existing snapshot."""
    print('Handle option \'Option 6\'')
    vol_id = input("what is the volume name or id for backup: ")
    snapshot_name = input("what is the snampshot name to backup: ")
    vol_client = client.by_id_volume(id=vol_id)
    output = vol_client.snapshotBackup(name=snapshot_name)
    print(json.dumps(output, indent=4, default=vars))
def option7():
    """Change the replica count of a volume."""
    print('Handle option \'Option 7\'')
    vol_id = input("what is the volume name or id to update replica count: ")
    no_rep = int(input("what is the new no of replica count?: "))
    vol_client = client.by_id_volume(id=vol_id)
    output = vol_client.updateReplicaCount(replicaCount=no_rep)
    print(json.dumps(output, indent=4, default=vars))
#Node Ops
def option8():
    """List the ids of all longhorn nodes."""
    print('Handle option \'Option 8\'')
    volumes = client.list_node()
    vol_json = json.loads(json.dumps(volumes,default=vars))
    print("#"*40)
    print("These are the longhorn nodes")
    print("#"*40)
    for i in vol_json["data"]:
        print(i["id"])
    print("#"*40)
def option9():
    """Dump a single node (by name/id) as formatted JSON."""
    print('Handle option \'Option 9\'')
    node_id = input("what is the node id or name to check: ")
    output = client.by_id_node(id=node_id)
    print(json.dumps(output, indent=4, default=vars))
def option10():
    """Disable volume scheduling on a node."""
    print('Handle option \'Option 10\'')
    node_id = input("what is the node id or name to disable: ")
    node1_obj = client.by_id_node(id=node_id)
    output = client.update(node1_obj, allowScheduling=False)
    print(json.dumps(output, indent=4, default=vars))
def option11():
    """Re-enable volume scheduling on a node."""
    print('Handle option \'Option 11\'')
    node_id = input("what is the node id or name to enable: ")
    node1_obj = client.by_id_node(id=node_id)
    output = client.update(node1_obj, allowScheduling=True)
    print(json.dumps(output, indent=4, default=vars))
#Setting Ops
def option12():
    """Dump all cluster settings as formatted JSON."""
    print('Handle option \'Option 12\'')
    settings = client.list_setting()
    # NOTE(review): settings_json is computed but unused -- confirm before removing.
    settings_json = json.loads(json.dumps(settings,default=vars))
    print("#"*40)
    print("These are the longhorn cluster settings")
    print("#"*40)
    print(json.dumps(settings,indent=4,default=vars))
def option13():
    """Dump a single setting (by name/id) as formatted JSON."""
    print('Handle option \'Option 13\'')
    setting_id = input("what is setting id or name to check: ")
    output = client.by_id_setting(id=setting_id)
    print(json.dumps(output, indent=4, default=vars))
def option14():
    """Update a setting to a new value."""
    print('Handle option \'Option 14\'')
    setting_id = input("what is the setting id or name to update: ")
    new_val = input("what is the new value for the setting: ")
    setting_obj = client.by_id_setting(id=setting_id)
    output = client.update(setting_obj, value=new_val)
    print(json.dumps(output, indent=4, default=vars))
def option15():
    """Exit the program."""
    print('Handle option \'Option 15\'')
    print('Thanks message before exiting')
    exit()
#Set dict for menu option and func mapping
# Menu number -> human-readable description shown by print_menu().
menu_options = {
    #Volume operation
    1: 'List all volumes',
    2: 'Get volume by NAME/ID',
    3: 'Attach volume to node',
    4: 'Detach TESTVOL1',
    5: 'Create a snapshot of TESTVOL1 with NAME',
    6: 'Create a backup from a snapshot NAME',
    7: 'Update the number of replicas of TESTVOL1',
    #node operation
    8: 'List all nodes',
    9: 'Get node by NAME/ID',
    10: 'Disable scheduling for NODE1',
    11: 'Enable scheduling for NODE1',
    #Setting operation
    12: 'List all settings',
    13: 'Get setting by NAME/ID',
    14: 'Update a setting',
    15: 'Exit',
}
# Dispatch table: menu number -> handler function.
options = {
    1: option1,
    2: option2,
    3: option3,
    4: option4,
    5: option5,
    6: option6,
    7: option7,
    8: option8,
    9: option9,
    10: option10,
    11: option11,
    12: option12,
    13: option13,
    14: option14,
    15: option15,
}
# Interactive menu loop: prompt for a choice and dispatch to its handler.
if __name__ == '__main__':
    while True:
        print_menu()
        option = ''
        try:
            option = int(input('Enter your choice: '))
        except ValueError:
            # BUG FIX: the bare `except:` here also swallowed
            # KeyboardInterrupt, so Ctrl-C could not stop the prompt loop.
            print('Wrong input. Please enter a number ...')
        # Check what choice was entered and act accordingly
        if option in options:
            options[option]()
        else:
            print('Invalid option. Please enter a number between 1 and 15.')
| nogodan/longhorn_python_client | longhorn_client.py | longhorn_client.py | py | 7,674 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "time.sleep",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.system",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "os.system",
"line_number": 27,
... |
33627483742 | from sklearn.model_selection import train_test_split
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import pickle
import torch
import torch.nn as nn
import torch.optim as optim
from train_lgbm import *
import lightgbm as lgb
from torch.utils.data import TensorDataset,DataLoader
import torch.nn.functional as F
from sklearn.model_selection import train_test_split
import math
import time
import random
from sklearn.preprocessing import *
from model_train_util_vectorized import *
import sys
import os
import os.path as osp
import time
import datetime
from itertools import repeat
from typing import Optional
import gc
import pickle
from sklearn.preprocessing import *
from sys import argv
Device = torch.device('cuda:0') if torch.cuda.is_available() else torch.device('cpu')
print('using device:', Device)
dataset_name = argv[1]
use_gbm = argv[2]
use_botspotpp = argv[3]
ROOT_DIR = f'{dataset_name}/'
class BotSpotTrans(object):
    """Load the train/test CSVs and build the graph inputs for BotSpot.

    Exposes edge-index arrays of (combin_index, device_index, target) and the
    feature matrices for channel-campaign ("combin") and device nodes.
    """

    def __init__(self,train_filepath,test_filepath):
        train_file = train_filepath
        test_file = test_filepath
        print("Loading train & test file...")
        train_df = pd.read_csv(train_file)  # training edges
        test_df = pd.read_csv(test_file)  # test edges
        total_df = pd.concat([train_df, test_df], axis=0)
        print("Graph Generating...")
        # Each edge row is (channel-campaign index, device index, label).
        self.edge_index = total_df[["combin_index", "device_index", "target"]].astype(int).values
        self.edge_index_train = train_df[["combin_index", "device_index", "target"]].astype(int).values
        self.edge_index_test = test_df[["combin_index", "device_index", "target"]].astype(int).values
        # stat_columns: numeric/statistical feature names.
        # category_columns: label-encoded categorical feature names; its first
        # entry is treated as channel_id below -- TODO confirm file layout.
        stat_columns_file = osp.join(ROOT_DIR, "stat_columns.txt")
        category_columns_file = osp.join(ROOT_DIR, "category_columns.txt")
        stat_columns = self.pickle_load(stat_columns_file)
        category_columns = self.pickle_load(category_columns_file)
        # All feature column names combined.
        feature_columns = stat_columns + category_columns
        # Only one stat column is scaled (presumably the "ctit" column, the
        # second-to-last stat column -- TODO confirm against the data files).
        normalized_columns = [stat_columns[-2]]
        except_normalized_columns = [column for column in stat_columns if column not in normalized_columns]
        # Channel-campaign node features: unscaled stats + channel_id.
        combin_feature_columns = except_normalized_columns + [category_columns[0]]
        # Device node features: the scaled column + remaining categoricals.
        device_feature_columns = normalized_columns + category_columns[1:]
        # Column selections including each node's index column.
        device_columns = ["device_index"] + device_feature_columns
        combin_columns = ["combin_index"] + combin_feature_columns
        # One row per device node, ordered by device_index.
        device_df = total_df[device_columns].sort_values(["device_index"])
        device_df.drop_duplicates(subset="device_index", keep="first", inplace=True)
        # One row per channel-campaign node, ordered by combin_index.
        combin_df = total_df[combin_columns].sort_values(["combin_index"])
        combin_df.drop_duplicates(subset="combin_index", keep="first", inplace=True)
        # Robust-scale the normalized column (median/IQR based, outlier-safe).
        norm_data = RobustScaler().fit_transform(device_df.loc[:,normalized_columns[0]].values.reshape((-1,1)))
        device_df.loc[:,normalized_columns[0]] = norm_data.reshape(-1,)
        print("feature matrix generating...")
        self.device_matrix = device_df[device_feature_columns].values
        # float16 keeps the channel-campaign matrix small in memory.
        self.combin_matrix = combin_df[combin_feature_columns].astype('float16').values

    def pickle_dump(self, data, filename):
        """Serialize data to filename with the highest pickle protocol."""
        with open(filename, 'wb') as f:
            pickle.dump(data, f, protocol=pickle.HIGHEST_PROTOCOL)

    def pickle_load(self, filename):
        """Deserialize and return the pickled object stored in filename."""
        with open(filename, 'rb') as f:
            return pickle.load(f)
class Self_Attention(nn.Module):
    """Classical multi-head self-attention (as in BERT), mean-pooled over
    the sequence dimension and projected through a final linear layer."""

    def __init__(self, emb_size, num_head):
        super(Self_Attention, self).__init__()
        self.Q = nn.ModuleList([])
        self.K = nn.ModuleList([])
        self.V = nn.ModuleList([])
        head_dim = emb_size // num_head
        self.output_size = head_dim
        self.num_head = num_head
        # Note: final_linear is created before the per-head projections so the
        # parameter initialization order matches the original implementation.
        self.final_linear = nn.Linear(emb_size, emb_size)
        for _ in range(num_head):
            self.Q.append(nn.Linear(emb_size, head_dim))
            self.K.append(nn.Linear(emb_size, head_dim))
            self.V.append(nn.Linear(emb_size, head_dim))

    def calc_attention(self, X, Q, K, V):
        """Scaled dot-product attention for a single head; X is (B, T, E)."""
        query, key, value = Q(X), K(X), V(X)
        scores = torch.bmm(query, key.transpose(2, 1).contiguous())
        attn_weights = torch.softmax(scores / math.sqrt(self.output_size), dim=-1)
        return torch.bmm(attn_weights, value)

    def forward(self, X):
        # Run every head, concatenate along the feature axis, project, and
        # average over the token dimension.
        per_head = [
            self.calc_attention(X, self.Q[h], self.K[h], self.V[h])
            for h in range(self.num_head)
        ]
        fused = torch.cat(per_head, dim=-1)
        return self.final_linear(fused).mean(dim=1)
class BotSpot(nn.Module):
    """Graph model over the (channel-campaign, device) bipartite install graph.

    Three variants share this class: plain BotSpot, BotSpot++ (super-device
    convolution, enabled via ``use_enhance_botspot``) and an optional LightGBM
    leaf-embedding branch (``use_gbm``).  The constructor signature and the
    ``forward`` interface are unchanged from the original implementation.
    """

    def __init__(self, edge_index_train, edge_index_test, channel_feats, device_feats,
                 device_package=None, use_enhance_botspot=False, use_gbm=False, gbm_model=None):
        super(BotSpot, self).__init__()
        if use_gbm:
            leaf_dim = 20
            assert gbm_model is not None
            self.leaf_emb_models = nn.ModuleList()
            for n in range(gbm_model.n_estimators):
                self.leaf_emb_models.append(nn.Embedding(31, leaf_dim))  # 31 is the max depth of decision tree
            self.gbm_best_model = gbm_model
        self.use_gbm = use_gbm
        self.self_attn_module = Self_Attention(32, 2)
        self.use_enhanced_botspot = use_enhance_botspot
        self.edge_index_train = edge_index_train
        self.edge_index_test = edge_index_test
        self.edge_num_train = edge_index_train.shape[0]
        self.edge_num_test = edge_index_test.shape[0]
        self.channel_feats = torch.from_numpy(channel_feats).float()  # feature matrix for channel-campaign nodes
        self.device_feats = torch.from_numpy(device_feats).float()  # package_name already removed in the device feature matrix
        N_chan = channel_feats.shape[1] - 1 + 16  # remove one col and add embedding size of 16
        N_dev = 16 * (device_feats.shape[1] - 1) + 1  # add one col of ctit, others are embeddings
        self.edge_matrix = self.gen_adj_matrix()  # adj matrix of shape (N_device, N_channel_campaign)
        self.device_split_val = 1  # the first col is ctit for device
        self.channel_split_val = -1  # the last col is channel id
        self.channel_neibr_cache = {}  # device neighbors (bool col) per channel-campaign node, training stage
        self.channel_neibr_test_cache = {}  # same, test stage (includes test edges)
        self.device_idx_cache = {}  # cached device indices for a channel-campaign node
        # NOTE(review): device_idx_cache is keyed by channel index only, so
        # indices computed during training are reused at test time -- confirm
        # that this is intended before relying on test-time neighbor sampling.
        if use_enhance_botspot:
            assert device_package is not None
            self.device_package = device_package
            self.super_device_neibr_cache = {}  # channel-campaign indices per super-device node, training set
            self.super_device_neibr_test_cache = {}  # same, test set
        # Precompute channel neighbors and cache them.
        N = self.edge_matrix.shape[1]
        for i in range(N):
            # .copy() is required: basic slicing returns a *view* into
            # edge_matrix, which set_adj_test_matrix() mutates below; without
            # the copy the "train" cache would silently follow those edits.
            self.channel_neibr_cache[i] = self.edge_matrix[:, i].copy()
        # Precompute the super-device neighbors for the training set.
        if self.use_enhanced_botspot:
            for idx, d in enumerate(device_package):
                if d not in self.super_device_neibr_cache:
                    self.super_device_neibr_cache[d] = set()
                t_ = np.where(self.edge_matrix[idx] == 1)[0]
                for v in t_:
                    self.super_device_neibr_cache[d].add(v)
            for d in self.super_device_neibr_cache:
                # BUGFIX: was list(self.super_device_neibr_cache), which stored
                # the dict's *keys* (package ids) instead of this node's
                # channel-campaign neighbor indices.
                self.super_device_neibr_cache[d] = list(self.super_device_neibr_cache[d])
        self.set_adj_test_matrix(True)  # modify edge_matrix to include test edges
        for i in range(N):
            self.channel_neibr_test_cache[i] = self.edge_matrix[:, i].copy()
        # Precompute super-device neighbors for the test set.
        if self.use_enhanced_botspot:
            for idx, d in enumerate(device_package):
                if d not in self.super_device_neibr_test_cache:
                    self.super_device_neibr_test_cache[d] = set()
                t_ = np.where(self.edge_matrix[idx] == 1)[0]
                for v in t_:
                    self.super_device_neibr_test_cache[d].add(v)
            for d in self.super_device_neibr_test_cache:
                # BUGFIX: same keys-instead-of-values error as above.
                self.super_device_neibr_test_cache[d] = list(self.super_device_neibr_test_cache[d])
        self.set_adj_test_matrix(False)  # exclude test edges again for training
        # Initialize embedding matrices for the categorical features.
        emb_size = 16
        channel_id_max = int(channel_feats[:, -1].max() + 1)
        temp = np.max(device_feats[:, 1:], axis=0) + 1
        temp = [int(i) for i in temp]
        # Cardinalities of the device categorical columns; `os_card` renamed
        # from `os` to stop shadowing the os module.
        lang, plat, os_card, country, carrier, device_brand, plat_os = temp  # bypass install city, be careful
        self.channel_id_emb = nn.Embedding(channel_id_max, emb_size)
        self.carrier_emb = nn.Embedding(carrier, emb_size)
        self.language_emb = nn.Embedding(lang, emb_size)
        self.device_brand_emb = nn.Embedding(device_brand, emb_size)
        self.plat_os_emb = nn.Embedding(plat_os, emb_size)
        self.plat_emb = nn.Embedding(plat, emb_size)
        self.os_emb = nn.Embedding(os_card, emb_size)
        self.country_emb = nn.Embedding(country, emb_size)
        # Device modules if there is no super-device convolution.
        if not self.use_enhanced_botspot:
            self.dev_linear1 = nn.Linear(N_dev, int(0.6 * N_dev))  # NOT also used in channel side for convolving device feats
            self.dev_relu1 = nn.ReLU()
            self.dev_dropout1 = nn.Dropout(0.2)
            self.dev_linear2 = nn.Linear(int(0.6 * N_dev), int(0.75 * 0.6 * N_dev))
            self.dev_relu2 = nn.ReLU()
        # Channel linear and message-passing modules.
        self.channel_linear1 = nn.Linear(N_chan, int(0.6 * N_chan))
        self.channel_msg_pass1 = nn.Linear(N_dev, int(0.6 * N_dev))
        fusion_input = int(0.6 * N_chan) + int(0.6 * N_dev)
        self.fusion_linear1 = nn.Linear(fusion_input, int(0.6 * fusion_input))
        self.fusion_relu1 = nn.ReLU()
        self.fusion_dropout1 = nn.Dropout(0.2)
        fusion_output_dim = int(0.6 * fusion_input)
        device_output_dim = int(0.75 * 0.6 * N_dev)
        if not self.use_enhanced_botspot:
            # Concat modules if no BotSpot++.
            concat_input_dim = fusion_output_dim + device_output_dim if not self.use_gbm else fusion_output_dim + device_output_dim + leaf_dim
            self.concat_linear1 = nn.Linear(concat_input_dim, int(0.6 * concat_input_dim))
            self.concat_relu1 = nn.ReLU()
            self.concat_linear2 = nn.Linear(int(0.6 * concat_input_dim), int(0.5 * 0.6 * concat_input_dim))
            self.concat_relu2 = nn.ReLU()
            self.concat_linear3 = nn.Linear(int(0.5 * 0.6 * concat_input_dim), 1)
        else:
            # Device-side GNN if BotSpot++ is used.
            self.dev_linear1 = nn.Linear(N_dev, int(0.6 * N_dev))  # NOT also used in channel side for convolving device feats
            in_dim = int(0.6 * N_dev) + int(0.6 * N_chan)
            self.sup_dev_fusion_linear1 = nn.Linear(in_dim, int(0.6 * in_dim))
            self.sup_dev_fusion_relu1 = nn.ReLU()
            self.sup_dev_fusion_dropout1 = nn.Dropout(0.2)
            # Concat layer for BotSpot++.
            sup_dev_fusion_output_dim = int(0.6 * in_dim)
            concat_input_dim = fusion_output_dim + sup_dev_fusion_output_dim if not self.use_gbm else fusion_output_dim + sup_dev_fusion_output_dim + leaf_dim
            self.concat_linear1 = nn.Linear(concat_input_dim, int(0.6 * concat_input_dim))
            self.concat_relu1 = nn.ReLU()
            self.concat_linear2 = nn.Linear(int(0.6 * concat_input_dim), int(0.5 * 0.6 * concat_input_dim))
            self.concat_relu2 = nn.ReLU()
            self.concat_linear3 = nn.Linear(int(0.5 * 0.6 * concat_input_dim), 1)

    def to_emb(self, arr, *models):
        '''
        :param arr: matrix for holding high-cardinality features, without one-hot encoding
        :param models: a list of embedding matrices to embed each high-cardinality feature to dense embeddings
        :return: 2-d tensor with dense embeddings for all the high-cardinality features.
        '''
        out_arr = []
        arr = arr.long().to(Device)
        # Embed column i of arr with models[i] and concatenate the results.
        for i in range(len(models)):
            out_arr.append(models[i](arr[:, i]))
        return torch.cat(out_arr, dim=1)

    def concat_device_feats(self, dev_feats):
        '''
        this method invokes to_emb to embed device categorical features into dense embeddings
        :param dev_feats: normalized device features
        :return: feature matrix with dense embeddings
        '''
        dev_feats = dev_feats.to(Device)
        cat_dev_feats = dev_feats[:, self.device_split_val:]
        emb_tensor = self.to_emb(cat_dev_feats, self.language_emb,
                                 self.plat_emb, self.os_emb, self.country_emb, self.carrier_emb,
                                 self.device_brand_emb, self.plat_os_emb)
        dev_emb_feats = torch.cat((dev_feats[:, :self.device_split_val], emb_tensor), dim=1).float().to(Device)
        return dev_emb_feats

    def concat_channel_feats(self, chan_feats):
        '''
        this method invokes to_emb to embed channel_campaign node's categorical feature into dense embeddings
        similar to concat_device_feats, to add dense embeddings
        '''
        chan_feats = chan_feats.to(Device)
        emb_tensor = self.to_emb(chan_feats[:, self.channel_split_val:], self.channel_id_emb)
        return torch.cat((chan_feats[:, :self.channel_split_val], emb_tensor), dim=1).float().to(Device)

    def gen_adj_matrix(self):
        """
        this method builds the adjacency matrix of shape (N_device, N_channel).
        adj_matrix[j, i] = True indicates device node j connects to channel-campaign node i
        """
        e = np.vstack((self.edge_index_train, self.edge_index_test))
        N_dev = np.max(e[:, 1]) + 1
        N_channel = np.max(e[:, 0]) + 1
        adj_matrix = np.zeros((N_dev, N_channel), dtype=bool)
        # Only training edges are set here; test edges are toggled on demand
        # via set_adj_test_matrix().
        for i, j, _ in self.edge_index_train:
            adj_matrix[j, i] = True
        return adj_matrix

    def set_adj_test_matrix(self, set_test=True):
        """
        this method modifies edge_matrix in place.
        when set_test = True, edge_matrix includes test edges, otherwise it does not
        """
        for i, j, _ in self.edge_index_test:
            self.edge_matrix[j, i] = set_test
        return

    def sample_minibatch(self, channel_vertices, device_vertices, train_stage=True):
        """
        input:
            for a minibatch of edges, channel_vertices is edge[:,0], device_vertices is edge[:,1]
        this method takes a minibatch of edges and outputs:
          1) features for channel_campaign nodes
          2) features for device nodes
          3) neighboring device features for each channel_campaign node
          4) neighboring channel_campaign features for each super device node
             (or -1 when BotSpot++ is disabled)
        """
        # Number of sampled neighbors differs between train and test stages.
        num_neibr = 50 if train_stage else 800
        sup_dev_num_neibr = 20 if train_stage else 50
        neibr_feats_tensor = []
        sup_dev_neibr_feats_tensor = []
        minibatch_channel_feats = self.concat_channel_feats(self.channel_feats[channel_vertices]).to(Device)  # (minibatch, feats_num_channel)
        minibatch_device_feats = self.concat_device_feats(self.device_feats[device_vertices]).to(Device)  # (minibatch, feats_num_device)
        channel_vertices = channel_vertices.cpu().numpy()
        # For each channel vertex, gather its neighboring device features.
        for i in channel_vertices:
            if train_stage:
                neibr_feats = self.adj_indice_to_feat_mat(self.channel_neibr_cache[i], num_neibr, i)
            else:
                neibr_feats = self.adj_indice_to_feat_mat(self.channel_neibr_test_cache[i], num_neibr, i)
            neibr_feats_tensor.append(neibr_feats)
        neibr_feats_tensor = torch.cat(neibr_feats_tensor, dim=0).to(Device)  # (minibatch, num_neibr, feats_num_device)
        # With BotSpot++, also gather channel-campaign neighbors of each
        # device's super-device node.
        if self.use_enhanced_botspot:
            for i in device_vertices:
                sup_dev_neibr_feats = self.adj_indice_to_feat_mat_super_device(sup_dev_num_neibr, i, train_stage)
                sup_dev_neibr_feats_tensor.append(sup_dev_neibr_feats)
            sup_dev_neibr_feats_tensor = torch.cat(sup_dev_neibr_feats_tensor, dim=0).to(Device)
            return minibatch_channel_feats, minibatch_device_feats, neibr_feats_tensor, sup_dev_neibr_feats_tensor
        return minibatch_channel_feats, minibatch_device_feats, neibr_feats_tensor, -1

    def adj_indice_to_feat_mat(self, col, neibr_num, indice):
        """
        input:
            indice: index of the channel_campaign node
            neibr_num: sample size
            col: boolean adjacency column of channel_campaign node `indice`
        return:
            neighboring device features, shape (1, neibr_num, feats_num_device)
        """
        if indice in self.device_idx_cache:
            ind = self.device_idx_cache[indice]
        else:
            ind = np.where(col)[0]
            self.device_idx_cache[indice] = ind
        if len(ind) > neibr_num:
            # Downsample without replacement via shuffle + slice.
            np.random.shuffle(ind)
            ind_subset = ind[:neibr_num]
        else:
            # Upsample with replacement to reach the fixed sample size.
            ind_subset = np.random.choice(ind, size=neibr_num, replace=True)
        dev_feats = self.concat_device_feats(self.device_feats[ind_subset].to(Device))
        return dev_feats.unsqueeze(0)

    def adj_indice_to_feat_mat_super_device(self, neibr_num, device_idx, train_stage=True):
        """
        input:
            device_idx: index of device node
            neibr_num: sample size
        return:
            neighboring channel_campaign features of the device's super-device,
            shape (1, neibr_num, feats_num_channel)
        """
        i = self.device_package[device_idx]
        if train_stage:
            sup_dev_neibr = list(self.super_device_neibr_cache[i])
        else:
            sup_dev_neibr = list(self.super_device_neibr_test_cache[i])
        if len(sup_dev_neibr) > neibr_num:
            random.shuffle(sup_dev_neibr)
            c = self.concat_channel_feats(self.channel_feats[sup_dev_neibr[:neibr_num]].to(Device))
        else:
            sup_dev_neibr = np.random.choice(np.asarray(sup_dev_neibr), size=neibr_num, replace=True)
            c = self.concat_channel_feats(self.channel_feats[sup_dev_neibr].to(Device))
        return c.unsqueeze(0)

    def get_leaf_from_light_gbm(self, left_vertices, right_vertices, use_self_attn=False):
        """Embed the GBM leaf index of each edge into a dense vector.

        The edge feature is the concatenation of the channel-campaign and
        device node features; the fitted LightGBM model maps it to one leaf
        index per tree, each of which is embedded and then pooled (mean
        pooling by default, self-attention when use_self_attn is True).
        """
        output_leaf_emb = []
        chan_data = self.channel_feats[left_vertices]
        dev_data = self.device_feats[right_vertices]
        # channel_feats/device_feats are torch tensors (see __init__), so
        # concatenate with torch and convert once.  The previous
        # np.hstack-with-fallback could return an ndarray lacking .cpu() and
        # crash on CPU tensors.
        edge_data = torch.cat((chan_data, dev_data), dim=1).cpu().numpy()
        if len(edge_data.shape) == 1:
            edge_data = edge_data.reshape((1, -1))
        pred_leaf = self.gbm_best_model.predict_proba(edge_data, pred_leaf=True)
        pred_leaf = torch.from_numpy(pred_leaf).long().to(Device)
        for i in range(pred_leaf.shape[1]):
            output_leaf_emb.append(self.leaf_emb_models[i](pred_leaf[:, i]).unsqueeze(1))
        if not use_self_attn:
            return torch.cat(output_leaf_emb, dim=1).mean(axis=1).to(Device)  # leaf node mean pooling
        ret = torch.cat(output_leaf_emb, dim=1).to(Device)
        return self.self_attn_module(ret)

    def forward(self, edges, train_stage=True):
        """Score a minibatch of edges; returns sigmoid probabilities."""
        minibatch_channel_feats, minibatch_device_feats, neibr_feats_tensor, sup_dev_neibr_feats_tensor = self.sample_minibatch(edges[:, 0], edges[:, 1], train_stage)
        if not self.use_enhanced_botspot:
            # Plain BotSpot: device tower + channel tower with device-neighbor
            # message passing, then concat and classify.
            device_out = self.dev_relu2(self.dev_linear2(self.dev_dropout1(self.dev_relu1(self.dev_linear1(minibatch_device_feats)))))
            channel_conv = self.channel_linear1(minibatch_channel_feats)
            dev_conv = self.dev_linear1(neibr_feats_tensor).mean(dim=1)  # share dev_linear1
            fuse_conv = self.fusion_dropout1(self.fusion_relu1(self.fusion_linear1(torch.cat((channel_conv, dev_conv), dim=1))))
            if not self.use_gbm:
                h = self.concat_linear3(self.concat_relu2(self.concat_linear2(self.concat_relu1(self.concat_linear1(torch.cat((fuse_conv, device_out), dim=1))))))
            else:
                leaf_out = self.get_leaf_from_light_gbm(edges[:, 0], edges[:, 1])
                h = self.concat_linear3(self.concat_relu2(self.concat_linear2(self.concat_relu1(self.concat_linear1(torch.cat((fuse_conv, device_out, leaf_out), dim=1))))))
            return torch.sigmoid(h)
        else:
            # BotSpot++: additionally convolve each device's super-device
            # neighborhood on the channel side.
            channel_conv = self.channel_linear1(minibatch_channel_feats)
            dev_conv = self.dev_linear1(neibr_feats_tensor).mean(dim=1)  # share dev_linear1
            fuse_conv = self.fusion_dropout1(self.fusion_relu1(self.fusion_linear1(torch.cat((channel_conv, dev_conv), dim=1))))
            sup_dev_conv = self.dev_linear1(minibatch_device_feats)
            sup_channel_conv = self.channel_linear1(sup_dev_neibr_feats_tensor).mean(dim=1)
            sup_fuse_conv = self.sup_dev_fusion_dropout1(self.sup_dev_fusion_relu1(self.sup_dev_fusion_linear1(torch.cat((sup_channel_conv, sup_dev_conv), dim=1))))
            if not self.use_gbm:
                h = self.concat_linear3(self.concat_relu2(self.concat_linear2(self.concat_relu1(self.concat_linear1(torch.cat((fuse_conv, sup_fuse_conv), dim=1))))))
            else:
                leaf_out = self.get_leaf_from_light_gbm(edges[:, 0], edges[:, 1])
                h = self.concat_linear3(self.concat_relu2(self.concat_linear2(self.concat_relu1(self.concat_linear1(torch.cat((fuse_conv, sup_fuse_conv, leaf_out), dim=1))))))
            return torch.sigmoid(h)
if __name__ == '__main__':
    # Build graph inputs from the dataset CSVs.
    bot_preprocess = BotSpotTrans(f'{dataset_name}/train.csv', f'{dataset_name}/test.csv')
    edge_index_train = bot_preprocess.edge_index_train
    edge_index_test = bot_preprocess.edge_index_test
    chan_feats = bot_preprocess.combin_matrix
    device_feats = bot_preprocess.device_matrix
    # The last device-feature column is the super-device (package) id: keep it
    # as the device -> super-device mapping and drop it from the features.
    device_package = list(device_feats[:, -1])
    device_feats = device_feats[:, :-1]
    # Train the LightGBM edge classifier first when the leaf-embedding branch
    # is enabled; BotSpot consumes its per-tree leaf predictions.
    if use_gbm:
        gbm_model = make_dataset(chan_feats, device_feats, -1, 1, edge_index_train, edge_index_test)
    else:
        gbm_model = None
    botspot_model = BotSpot(edge_index_train, edge_index_test, chan_feats, device_feats,
                            device_package=device_package, use_enhance_botspot=use_botspotpp,
                            use_gbm=use_gbm, gbm_model=gbm_model)
    tr_dset = TensorDataset(torch.from_numpy(edge_index_train))
    tr_dloader = DataLoader(tr_dset, batch_size=500, shuffle=True)
    test_dset = TensorDataset(torch.from_numpy(edge_index_test))
    test_dloader = DataLoader(test_dset, batch_size=500, shuffle=False)
    optimizer = optim.Adam(botspot_model.parameters(), lr=2e-4, weight_decay=3e-6)
    botspot_model.to(Device)
    # makedirs(exist_ok=True) replaces the bare try/mkdir/except-pass, which
    # silently swallowed *every* error, not just "directory already exists".
    os.makedirs('model_results', exist_ok=True)
    _ = train(botspot_model, tr_dloader, test_dloader, optimizer, 10, save_name='model_results')
| tianyao-aka/BotSpot | vectorized_botspot.py | vectorized_botspot.py | py | 26,961 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.cuda.is_available",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "torch.device",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"... |
36840895879 | from __future__ import absolute_import, print_function, unicode_literals
import argparse
import os
import sys
import yaml
# Get relative imports to work when the package is not installed on the PYTHONPATH.
if __name__ == "__main__" and __package__ is None:
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# pylint: disable=wrong-import-position
from buildscripts.ciconfig.evergreen import parse_evergreen_file
# pylint: enable=wrong-import-position
# Name of map to search for in the variables map in evergreen.yml
MONGOCRYPTD_VARIANTS = "mongocryptd_variants"
# Name of the Evergreen task whose presence makes a variant require validation.
PUSH_TASK_NAME = "push"
def can_validation_be_skipped(evg_config, variant):
    """
    Determine if the given build variant needs to be validated.

    A build variant does not need to be validated if it does not run the 'push' task or
    if it does not exist in the configuration (it is dynamically created).

    :param evg_config: Evergreen configuration.
    :param variant: Build variant to check.
    :return: True if validation can be skipped.
    """
    variant_config = evg_config.get_variant(variant)
    # Skip for unknown (dynamically created) variants or variants without the
    # push task; short-circuiting avoids touching task_names on a missing one.
    return not variant_config or PUSH_TASK_NAME not in variant_config.task_names
def read_variable_from_yml(filename, variable_name):
    """
    Read the given variable from the given yaml file.

    :param filename: Yaml file to read from.
    :param variable_name: Variable to read from file.
    :return: Value of variable or None.
    """
    with open(filename, 'r') as fh:
        root = yaml.safe_load(fh)
    # The "variables" node is a list of single-key mappings; return the value
    # of the first mapping that contains the requested variable.
    for entry in root["variables"]:
        if variable_name in entry:
            return entry[variable_name]
    return None
def main():
    # type: () -> None
    """Execute Main Entry point."""
    parser = argparse.ArgumentParser(description='MongoDB CryptD Check Tool.')
    parser.add_argument('file', type=str, help="etc/evergreen.yml file")
    parser.add_argument('--variant', type=str, help="Build variant to check for")
    args = parser.parse_args()

    # Guard: the mongocryptd_variants node must exist in the yaml file.
    expected_variants = read_variable_from_yml(args.file, MONGOCRYPTD_VARIANTS)
    if not expected_variants:
        print("ERROR: Could not find node %s in file '%s'" % (MONGOCRYPTD_VARIANTS, args.file),
              file=sys.stderr)
        sys.exit(1)

    evg_config = parse_evergreen_file(args.file)
    if can_validation_be_skipped(evg_config, args.variant):
        print(f"Skipping validation on buildvariant {args.variant}")
        sys.exit(0)

    # Success path: the variant is listed where it should be.
    if args.variant in expected_variants:
        sys.exit(0)

    print("ERROR: Expected to find variant %s in list %s" % (args.variant, expected_variants),
          file=sys.stderr)
    print(
        "ERROR: Please add the build variant %s to the %s list in '%s'" %
        (args.variant, MONGOCRYPTD_VARIANTS, args.file), file=sys.stderr)
    sys.exit(1)
main()
| mongodb/mongo | buildscripts/validate_mongocryptd.py | validate_mongocryptd.py | py | 2,966 | python | en | code | 24,670 | github-code | 36 | [
{
"api_name": "sys.path.append",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_num... |
3206846810 | """ Provides an intermediate datastructure close to the PDF, which is used
to transform the lines in the pdf into TimeTable objects. """
from __future__ import annotations
import logging
from operator import attrgetter
from pathlib import Path
from typing import Callable, TypeAlias
from pdf2gtfs.config import Config
from pdf2gtfs.datastructures.pdftable.container import (
Column, FieldContainer, Row)
from pdf2gtfs.datastructures.pdftable.enums import (
ColumnType, FieldType,
RowType,
)
from pdf2gtfs.datastructures.pdftable.lists import ColumnList, RowList
from pdf2gtfs.datastructures.timetable.table import TimeTable
logger = logging.getLogger(__name__)

# Shorthand aliases used throughout this module.
Tables: TypeAlias = list["PDFTable"]
Rows: TypeAlias = list[Row]
Cols: TypeAlias = list[Column]
# A Splitter distributes the contents of one table over pre-created tables,
# one per splitter container (see PDFTable._split_at).
Splitter: TypeAlias = Callable[[Tables, list[FieldContainer]], None]
class PDFTable:
    """ Describes a table, using coordinates, rows and columns. """

    def __init__(self, rows: Rows = None, columns: Cols = None):
        # The property setters below wrap plain lists in RowList/ColumnList.
        self.rows = rows or []
        self.columns = columns or []

    @property
    def rows(self) -> RowList:
        """ The rows of the table. """
        return self._rows

    @rows.setter
    def rows(self, rows: Rows | RowList) -> None:
        # Accept either a ready-made RowList or a plain list of rows.
        if isinstance(rows, RowList):
            self._rows = rows
        else:
            self._rows = RowList.from_list(self, rows)

    @property
    def columns(self) -> ColumnList:
        """ The columns of the table. """
        return self._columns

    @columns.setter
    def columns(self, columns: Cols | ColumnList):
        # Accept either a ready-made ColumnList or a plain list of columns.
        if isinstance(columns, ColumnList):
            self._columns = columns
        else:
            self._columns = ColumnList.from_list(self, columns)

    @property
    def empty(self) -> bool:
        """ Whether either columns or rows are empty. """
        return self.columns.empty or self.rows.empty

    def generate_columns_from_rows(self) -> None:
        """ Create columns from the given rows.

        Builds one single-field column per field, sorts them by their left
        x-coordinate and merges columns that overlap horizontally.
        """
        def _generate_single_field_columns() -> Cols:
            """ Generate single-field columns from the rows. """
            field_columns = [Column.from_field(self, field)
                            for row in rows for field in row]
            return sorted(field_columns, key=attrgetter("bbox.x0"))

        def _merge_overlapping_columns(field_columns: Cols) -> Cols:
            """ Merges overlapping field_columns. """
            first_field = field_columns.pop(0).fields[0]
            cols: Cols = [Column.from_field(self, first_field)]
            for column in field_columns:
                previous_column = cols[-1]
                # Begin new column, if the current column is not overlapping.
                if previous_column.bbox.x1 <= column.bbox.x0:
                    cols.append(Column.from_field(self, column.fields[0]))
                    continue
                previous_column.add_field(column.fields[0])
            return cols

        # Only data-bearing row types contribute fields to columns.
        rows = self.rows.of_types(
            [RowType.DATA, RowType.ANNOTATION, RowType.ROUTE_INFO])
        if not rows:
            return
        columns = _merge_overlapping_columns(_generate_single_field_columns())
        self.columns = columns

    def fix_split_stopnames(self) -> None:
        """ Fix stop names (indented or starting with a delimiter),
        indicating they use the same city/POI as the previous stop. """
        stop_column = self.columns.of_type(ColumnType.STOP)[0]
        first_field_idx = 0
        reference_field = None
        # The first DATA field serves as the initial reference name.
        for i, field in enumerate(stop_column.fields):
            if field.row.type == RowType.DATA:
                reference_field = field
                first_field_idx = i
                break
        if not reference_field:
            return
        for field in stop_column.fields[first_field_idx:]:
            # Don't update the reference_field, in case field is indented.
            if not field.fix_name_if_split(reference_field):
                reference_field = field

    def to_timetable(self) -> TimeTable:
        """ Creates a TimeTable containing the values of this table. """
        return TimeTable.from_pdf_table(self)

    def get_header_from_column(self, column: Column) -> str:
        """ Returns the header text of the given column.

        A header field may span several columns; the matching field is the
        last one that starts before the column's right edge.
        """
        for row in self.rows.of_type(RowType.HEADER):
            for i, field in enumerate(row, 1):
                next_field = row.fields[i] if i < len(row.fields) else None
                if not next_field or next_field.bbox.x0 >= column.bbox.x1:
                    return field.text
        return ""

    def add_row_or_column(self, obj: Row | Column) -> None:
        """ Add the object to either rows or columns, based on its type. """
        if isinstance(obj, Row):
            self.rows.add(obj)
            return
        self.columns.add(obj)

    @staticmethod
    def _split_at(splitter: list, splitter_func: Splitter) -> Tables:
        # One fresh table per splitter container; splitter_func distributes
        # the contents, then each new table rebuilds its row types/columns.
        tables: Tables = [PDFTable() for _ in splitter]
        splitter_func(tables, splitter)
        for table in tables:
            for row in table.rows:
                row.update_type()
            table.generate_columns_from_rows()
        return tables

    def split_at_stop_columns(self) -> Tables:
        """ Return a list of tables with each having a single stop column. """

        def splitter(tables: Tables, splitter_columns: list) -> None:
            """ Split the given tables at the given splitter_columns. """
            for row in self.rows:
                splits = row.split_at(splitter_columns)
                for table, split in zip(tables, splits):
                    if not split.fields:
                        continue
                    table.add_row_or_column(split)

        return self._split_at(self.columns.of_type(ColumnType.STOP), splitter)

    def split_at_header_rows(self) -> Tables:
        """ Return a list of tables with each having a single header row. """

        def splitter(tables: Tables, splitter_rows: list) -> None:
            """ Splits the current tables' rows such that each split starts
            with a splitter_row and assigns each split to a table. """
            rows_list = [[] for _ in splitter_rows]
            # Rows before the first header row stay in the first split.
            first_is_splitter = self.rows[0] in splitter_rows
            idx = -1 if first_is_splitter else 0
            for row in self.rows:
                if row in splitter_rows:
                    idx += 1
                rows_list[idx].append(row)
            for table, rows in zip(tables, rows_list, strict=True):
                table.rows = rows

        return self._split_at(self.rows.of_type(RowType.HEADER), splitter)

    def to_file(self, fname: Path) -> None:
        """ Export the PDFTable to the given path in the csv format. """

        def escape_field_text(text: str) -> str:
            """ Wrap field text that contains a comma in quotes.

            Also removes any existing quotes.
            """
            text = text.replace('"', "").strip()
            if "," in text:
                return f'"{text}"'
            return text

        def get_header_row_column_idx(idx: int = 0, *, row_field=None) -> int:
            """ Get the column index of the fields of a header row. """
            if not row_field:
                row_field = row.fields[idx]
            for i, col_ in enumerate(self.columns):
                if col_.bbox.x0 > row_field.bbox.x0:
                    return i
            # Last column.
            return len(self.columns) - 1

        # Build one list of cell strings per row, walking column by column.
        row_strings = [[] for _ in self.rows]
        seen_fields = []
        for col in self.columns:
            last_row_id = -1
            for field in col:
                row_id = field.row.index
                # Pad rows that have no field in this column.
                for row in self.rows.objects[last_row_id + 1: row_id]:
                    row_strings[row.index].append("")
                row_strings[row_id].append(escape_field_text(field.text))
                seen_fields.append(field)
                last_row_id = row_id
            # Fix missing trailing commas.
            for row_string in row_strings:
                while len(row_string) < self.columns.index(col) + 1:
                    row_string.append("")
        # Add fields without a column.
        for row in self.rows:
            for field in row:
                if field.type != FieldType.HEADER or field in seen_fields:
                    continue
                idx = get_header_row_column_idx(row_field=field)
                row_strings[row.index][idx] = field.text
        # Export.
        table_str = "\n".join(
            [",".join(row_string) for row_string in row_strings
             if any(row_string)]) + "\n"
        with open(fname, "w") as fil:
            fil.write(table_str)
def split_rows_into_tables(rows: Rows) -> Tables:
""" Split raw rows into (multiple) PDFTable. A new table is created,
whenever the distance to the previous line is higher than the defined
max_row_distance. Tables with too few rows are dropped with an info. """
def log_skipped_rows() -> None:
""" Log the rows that are dropped. """
row_str = ",\n\t\t ".join([str(r) for r in current_rows])
logger.debug(f"Dropped rows:\n\tDistance: {y_distance:.2f}"
f"\n\tRows: {row_str}")
tables = []
current_rows = [rows[0]]
for row in rows[1:]:
if not row.fields:
continue
y_distance = row.y_distance(current_rows[-1])
if y_distance > Config.max_row_distance:
if len(current_rows) < Config.min_row_count:
log_skipped_rows()
current_rows = [row]
continue
logger.debug(f"Distance between rows: {y_distance}")
tables.append(PDFTable(current_rows))
current_rows = []
current_rows.append(row)
else:
if len(current_rows) < Config.min_row_count:
log_skipped_rows()
return tables
tables.append(PDFTable(current_rows))
return tables
def cleanup_tables(tables: Tables) -> Tables:
    """ Fix some errors in the tables.

    Updates every row's type, merges/splits tables based on their header
    rows, generates the columns and finally splits tables that ended up
    with multiple stop columns.
    """
    # Refresh each row's type before deciding where tables begin and end.
    for table in tables:
        for table_row in table.rows:
            table_row.update_type()
    split_by_header = split_tables_with_multiple_header_rows(tables)
    # Columns can only be derived once the header rows are settled.
    for table in split_by_header:
        table.generate_columns_from_rows()
    return split_tables_with_multiple_stop_columns(split_by_header)
def split_tables_with_multiple_header_rows(tables: Tables) -> Tables:
    """ Merge table with the previous one, if it has no header row.

    Tables containing more than one header row are split into one table
    per header row instead.
    """
    if not tables:
        return []
    # NOTE(review): the first table is taken as-is and is never checked
    # for multiple header rows — confirm this is intended.
    merged_tables: Tables = [tables[0]]
    for table in tables[1:]:
        header_count = len(table.rows.of_type(RowType.HEADER))
        if header_count > 1:
            # More than one header: this table actually contains several.
            merged_tables.extend(table.split_at_header_rows())
        elif header_count == 1:
            merged_tables.append(table)
        else:
            # No header row: these rows belong to the previous table.
            merged_tables[-1].rows.merge(table.rows)
    return merged_tables
def split_tables_with_multiple_stop_columns(tables: Tables) -> Tables:
    """ If a table has multiple stop columns, it will be split into two
    tables with each having only one stop column. """
    result: Tables = []
    for table in tables:
        stop_count = len(table.columns.of_type(ColumnType.STOP))
        if stop_count <= 1:
            # At most one stop column: nothing to split here.
            result.append(table)
        else:
            result.extend(table.split_at_stop_columns())
    return result
| heijul/pdf2gtfs | src/pdf2gtfs/datastructures/pdftable/pdftable.py | pdftable.py | py | 11,459 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "typing.TypeAlias",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "typing.TypeAlias",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "pdf2gtfs.datastru... |
71038539943 | import torch.nn as nn
import torch.nn.functional as F
from ...utils import CONFIG
from ..encoders.resnet_enc import ResNet_D
from ..ops import GuidedCxtAtten, SpectralNorm
class ResGuidedCxtAtten(ResNet_D):
def __init__(self, block, layers, norm_layer=None, late_downsample=False):
super(ResGuidedCxtAtten, self).__init__(block, layers, norm_layer, late_downsample=late_downsample)
first_inplane = 3 + CONFIG.model.trimap_channel
self.shortcut_inplane = [first_inplane, self.midplanes, 64, 128, 256]
self.shortcut_plane = [32, self.midplanes, 64, 128, 256]
self.shortcut = nn.ModuleList()
for stage, inplane in enumerate(self.shortcut_inplane):
self.shortcut.append(self._make_shortcut(inplane, self.shortcut_plane[stage]))
self.guidance_head = nn.Sequential(
nn.ReflectionPad2d(1),
SpectralNorm(nn.Conv2d(3, 16, kernel_size=3, padding=0, stride=2, bias=False)),
nn.ReLU(inplace=True),
self._norm_layer(16),
nn.ReflectionPad2d(1),
SpectralNorm(nn.Conv2d(16, 32, kernel_size=3, padding=0, stride=2, bias=False)),
nn.ReLU(inplace=True),
self._norm_layer(32),
nn.ReflectionPad2d(1),
SpectralNorm(nn.Conv2d(32, 128, kernel_size=3, padding=0, stride=2, bias=False)),
nn.ReLU(inplace=True),
self._norm_layer(128)
)
self.gca = GuidedCxtAtten(128, 128)
# initialize guidance head
for layers in range(len(self.guidance_head)):
m = self.guidance_head[layers]
if isinstance(m, nn.Conv2d):
if hasattr(m, "weight_bar"):
nn.init.xavier_uniform_(m.weight_bar)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def _make_shortcut(self, inplane, planes):
return nn.Sequential(
SpectralNorm(nn.Conv2d(inplane, planes, kernel_size=3, padding=1, bias=False)),
nn.ReLU(inplace=True),
self._norm_layer(planes),
SpectralNorm(nn.Conv2d(planes, planes, kernel_size=3, padding=1, bias=False)),
nn.ReLU(inplace=True),
self._norm_layer(planes)
)
def forward(self, x):
out = self.conv1(x)
out = self.bn1(out)
out = self.activation(out)
out = self.conv2(out)
out = self.bn2(out)
x1 = self.activation(out) # N x 32 x 256 x 256
out = self.conv3(x1)
out = self.bn3(out)
out = self.activation(out)
im_fea = self.guidance_head(x[:,:3,...]) # downsample origin image and extract features
if CONFIG.model.trimap_channel == 3:
unknown = F.interpolate(x[:,4:5,...], scale_factor=1/8, mode='nearest')
else:
unknown = F.interpolate(x[:,3:,...].eq(1.).float(), scale_factor=1/8, mode='nearest')
x2 = self.layer1(out) # N x 64 x 128 x 128
x3= self.layer2(x2) # N x 128 x 64 x 64
x3, offset = self.gca(im_fea, x3, unknown) # contextual attention
x4 = self.layer3(x3) # N x 256 x 32 x 32
out = self.layer_bottleneck(x4) # N x 512 x 16 x 16
fea1 = self.shortcut[0](x) # input image and trimap
fea2 = self.shortcut[1](x1)
fea3 = self.shortcut[2](x2)
fea4 = self.shortcut[3](x3)
fea5 = self.shortcut[4](x4)
return out, {'shortcut':(fea1, fea2, fea3, fea4, fea5),
'image_fea':im_fea,
'unknown':unknown,
'offset_1':offset}
if __name__ == "__main__":
from networks.encoders.resnet_enc import BasicBlock
m = ResGuidedCxtAtten(BasicBlock, [3, 4, 4, 2])
for m in m.modules():
print(m)
| XiaohangZhan/deocclusion | demos/GCAMatting/networks/encoders/res_gca_enc.py | res_gca_enc.py | py | 3,937 | python | en | code | 764 | github-code | 36 | [
{
"api_name": "encoders.resnet_enc.ResNet_D",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "utils.CONFIG.model",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "utils.CONFIG",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "torc... |
4002922556 | #! /usr/bin/env python3
import json
import time
import rclpy
from rclpy.node import Node
from rclpy.executors import MultiThreadedExecutor
from robot_navigator import BasicNavigator, TaskResult
from geometry_msgs.msg import PoseStamped
from std_msgs.msg import Float64MultiArray, Int32, String, Bool, Int32MultiArray
class ProtMsg:
def __init__(self) -> None:
self.data = 0
# Holds the current pose of the robot
current_x = 0.0
current_y = 0.0
current_yaw_angle = 0.0
# Holds the pose of the current tag
current_tag_x = 0.0
current_tag_y = 0.0
current_tag_yaw_angle = 0.0
# Holds next machine on the queue
next_machine = None
# Holds the current state of the battery
this_battery_state = 0.0
prev_battery_state = 0.0
# Flag for detecting the change in the battery state
low_battery = False
low_battery_min_threshold = 0.15
# File with the current machines positions
_file = open("Data/machine_positions.json")
machines = json.load(_file)
#PoC Variables
sim_list = [0,675,674,673,672,671,670,669,668]
idx = 0
first_run = True
nav_start = 0
pick_start = 0
class PickingNavigator(Node):
"""
Navigates and request docking routines
"""
def __init__(self):
super().__init__("navigate_to_machines")
self.current_mc_tag_pub = self.create_publisher(Int32, "/current_tag", 10)
self.pick_feedback_sub = self.create_publisher(
Int32MultiArray, "/picked_machine", 10
)
self.next_mc_request_sub = self.create_publisher(
Bool, "/machine_number_request", 10
)
self.start_docking_pub = self.create_publisher(Bool, "/start_docking", 10)
self.next_machine = self.create_subscription(
Int32, "/next_machine_number", self.set_next_machine, 10
)
self.picking_feedback = self.create_subscription(
Int32MultiArray, "/pick_feedback", self.update_feedback, 10
)
self.request_machine_sim()
# Execution timer
timer_period = 0.02
self.timer = self.create_timer(timer_period, self.navigate_to_station)
# Declare distance metrics in meters
self.distance_goal_tolerance = 0.01
self.reached_distance_goal = False
# Declare angle metrics in radians
self.heading_tolerance = 0.10
self.yaw_goal_tolerance = 0.05
# Picking related variables
self.picking_feedback = [0, 0]
self.picking = False
self.nav_start = 0
self.pick_start = 0
# Navigation setup and feedback
self.nav = BasicNavigator()
self.nav.waitUntilNav2Active()
self.done_navigating = False
self.curr_record = [0]*4
def set_next_machine(self, msg):
"""
Sets the next machine in queue
"""
global next_machine
for mc_name, mc in machines.items():
try:
if mc["Number"] == msg.data:
next_machine = mc
except:
print("Machine number not found in 'machine_positions.json'")
def update_feedback(self, msg):
"""
Gets picking feedback from the docking node
"""
self.picking_feedback = msg.data
def request_machine_sim(self):
global idx
mc = sim_list[idx]
idx = idx+1 if idx <= len(sim_list)-2 else 0
msg = ProtMsg()
msg.data = mc
print("NEXT MACHINE: ")
print(mc)
self.set_next_machine(msg)
def navigate_to_station(self):
"""
Main routine: Goes to the selected machine and requests a docking sequence.
Will end with "SUCCESS" flag if the docking sequence returns a "Done" value.
Will stop with "FAIL" flag if the machine is unreachable/unconnected or if the
docking sequence fails.
"""
global next_machine, first_run
if self.done_navigating:
result = self.nav.getResult()
if result == TaskResult.SUCCEEDED:
if not self.picking:
self.curr_record[0] = 1
self.curr_record[1] = time.time()-self.nav_start
start_picking_msg = Bool()
start_picking_msg.data = False if (next_machine!=None and next_machine["Number"]==0) else True
self.start_docking_pub.publish(start_picking_msg)
self.picking_feedback = [0, 0]
self.picking = True
self.pick_start = time.time()
else:
if self.picking_feedback == [0, 0]:
print("Waiting for picking feedback")
else:
if self.picking_feedback[1] == 0: fb = "Failed"
elif self.picking_feedback[1] == 1: fb = "Picked"
elif self.picking_feedback[1] == 2: fb = "At home"
else: fb = "Unknown feedback code"
self.curr_record[2] = time.time()-self.pick_start
self.curr_record[3] = self.picking_feedback[1]
with open("records.txt", "a") as file_object:
file_object.write(f"\n{round(self.curr_record[0],2)},{round(self.curr_record[1],2)},{round(self.curr_record[2],2)},{round(self.curr_record[3],2)}")
print(fb)
self.picking = False
self.done_navigating = False
next_machine = None
self.request_machine_sim()
elif result == TaskResult.CANCELED:
print("Goal was canceled!")
self.done_navigating = False
next_machine = None
self.request_machine_sim()
elif result == TaskResult.FAILED:
print("Goal failed!")
self.done_navigating = False
next_machine = None
self.request_machine_sim()
else:
print("Goal has an invalid return status!")
self.done_navigating = False
next_machine = None
self.request_machine_sim()
return None
msg_req = Bool()
msg_req.data = True
self.next_mc_request_sub.publish(msg_req)
if next_machine == None:
return None
current_machine = next_machine
tag_msg = Int32()
tag_msg.data = int(current_machine["Tag"])
self.current_mc_tag_pub.publish(tag_msg)
self.get_logger().info("Navigate to station")
# You may use the navigator to clear or obtain costmaps
self.nav.clearAllCostmaps() # also have clearLocalCostmap() and clearGlobalCostmap()
# global_costmap = self.nav.getGlobalCostmap()
# local_costmap = self.nav.getLocalCostmap()
# Set the robot's goal pose
goal_pose = PoseStamped()
goal_pose.header.frame_id = "map"
goal_pose.header.stamp = self.nav.get_clock().now().to_msg()
goal_pose = PoseStamped()
goal_pose.header.frame_id = "map"
goal_pose.header.stamp = self.nav.get_clock().now().to_msg()
goal_pose.pose.position.x = current_machine["Position"][0]
goal_pose.pose.position.y = current_machine["Position"][1]
goal_pose.pose.position.z = 0.0
goal_pose.pose.orientation.x = 0.0
goal_pose.pose.orientation.y = 0.0
if current_machine["Position"][2] == "D":
z_pos = -0.7071068
elif current_machine["Position"][2] == "E":
z_pos = 0.7071068
else:
z_pos = 0.0
goal_pose.pose.orientation.z = z_pos # current_machine["Position"][2]
goal_pose.pose.orientation.w = 0.7071068
# Go to the goal pose
self.nav.goToPose(goal_pose)
self.nav_start = time.time()
i = 0
# Keep doing stuff as long as the robot is moving towards the goal
while not self.nav.isTaskComplete():
# Do something with the feedback
i = i + 1
feedback = self.nav.getFeedback()
if feedback and i % 5 == 0:
print(
"Distance remaining: "
+ "{:.2f}".format(feedback.distance_remaining)
+ " meters."
)
# Some navigation timeout to demo cancellation
# if Duration.from_msg(feedback.navigation_time) > Duration(seconds=1800.0):
# self.nav.cancelNav()
self.done_navigating = True
class BatteryStateSubscriber(Node):
"""
Subscriber node to the current battery state
"""
def __init__(self):
# Initialize the class using the constructor
super().__init__("battery_state_subscriber")
# Create a subscriber
# This node subscribes to messages of type
# sensor_msgs/BatteryState
self.subscription_battery_state = self.create_subscription(
Int32, "/battery_soc", self.get_battery_state, 10
)
def get_battery_state(self, msg):
"""
Update the current battery state.
"""
global this_battery_state
global prev_battery_state
global low_battery
prev_battery_state = this_battery_state
this_battery_state = float(msg.data) / 100
# Check for low battery
if (
prev_battery_state >= low_battery_min_threshold
and this_battery_state < low_battery_min_threshold
):
low_battery = True
class PoseSubscriber(Node):
"""
Subscriber node to the current 2D pose of the robot
"""
def __init__(self):
# Initialize the class using the constructor
super().__init__("pose_subscriber")
# Create a subscriber
# This node subscribes to messages of type
# std_msgs/Float64MultiArray
self.subscription_pose = self.create_subscription(
Float64MultiArray, "/bot_2d_pose", self.get_pose, 1
)
self.tag_pose_sub = self.create_subscription(
Float64MultiArray, "/tag_2d_pose", self.get_tag_pose, 1
)
def get_pose(self, msg):
global current_x
global current_y
global current_yaw_angle
current_2d_pose = msg.data
current_x = current_2d_pose[0]
current_y = current_2d_pose[1]
current_yaw_angle = current_2d_pose[2]
def get_tag_pose(self, msg):
global current_tag_x
global current_tag_y
global current_tag_yaw_angle
current_2d_pose = msg.data
current_tag_x = current_2d_pose[0]
current_tag_y = current_2d_pose[1]
current_tag_yaw_angle = current_2d_pose[2]
def main(args=None):
rclpy.init(args=args)
try:
picking_navigator = PickingNavigator()
battery_state_subscriber = BatteryStateSubscriber()
pose_subscriber = PoseSubscriber()
executor = MultiThreadedExecutor(num_threads=4)
executor.add_node(picking_navigator)
executor.add_node(battery_state_subscriber)
executor.add_node(pose_subscriber)
try:
executor.spin()
finally:
executor.shutdown()
picking_navigator.destroy_node()
battery_state_subscriber.destroy_node()
pose_subscriber.destroy_node()
finally:
# Shutdown
rclpy.shutdown()
if __name__ == "__main__":
main()
| maurocs/knitting-amr | Navigation routine/AA_DOCK.py | AA_DOCK.py | py | 11,537 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "json.load",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "rclpy.node.Node",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "std_msgs.msg.Int32",
"line_number": 59,
"usage_type": "argument"
},
{
"api_name": "std_msgs.msg.Int32Mu... |
17443182071 | #!/usr/bin/env python
"""
A thermometer display stimulus.
This module contains a class implementing a thermometer display stimulus.
"""
from __future__ import absolute_import, print_function, division
from builtins import *
__author__ = 'Florian Krause <florian@expyriment.org>'
__version__ = ''
__revision__ = ''
__date__ = ''
import pygame
from expyriment.stimuli import Rectangle
from expyriment.stimuli._visual import Visual
class ThermometerDisplay(Visual):
"""A class implementing a thermometer display."""
def __init__(self, state, goal=None, size=(128,340), nr_segments=10,
gap=3, frame_line_width=20, active_colour=(150,150,150),
inactive_colour=(0,0,0), frame_colour=(100,100,100),
goal_colour=(0,255,0), gap_colour=(255,255,255),
position=None):
"""Initializing a thermometer display.
Parameters:
-----------
state : int
The state of the thermometer in percent.
goal : int, optional
The goal state indication in percent.
size : (int, int), optional
The size of the thermometer display (default=(128,340)).
nr_segments : int, optional
The number of segments to use (default=10).
gap : int, optional
The visual gap between the individual segments (default=3).
frame_line_width : int, optional
The line width of the frame around the thermometer display
(default=20).
active_colour : (int, int, int), optional
The colour of the active segments (default=(150,150,150)).
inactive_colour : (int, int, int), optional
The colour of the inactive segments (default=(0,0,0)).
frame_colour : (int, int, int), optional
The colour of the frame around the thermometer display
(default=(100,100,100)).
goal_colour : (int, int, int), optional
The colour of the goal indicator (default=(0,255,0)).
gap_colour : (int, int, int), optional
The gap colour of the thermometer stimulus
(default=(255,255,255)).
position : (int, int), optional
The position of the thermometer display).
"""
self._state = state
self._goal = goal
self._size = size
self._nr_segments = nr_segments
self._gap = gap
self._frame_line_width = frame_line_width
self._active_colour = active_colour
self._inactive_colour = inactive_colour
self._frame_colour = frame_colour
self._goal_colour = goal_colour
self._gap_colour = gap_colour
self._position = position
Visual.__init__(self, position)
_getter_exception_message = "Cannot set {0} if surface exists!"
@property
def state(self):
"""Getter for state."""
return self._state
@state.setter
def state(self, value):
"""Setter for state."""
if self.has_surface:
raise AttributeError(
ThermometerDisplay._getter_exception_message.format("state"))
else:
self._state = value
@property
def goal(self):
"""Getter for goal."""
return self._goal
@goal.setter
def goal(self, value):
"""Setter for goal."""
if self.has_surface:
raise AttributeError(
ThermometerDisplay._getter_exception_message.format("goal"))
else:
self._goal = value
@property
def size(self):
"""Getter for size."""
return self._size
@size.setter
def size(self, value):
"""Setter for size."""
if self.has_surface:
raise AttributeError(
ThermometerDisplay._getter_exception_message.format("size"))
else:
self._size = value
@property
def nr_segments(self):
"""Getter for nr_segments."""
return self._nr_segments
@nr_segments.setter
def nr_segments(self, value):
"""Setter for nr_segments."""
if self.has_surface:
raise AttributeError(
ThermometerDisplay._getter_exception_message.format(
"nr_segments"))
else:
self._nr_segments = value
@property
def gap(self):
"""Getter for gap."""
return self._gap
@gap.setter
def gap(self, value):
"""Setter for gap."""
if self.has_surface:
raise AttributeError(
ThermometerDisplay._getter_exception_message.format("gap"))
else:
self._gap = value
@property
def frame_line_width(self):
"""Getter for frame_line_width."""
return self._frame_line_width
@frame_line_width.setter
def frame_line_width(self, value):
"""Setter for frame_line_width."""
if self.has_surface:
raise AttributeError(
ThermometerDisplay._getter_exception_message.format(
"frame_line_width"))
else:
self._frame_line_width = value
@property
def active_colour(self):
"""Getter for active_colour."""
return self._active_colour
@active_colour.setter
def active_colour(self, value):
"""Setter for active_colour."""
if self.has_surface:
raise AttributeError(
ThermometerDisplay._getter_exception_message.format(
"active_colour"))
else:
self._active_colour = value
@property
def inactive_colour(self):
"""Getter for inactive_colour."""
return self._inactive_colour
@inactive_colour.setter
def inactive_colour(self, value):
"""Setter for inactive_colour."""
if self.has_surface:
raise AttributeError(
ThermometerDisplay._getter_exception_message.format(
"inactive_colour"))
else:
self._inactive_colour = value
@property
def frame_colour(self):
"""Getter for frame_colour."""
return self._frame_colour
@frame_colour.setter
def frame_colour(self, value):
"""Setter for frame_colour."""
if self.has_surface:
raise AttributeError(
ThermometerDisplay._getter_exception_message.format(
"frame_colour"))
else:
self._frame_colour = value
@property
def goal_colour(self):
"""Getter for goal_colour."""
return self._goal_colour
@goal_colour.setter
def goal_colour(self, value):
"""Setter for goal_colour."""
if self.has_surface:
raise AttributeError(
ThermometerDisplay._getter_exception_message.format(
"goal_colour"))
else:
self._goal_colour = value
@property
def gap_colour(self):
"""Getter for gap_colour."""
return self._gap_colour
@gap_colour.setter
def gap_colour(self, value):
"""Setter for gap_colour."""
if self.has_surface:
raise AttributeError(
ThermometerDisplay._getter_exception_message.format(
"gap_colour"))
else:
self._gap_colour = value
def _create_surface(self):
"""Create the surface of the stimulus."""
surface = pygame.surface.Surface((self._size[0] +
self._frame_line_width,
self._size[1] +
self._frame_line_width),
pygame.SRCALPHA).convert_alpha()
if self._gap_colour is not None:
surface.fill(self._gap_colour)
parts = []
width = self._size[0] - self._frame_line_width - \
2 * self._frame_line_width % 2
height = self._size[1] - self._frame_line_width - \
2 * self._frame_line_width % 2 + 1
s_height = int(height - (self._nr_segments + 1) *
self._gap) // self._nr_segments
for x in range(self._nr_segments):
if x < self._state / 100.0 * self._nr_segments:
colour = self._active_colour
else:
colour = self._inactive_colour
s = Rectangle((width - self._gap * 2,
s_height), colour=colour,
position=(0, -height // 2 + s_height // 2 + x *
height // self.nr_segments + self._gap))
parts.append(s)
parts.append(Rectangle(self._size, colour=self._frame_colour,
line_width=self._frame_line_width,
position=self._position))
parts.append(Rectangle((width - self._gap, height - self._gap * 2),
colour=self._gap_colour,
line_width=self._gap,
position=self._position))
if self._goal is not None:
x = int(round(self._goal / 100.0 * self._nr_segments)) - 1
current_y_pos = -height // 2 + s_height // 2 + \
x * height // self._nr_segments + self._gap
above_y_pos = -height // 2 + s_height // 2 + \
(x + 1) * height // self._nr_segments + self._gap
g1 = Rectangle((self._frame_line_width * 1.25,
self._frame_line_width * 1.25),
colour=self._goal_colour,
position=(
-self._size[0] // 2 - self._frame_line_width // 2,
(current_y_pos + above_y_pos) // 2))
g2 = Rectangle((self._frame_line_width * 1.25,
self._frame_line_width * 1.25),
colour=self._goal_colour,
position=(
self._size[0] // 2 + self._frame_line_width // 2,
(current_y_pos + above_y_pos) // 2))
g1.rotate(45)
g2.rotate(45)
parts.append(g1)
parts.append(g2)
for stim in parts:
stim.rect = pygame.Rect((0, 0), stim.surface_size)
surface_size = surface.get_size()
stim.rect.center = [stim.position[0] + surface_size[0] // 2,
- stim.position[1] + surface_size[1] // 2]
surface.blit(stim._get_surface(), stim.rect)
return surface
if __name__ == "__main__":
from ... import control
control.set_develop_mode(True)
control.defaults.event_logging = 0
exp = control.initialize()
thermometer_display = ThermometerDisplay(50, 50)
thermometer_display.present()
#exp.clock.wait(1000)
exp.keyboard.wait()
| expyriment/expyriment-stash | extras/expyriment_stimuli_extras/thermometerdisplay/_thermometerdisplay.py | _thermometerdisplay.py | py | 10,984 | python | en | code | 20 | github-code | 36 | [
{
"api_name": "expyriment.stimuli._visual.Visual",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "expyriment.stimuli._visual.Visual.__init__",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "expyriment.stimuli._visual.Visual",
"line_number": 79,
"usag... |
42998098746 | from __future__ import annotations
from typing import TYPE_CHECKING, Any
from ..network import ID_TOKEN_AUTHENTICATOR
from .by_plugin import AuthByPlugin, AuthType
from .webbrowser import AuthByWebBrowser
if TYPE_CHECKING:
from ..connection import SnowflakeConnection
class AuthByIdToken(AuthByPlugin):
"""Internal IdToken Based Authentication.
Works by accepting an id_toke and use that to authenticate. Only be used when users are using EXTERNAL_BROWSER_AUTHENTICATOR
"""
@property
def type_(self) -> AuthType:
return AuthType.ID_TOKEN
@property
def assertion_content(self) -> str:
return self._id_token
def __init__(
self,
id_token: str,
application: str,
protocol: str | None,
host: str | None,
port: str | None,
**kwargs,
) -> None:
"""Initialized an instance with an IdToken."""
super().__init__(**kwargs)
self._id_token: str | None = id_token
self._application = application
self._protocol = protocol
self._host = host
self._port = port
def reset_secrets(self) -> None:
self._id_token = None
def prepare(self, **kwargs: Any) -> None:
pass
def reauthenticate(
self,
*,
conn: SnowflakeConnection,
**kwargs: Any,
) -> dict[str, bool]:
conn.auth_class = AuthByWebBrowser(
application=self._application,
protocol=self._protocol,
host=self._host,
port=self._port,
timeout=conn.login_timeout,
backoff_generator=conn._backoff_generator,
)
conn._authenticate(conn.auth_class)
conn._auth_class.reset_secrets()
return {"success": True}
def update_body(self, body: dict[Any, Any]) -> None:
"""Idtoken needs the authenticator and token attributes set."""
body["data"]["AUTHENTICATOR"] = ID_TOKEN_AUTHENTICATOR
body["data"]["TOKEN"] = self._id_token
| snowflakedb/snowflake-connector-python | src/snowflake/connector/auth/idtoken.py | idtoken.py | py | 2,028 | python | en | code | 511 | github-code | 36 | [
{
"api_name": "typing.TYPE_CHECKING",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "by_plugin.AuthByPlugin",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "by_plugin.AuthType.ID_TOKEN",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_n... |
70874373865 | from __future__ import annotations
from contextlib import contextmanager
from datetime import datetime
from decimal import Decimal
from typing import ContextManager, List, Text
from uuid import UUID, uuid4
import sqlalchemy as sa
import sqlalchemy_utils as sa_utils
from injector import inject
from sqlalchemy import func
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm import Session
from application.bus import Event, EventBus
from application.db import Base, Transaction
from currency import BTCRate, Currency
from .interface import BuyOrder, Repository
from .types import BtcAmountColumn, FiatAmountColumn
class DBBuyOrder(Base, BuyOrder):
def __init__(
self,
request_id: UUID,
paid: Decimal,
bought: Decimal,
exchange_rate: BTCRate,
) -> None:
self.id = uuid4()
self.request_id = request_id
self.paid = paid
self.bought = bought
self.exchange_rate = exchange_rate
__tablename__ = "buy_orders"
_db_id: int = sa.Column("id", sa.Integer, primary_key=True)
id: UUID = sa.Column("order_id", sa_utils.UUIDType, unique=True)
request_id: UUID = sa.Column(sa_utils.UUIDType, unique=True)
paid: Decimal = sa.Column(FiatAmountColumn)
bought: Decimal = sa.Column(BtcAmountColumn)
_currency: Currency = sa.Column(
"currency", sa.Enum(Currency, naive_enum=False)
)
_price: Decimal = sa.Column("price", FiatAmountColumn)
_rate_date: datetime = sa.Column("rate_date", sa.DateTime)
@hybrid_property
def exchange_rate(self) -> BTCRate:
return BTCRate(
price=self._price,
currency=self._currency,
on_date=self._rate_date,
)
@exchange_rate.setter
def exchange_rate(self, value: BTCRate) -> None:
self._price = value.price
self._currency = value.currency
self._rate_date = value.on_date
when_created: datetime = sa.Column(
"when_created", sa.DateTime, default=datetime.utcnow, index=True
)
_when_updated: datetime = sa.Column(
"when_updated", sa.DateTime, onupdate=datetime.utcnow,
)
def __repr__(self) -> Text:
return (
"<"
f"{self.__class__.__name__}"
f" db_id={self._db_id}"
f" order_id={self.id}"
f" request_id={self.request_id}"
f" amount={self.bought!s}BTC"
">"
)
@inject
class ORMRepository(Repository):
_session: Session
_pending_events: List[Event]
def __init__(self, transaction: Transaction, bus: EventBus) -> None:
self._transaction = transaction
self._bus = bus
@contextmanager
def lock(self) -> ContextManager[Decimal]:
events = []
with self._transaction() as session:
self._session = session
self._pending_events = events
lock = f"LOCK TABLE {DBBuyOrder.__tablename__} IN EXCLUSIVE MODE;"
session.execute(lock)
yield self._calculate_balance()
del self._session
del self._pending_events
for event in events:
self._bus.emit(event)
def _calculate_balance(self) -> Decimal:
balance, = self._session.query(func.sum(DBBuyOrder.bought)).one()
return Decimal(balance or 0)
def create(
self,
request_id: UUID,
paid: Decimal,
bought: Decimal,
with_rate: BTCRate,
) -> BuyOrder:
entry = DBBuyOrder(
request_id=request_id,
paid=paid,
bought=bought,
exchange_rate=with_rate,
)
self._session.add(entry)
self._session.flush()
return entry
def emit(self, event: Event) -> None:
self._pending_events.append(event)
def get_order_id(self, for_request_id: UUID) -> BuyOrder | None:
with self._transaction() as session:
result = (
session.query(DBBuyOrder.id)
.filter_by(request_id=for_request_id)
.one_or_none()
)
return result and result[0]
| lzukowski/workflow | src/ordering/db/buy_order.py | buy_order.py | py | 4,190 | python | en | code | 5 | github-code | 36 | [
{
"api_name": "application.db.Base",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "interface.BuyOrder",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "uuid.UUID",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "decimal.Decimal",
... |
19090796379 | from __future__ import annotations # postpone evaluation of annotations
import pandas as pd
from itertools import combinations, product, permutations, repeat, chain
import numpy as np
import math
from sklearn.metrics import auc
import pickle
import gzip
from scipy.special import stdtr
from scipy.stats import pearsonr
import statsmodels.api as sm
import statsmodels.formula.api as smf
from statsmodels.regression.linear_model import RegressionResultsWrapper
from statsmodels.stats.diagnostic import het_breuschpagan, het_white
from statsmodels.stats.multitest import multipletests
from sklearn.decomposition import PCA
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import normalize, QuantileTransformer, StandardScaler
from scipy.stats import chi2, shapiro, f, skew
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib
import time as t
from typing import Iterable,Any
import multiprocessing as mp
from resources import *
import resources
def getMemoryOfVars():
"""Prints the top ten vars in terms of memory usage
Returns:
_type_: _description_
"""
import sys
def sizeof_fmt(num, suffix='B'):
''' by Fred Cirera, https://stackoverflow.com/a/1094933/1870254, modified'''
for unit in ['','Ki','Mi','Gi','Ti','Pi','Ei','Zi']:
if abs(num) < 1024.0:
return "%3.1f %s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f %s%s" % (num, 'Yi', suffix)
for name, size in sorted(((name, sys.getsizeof(value)) for name, value in list(
locals().items())), key= lambda x: -x[1])[:10]:
print("{:>30}: {:>8}".format(name, sizeof_fmt(size)))
def read(filepath: str):
"""Load one of the pickled objects stored in filepath
Args:
filepath (str): filepath of pickled, gziped object
Returns:
_type_: object
"""
import sys
sys.path.append('resources')
with gzip.open(filepath, 'rb') as f:
object = pickle.load(f)
f.close()
object.filepath = filepath
return object
def ols(Y,X):
"""Give the OLS regression results for a given set of features and a response variable, like R Summary
Args:
Y (_type_): Data with dependent variables
X (_type_): Data with independent variables
Returns:
OLS: Regressor object, used for predictions for example
RegressionResultsWrapper: Results of the regression, like betas and all types of test statistics
"""
X = sm.add_constant(X)
regressor = sm.OLS(Y, X)
results = regressor.fit()
print(results.summary())
return regressor, results
def covMatrixAnalysis(data:pd.DataFrame)-> tuple[float, float]:
    """Gives a summary of the covariance matrix in terms of the % of ~1 diagonal entries and the % of ~0 off-diagonal entries,
    useful to see if the observations are independent or not: if the off-diagonal elements are close to zero and
    the diagonal is close to 1 then the observations are independent.

    Args:
        data (pd.DataFrame): Dataframe from which the covariance matrix will be calculated (np.cov treats rows as variables)

    Returns:
        tuple[float, float]: (% of diagonal entries ~1, % of off-diagonal entries ~0)
    """
    cov = np.cov(data)
    # Only the strictly upper triangle is inspected because cov is symmetric.
    # Indexing with triu_indices_from extracts ONLY the off-diagonal entries;
    # the previous np.triu(cov, k=1).flatten() also counted the zeroed-out
    # lower-triangle/diagonal positions as "zero", inflating the percentage.
    offDiagonal = cov[np.triu_indices_from(cov, k=1)]
    if offDiagonal.size:
        isZero = [round(x, 8) == 0 for x in offDiagonal]
        nonDiagonalMetric: float = (sum(isZero) / len(isZero)) * 100
    else:
        nonDiagonalMetric = 100.0  # a single observation is vacuously independent

    diagonal = np.diag(cov)
    isUnit = [round(x, 4) == 1 for x in diagonal]
    diagonalMetric = (sum(isUnit) / len(isUnit)) * 100

    return diagonalMetric, nonDiagonalMetric
def calcR(XY) -> tuple[float, float]:
    """Pearson correlation coefficient and two-sided p-value for a packed (X, Y) pair.

    Args:
        XY: two-element iterable holding the two equally sized sample vectors

    Returns:
        tuple[float, float]: (correlation, p-value)
    """
    xValues, yValues = XY
    pearsonResult = pearsonr(xValues, yValues)
    return pearsonResult[0], pearsonResult[1]
def calcMahalanobis(y:pd.DataFrame, data: pd.DataFrame, cov:pd.DataFrame=None):
    """Compute the Mahalanobis distance of each row of y relative to the distribution of data.

    Args:
        y (pd.DataFrame): observations to score (samples x features)
        data (pd.DataFrame): reference data for the mean and, by default, the covariance
        cov (optional): pre-computed feature covariance matrix. Defaults to None,
            meaning it is estimated from data.

    Returns:
        tuple: (Mahalanobis distance per row of y, chi-squared survival-function p-values)
    """
    y_mu = (y - np.mean(data, axis=0)).T # In Linear Algebra the covariates are usually row vectors; in dataframes these are usually column vectors
    # 'cov is None' (not 'not cov'): truth-testing a DataFrame/ndarray raises
    # ValueError, so an explicitly supplied covariance matrix used to crash here.
    if cov is None:
        cov = np.cov(data.values.T)
    inv_covmat = np.linalg.inv(cov)
    left = np.dot(y_mu.T, inv_covmat)
    mahal = np.dot(left, y_mu).diagonal()
    # NOTE(review): degrees of freedom hard-coded to 3; presumably it should match
    # the number of features — confirm against callers before changing.
    pValue = chi2.sf(mahal, 3)
    return np.sqrt(mahal), pValue
def anovaExpTable(anovaData: pd.DataFrame, y: str, x: str) -> tuple[float, float]:
    """One-way ANOVA of y against the categorical column x: effect size and p-value.

    Args:
        anovaData (pd.DataFrame): Dataframe holding both x and y
        y (str): name of the response column
        x (str): name of the categorical column

    Returns:
        tuple[float, float]: (eta squared, F-test p-value)
    """
    # Fit the ANOVA model with x as a categorical regressor
    fittedModel = smf.ols(f'{y} ~ C({x})', data=anovaData).fit()
    # Type-II ANOVA table; its last row holds the residual sum of squares
    anovaTable = sm.stats.anova_lm(fittedModel, typ=2)
    sumSq = anovaTable['sum_sq'].values
    # Eta squared: share of the total variability in y explained by the x grouping
    etaSquared = (sumSq[:-1] / sum(sumSq))[0]
    fPValue = anovaTable['PR(>F)'].values[0]
    return (etaSquared, fPValue)
class MatrixData:
    """Base wrapper around a pandas DataFrame that can be loaded from csv,
    persisted as a gzipped pickle, queried, and compared against other matrices."""

    def __init__(self, filepath: str = None, data: pd.DataFrame = None, **readerKwargs):
        """Load the matrix from *filepath* (csv) when given, otherwise wrap a copy of *data*."""
        self.data = data
        self.filepath = filepath
        if filepath is not None:
            self.data: pd.DataFrame = pd.read_csv(filepath, **readerKwargs)
        elif data is not None:
            self.data: pd.DataFrame = data.copy()

    def __str__(self) -> str:
        """Delegate the textual representation to the underlying DataFrame."""
        return str(self.data)

    def write(self, filepath: str = None):
        """Persist the whole object as a gzipped pickle at *filepath* (falls back to self.filepath)."""
        if filepath is not None:
            self.filepath = filepath
        if self.filepath is not None:
            filepath = self.filepath
        assert filepath is not None, 'No filepath provided'
        with gzip.open(filepath, 'wb') as handle:
            pickle.dump(self, handle)

    def query(self, query: str, inplace: bool = False) -> "pd.DataFrame | None":
        """Run a pandas query; return the filtered copy, or store it back when *inplace* is True."""
        filtered = self.data.query(query).copy()
        if inplace:
            self.data = filtered
            return None
        return filtered

    def compare(self, other: "MatrixData", querySelf: str, queryOther: str, key: str = 'PPI') -> pd.DataFrame:
        """Query two MatrixData objects with independent queries and get the merged dataframe as result.

        Args:
            other (MatrixData): another MatrixData object
            querySelf (str): Query to apply on the self object
            queryOther (str): Query to apply on the other object
            key (str, optional): column name to merge on, acting as consensual index. Defaults to 'PPI'.

        Returns:
            pd.DataFrame: inner merge of both filtered frames on *key*
        """
        leftFrame: pd.DataFrame = self.query(querySelf).copy()
        rightFrame: pd.DataFrame = other.query(queryOther).copy()
        return leftFrame.merge(rightFrame, on=key)

    def getUniqueSetValues(self, feature: str):
        """Unique values found in column *feature* of the dataframe.

        Args:
            feature (str): The column name to extract the unique set of values from

        Returns:
            set: the unique set of values in the column
            None: placeholder for the (currently disabled) occurrence counts dict
        """
        uniqueValues = set(self.data[feature].unique())
        return uniqueValues, None
class ppiDataset(MatrixData):
    """Wrapper for an external protein-protein interaction resource (Corum, Biogrid
    or String) that extracts its set of putative PPI pairs."""

    def __init__(self, name:str, filepath:str = None, data: pd.DataFrame = None, **readerKwargs):
        """Wrap the raw external dataset; *name* identifies which resource it came from."""
        super().__init__(filepath, data, **readerKwargs)
        self.name = name
        self.ppis = set()  # populated by getPPIs()

    def getPPIs(self, dataset:str) -> set:
        """Get the curated and observed ppis of a certain external PPI dataset.

        Args:
            dataset (str): name of the dataset. It can either be 'corum', 'biogrid' or 'string'

        Returns:
            set: Set of tuples with proteinA and proteinB as putative PPI pairs
        """
        frame = self.data.copy()
        allowedDatasets = ['corum', 'biogrid', 'string']
        assert dataset in allowedDatasets, f"dataset not supported use one of the following 'corum', 'biogrid', 'string', got: {dataset}"

        if dataset == allowedDatasets[0]:
            # Corum: every complex lists its subunits ';'-separated; expand each
            # multi-subunit complex into all ordered pairs of its members.
            def expandComplex(complexRow):
                subunits = complexRow['subunits(Gene name)']
                if ';' in subunits:
                    complexRow['proteinTuple'] = list(permutations(subunits.split(';'), 2))
                return complexRow

            frame = frame.apply(expandComplex, axis=1)
            # Rows without a 'proteinTuple' (single-subunit complexes) become NaN and are dropped
            pairsPerComplex = list(frame.dropna()['proteinTuple'])
            ppiSet = {pair for pairs in pairsPerComplex for pair in pairs}
        elif dataset == allowedDatasets[1]:
            # Biogrid: keep only physical interactions, discard homodimers,
            # then record each interaction in both orientations.
            frame = frame.query("`Experimental System Type` == 'physical' and `Official Symbol Interactor A` != `Official Symbol Interactor B`").copy()
            forward = zip(frame['Official Symbol Interactor A'], frame['Official Symbol Interactor B'])
            backward = zip(frame['Official Symbol Interactor B'], frame['Official Symbol Interactor A'])
            ppiSet = set(forward) | set(backward)
        else:
            # String: already pairwise; record both orientations.
            forward = zip(frame['proteinA'], frame['proteinB'])
            backward = zip(frame['proteinB'], frame['proteinA'])
            ppiSet = set(forward) | set(backward)

        self.ppis = ppiSet
        return ppiSet
class ProteinsMatrix(MatrixData):
    """Samples x proteins expression matrix.

    On top of MatrixData it adds: pairwise Pearson/GLS correlation construction,
    TLS-residue + Mahalanobis analysis per PPI, whitening of the data, normality
    (Shapiro-Wilk) and homoskedasticity diagnostics, and several plotting helpers.
    """

    def __init__(self, filepath: str = None, data: pd.DataFrame = None, **readerKwargs):
        # Same construction semantics as MatrixData (csv filepath or DataFrame copy).
        super().__init__(filepath, data, **readerKwargs)

    def __str__(self) -> str:
        """Base DataFrame repr plus the normality/homoskedasticity summaries when available."""
        string = super().__str__()
        try:
            for summary in self.normSummary:
                string += "\n" + f"For a Global p-value of {summary[0]:.2f} and a threshold of {summary[1]} samples, {summary[2]:.2f}% of the proteins are not normally distributed"
            for summary in self.homoskeSummary:
                string += "\n" + f"For a Global p-value of {summary[0]:.2f} and a threshold of {summary[1]} samples, {summary[2]:.2f}% in {summary[3]} PPIs of after linear regression don't follow the homoskedasticity assumption"
            return string
        except:  # the summary attributes only exist after the respective tests were run
            return string

    def pearsonCorrelations(self, columnName: str, proteomicsType:str,thresholdInteraction:int = 5) -> PairwiseCorrMatrix:
        """Calculate the pearson correlations and corresponding p-value, displaying them in a pairwise manner, returning an instance of the PairwiseCorrMatrix class

        Args:
            columnName (str): Name given to the df column with the correlation metric
            proteomicsType (str): The type of proteomics data to use to create the pearson correlation matrix
            thresholdInteraction (int, optional): The minimum number of coincident samples for it to be considered a putative PPI. Defaults to 5.

        Returns:
            PairwiseCorrMatrix: final data structure with all the information regarding pairwise correlations
        """
        data = self.data.copy()
        permutationsColumns = list(combinations(data.columns, 2))
        pairwiseCorr:dict = {'PPI':[],'proteinA': [],'proteinB': [], columnName: [], 'p-value':[], 'counts':[]}

        for (proteinAName, proteinBName) in permutationsColumns:
            # Restrict the pair to the samples where both proteins were measured
            proteinA = data[proteinAName].dropna(axis=0)
            proteinB = data[proteinBName].dropna(axis=0)
            samples = proteinA.index.intersection(proteinB.index)
            proteinA = proteinA.loc[samples]
            proteinB = proteinB.loc[samples]
            count = len(proteinA)

            if count < thresholdInteraction :
                continue

            (corr, pValue) = pearsonr(proteinA, proteinB)
            pairwiseCorr['PPI'].append(proteinAName + ';' + proteinBName)
            pairwiseCorr['proteinA'].append(proteinAName)
            pairwiseCorr['proteinB'].append(proteinBName)
            pairwiseCorr[columnName].append(corr)
            pairwiseCorr['p-value'].append(pValue)
            pairwiseCorr['counts'].append(count)

        index = pairwiseCorr.pop('PPI')
        pairwiseCorrelations = PairwiseCorrMatrix(proteomicsType, None, pd.DataFrame(pairwiseCorr, index=index), [columnName, 'p-value'], [False, True])
        assert pairwiseCorrelations.data.shape[0] > 0, 'No pairwise correlations were found, try lowering the thresholdInteraction, or adding more samples'

        return pairwiseCorrelations

    def calculateResidues(self, ppis: Iterable[set[str]]) -> ResiduesMatrix:
        """For each 'Px;Py' pair, fit a total-least-squares line to the two proteins'
        expression and collect, per sample, the absolute TLS residue alongside the
        Mahalanobis distance (and its p-value) of the (Px, Py) point.

        Args:
            ppis (Iterable): iterable of 'proteinA;proteinB' identifier strings

        Returns:
            ResiduesMatrix: samples x (ppi, metric) residues table
        """
        proteomics = self.data.copy()
        tlsResList = []
        correlationsTLSMahal = []
        # Samples that are farthest from the linear regession line have the greatest TLS residues, hence they
        # are samples of interest where the PPI likely is having some biomolecular role.
        # So it would be interesting to see afterwards if that sample has a responsiveness to a drug all the other samples do not, meaning
        # we are in a presence of a PPI that might be correlated to a feature, a certain drug responsiveness
        for index, ppi in enumerate(ppis):
            proteinA = ppi.split(';')[0]
            proteinB = ppi.split(';')[1]

            X = proteomics.loc[:,proteinA].dropna(axis=0) #Get X and Y data
            Y = proteomics.loc[:,proteinB].dropna(axis=0)
            samplesInCommon = X.index.intersection(Y.index)

            if len(samplesInCommon) < 5: # We have no important information from protein protein interactions with less than 5 coocorence
                print(f"{ppi} was not used for calculating the tls Redidues Matrix because it did not have at least 5 samples")
                # NOTE(review): if the FIRST ppi is skipped here, tlsResData below is never
                # initialised and the final join raises NameError — confirm that inputs
                # always yield a usable first pair.
                continue

            X=X.loc[samplesInCommon] #Locate samples that both have some value (not nan)
            Y=Y.loc[samplesInCommon]

            # TLS Residues
            meanX = X.mean()
            meanY = Y.mean()
            meanErrorX = X - meanX #Start Calculating quantities to minimise for the tlsCoef Calculation
            meanErrorY = Y - meanY
            meanSqErrorX = meanErrorX ** 2
            meanSqErrorY = meanErrorY ** 2
            u = meanSqErrorX.sum()
            v = meanSqErrorY.sum()
            r = (meanErrorX * meanErrorY).sum()
            w = v - u
            tlsCoef = (w + (w**2 + r**2)**0.5) #Calculating tls Coefficient
            tlsCoef = tlsCoef/r
            intercept = meanY - (tlsCoef * meanX) #Intercept of linear fit
            predY = intercept + (tlsCoef * X)
            residues = abs(Y - predY) # TLS Residues in absolute val

            # Malahanobis Distance of each (Px, Py) point within the pair's 2D expression cloud
            proteinExpression = pd.concat([X,Y], axis=1)
            mahalDist, mahalPValues = calcMahalanobis(proteinExpression, proteinExpression, None)
            dfData = {(ppi, 'TLS'): residues,
                      (ppi,'malahanobis'): mahalDist,
                      (ppi,'mahalPValue'): mahalPValues,
                      }
            correlationsTLSMahal.append(pearsonr(residues, mahalDist)[0])
            residues = pd.DataFrame(dfData)

            if index == 0:
                tlsResData = residues
            else:
                tlsResList.append(residues)

        print('Statistical Description of the Pearson Correlation between TLS and Mahalanobis distance \n' + str(pd.DataFrame(correlationsTLSMahal).describe()))
        tlsResData = tlsResData.join(tlsResList, how='outer')

        return ResiduesMatrix(None,tlsResData)

    @classmethod
    def whitening(cls, proteinData:ProteinsMatrix, covMatrix:ProteinsMatrix, saveIndexes:bool= False) -> tuple[Any, Any]:
        """Whitten the proteinData, so that each covariate has the same variance, which is equal to one

        Args:
            proteinData (ProteinsMatrix): the data to be whitened
            covMatrix (ProteinsMatrix): The Protein Data used to calculate the sample-by-sample covariance matrix in order to whiten the data
            saveIndexes (bool, optional): return the whitened data as a DataFrame keeping the original labels. Defaults to False.

        Returns:
            tuple[Any, Any]: the warped X to be used in the linear regression and the intercept, which is the mean of the variances of a sample across all gene symbols
        """
        #invert it the covariance matrix
        proteinData = proteinData.data
        covMatrix = covMatrix.data
        samplesCommon = proteinData.index.intersection(covMatrix.index)
        if len(samplesCommon) < len(proteinData.index):
            print(f"We have lost {len(proteinData.index) - len(samplesCommon)} samples ")

        # Work only on the samples both matrices share
        proteinData = proteinData.loc[samplesCommon]
        covMatrix = covMatrix.loc[samplesCommon]
        covMatrix = np.cov(covMatrix)  # sample-by-sample covariance (np.cov treats rows as variables)
        covMatrix = np.linalg.inv(covMatrix)

        # Decompose it with Cholesky, returning the lower triangular matrix of the positive definite matrix covMatrix, because cov(x1,x2) == cov(x2,x1)
        cholsigmainvMean = np.linalg.cholesky(covMatrix)

        # Whittening transformation, we codify our data into a space where each the variance of each covariate is the same and equal to one,
        # so we are kind like normalising it, in fact that's exactly what we are doing ~ N(0,I) As they call it warping...
        warpedProteinsMean = proteinData.T.values @ cholsigmainvMean

        # The intercept is the sum of the choleski decomposition matrix, when the sum equals to one that sample is independent from all the others
        warpedIntereceptMean = cholsigmainvMean.T.sum(axis=0)

        if saveIndexes:
            # Back to a samples x proteins DataFrame with the original labels
            warpedProteinsMean = pd.DataFrame(warpedProteinsMean.T, columns=proteinData.columns, index=proteinData.index)

        return warpedProteinsMean, warpedIntereceptMean

    def getGLSCorr(self, proteomicsType:str, pValues: bool = True, listCovMatrix:list[pd.DataFrame] = None, coefColumnName :str = 'coef') -> PairwiseCorrMatrix:
        """Get the GLS coeficents between each Protein X and Y, where X != Y, these will measure the correlation between each protein.
        But this method differs from the pearsonCorrelations since it has underneath a GLM where the covariance matrix can be any specified.
        This covariance matrix will transform both X and y of the proteinData.data. By default this covariance matrix is calculated with proteinData.data
        Where we get the covariance between samples, as a similarity measure between samples. This tweak is speacilly important if our residuals
        correlate with X, meaning we are not in the normal OLS case.

        Args:
            proteomicsType (str): The type of proteomics data used to calculate the GLS Coefficients
            pValues (bool, optional): Add the pValues of each gls Coefficient to the output data. Defaults to True.
            listCovMatrix (list[pd.DataFrame], optional): List of matrices to use to calculate covariance, if only one is required insert [matrix]. Defaults to None.
            coefColumnName (str, optional): Name to appear on the Dataframe' Column of glsCoefs. Defaults to 'coef'.

        Returns:
            PairwiseCorrMatrix: Data structure with all above information
        """
        proteinData = self.data.copy()

        if listCovMatrix is not None:
            # The matrix used for the covariance is not the one used as an X in the linear regression,
            # we used this to allow the use of the genomic matrix as the matrix where the cov of each sample would be calculated,
            # since the tecidual bias of proteomic expression would be more present in the genome,
            # since this layer is upstream and more connected to the information the cell has from its diferentiation,
            # so it becomes easier to distinguish samples by tissue, by using its genomic data.
            # Therefore calculating the cov of genomics.csv could give a more correct value on the true covariation of two samples
            # and correct the possible correlation of PPIs that are simply due to the diferent baseline expression of proteins belonging in the ppi

            listCovMatrix.append(proteinData)
            samplesInCommon = pd.concat(listCovMatrix, axis=1, join='inner').index #Conactenate all matrices and select their indices which would be the common ones
            listCovMatrix.pop(-1) # Remove recently added proteinData to the list

            proteinData = proteinData.loc[samplesInCommon, :]
            proteinDataMean = proteinData.fillna(proteinData.mean())
            proteinDataMean.dropna(axis=1, inplace=True) #We delete all columns with nan values because after all this preprocessing they must be completetly empty columns
            covMatrix = np.zeros((len(samplesInCommon), len(samplesInCommon)))

            for dataForCov in listCovMatrix:
                dataForCov = dataForCov.loc[samplesInCommon, :]
                dataForCov.dropna(axis=1, thresh=round(proteinData.shape[0] * 0.2), inplace=True) #How we are handling missing data, there should be at leats 20% of missingness for a collumn to be dropable
                dataForCov = dataForCov.fillna(dataForCov.mean())
                # calculate covariance matrix in order to see the covariace between samples, and notice tecidual patterns codified in the samples
                dataForCov = np.cov(dataForCov)
                covMatrix = covMatrix + dataForCov

        else: # The matrix used for the covariance is the same as that used for X in linear regression, we are whitening while taking into account the covariation of our data in X
            proteinData.dropna(axis=1, thresh=round(proteinData.shape[0] * 0.2), inplace=True) #We require that a protein has about 20% missingness for it to be considered a dropable column
            proteinDataMean = proteinData.fillna(proteinData.mean())
            dataForCov = proteinDataMean
            # calculate covariance matrix in order to see the covariace between samples, and notice tecidual patterns codified in the samples
            covMatrix = np.cov(dataForCov)

        # Build all unordered protein pairs (gene symbol = first token of the column name)
        proteinNames = proteinDataMean.columns.str.split(' ').str.get(0).to_numpy()
        proteinNames = [protein1 + ';' + protein2 for i, protein1 in enumerate(proteinNames) for j, protein2 in enumerate(proteinNames) if j > i]
        proteinsA, proteinsB = [proteinPair.split(";")[0] for proteinPair in proteinNames], [proteinPair.split(";")[1] for proteinPair in proteinNames]

        warpedProteinsMean, warpedIntereceptMean = ProteinsMatrix.whitening(self, self)

        def linear_regression(warped_screens, warped_intercept):
            # One simple regression per protein in whitened space: every other
            # protein is regressed on [intercept, protein_i] via least squares.
            GLS_coef = np.empty((len(warped_screens), len(warped_screens)))
            GLS_se = np.empty((len(warped_screens), len(warped_screens)))
            ys = warped_screens.T
            for proteinIndex in range(len(warped_screens)):
                X = np.stack((warped_intercept, warped_screens[proteinIndex]), axis=1)
                coef, residues = np.linalg.lstsq(X, ys, rcond=None)[:2]
                df = warped_screens.shape[1] - 2
                GLS_coef[proteinIndex] = coef[1]
                GLS_se[proteinIndex] = \
                    np.sqrt(np.linalg.pinv(X.T @ X)[1, 1] * residues / df)
            return GLS_coef, GLS_se

        GLS_coef, GLS_se = linear_regression(warpedProteinsMean, warpedIntereceptMean)

        df = warpedProteinsMean.shape[1] - 2

        # Construct new PairwiseGLSCoefs Matrix (upper triangle holds the unique pairs)
        glsCoefs = GLS_coef[np.triu_indices(GLS_coef.shape[0], k=1)]
        if pValues: #We might not want to add pValues so that we use less memory
            # Two-sided p-value from the Student-t CDF (scipy.special.stdtr)
            GLS_p = 2 * stdtr(df, -np.abs(GLS_coef / GLS_se))
            np.fill_diagonal(GLS_p, 1)
            glsPValues = GLS_p[np.triu_indices(GLS_p.shape[0], k=1)]
            pairwiseCorrData = pd.DataFrame(
                {'proteinA':proteinsA, 'proteinB': proteinsB, coefColumnName: glsCoefs, 'p-value': glsPValues}, index=proteinNames)
        else:
            pairwiseCorrData = pd.DataFrame(
                {'proteinA':proteinsA, 'proteinB': proteinsB, coefColumnName: glsCoefs}, index=proteinNames)

        pairwiseCorrData = PairwiseCorrMatrix(proteomicsType,None, pairwiseCorrData, [coefColumnName, 'p-value'], [False, True])
        pairwiseCorrData.data.index.name = 'PPI'

        return pairwiseCorrData

    def getGLSR(self, proteomicsType:str, coefColumnName :str = "GLM's R") -> PairwiseCorrMatrix:
        """Pearson correlations computed on the whitened (Cholesky-transformed) data, in parallel.

        Args:
            proteomicsType (str): The type of proteomics data used
            coefColumnName (str, optional): Column name for the coefficient. Defaults to "GLM's R".

        Returns:
            PairwiseCorrMatrix: pairwise correlation structure over the whitened data
        """
        proteinData = self.data.copy()
        # Get protein names in pair and individually
        proteinNames = proteinData.columns.str.split(' ').str.get(0).to_numpy()
        proteinNames = [protein1 + ';' + protein2 for i, protein1 in enumerate(proteinNames) for j, protein2 in enumerate(proteinNames) if j > i]
        proteinsA, proteinsB = [proteinPair.split(";")[0] for proteinPair in proteinNames], [proteinPair.split(";")[1] for proteinPair in proteinNames]
        #Transform data into Cholesky Space with Whitening
        withenedProteins, withenedInterecept = ProteinsMatrix.whitening(self, self, saveIndexes=True)
        #Get X and Y for Pearson correlation, now with the withened data
        X = [withenedProteins[proteinA].to_numpy() for proteinA in proteinsA]
        Y = [withenedProteins[proteinB].to_numpy() for proteinB in proteinsB]
        # Calculate Pearson correlation in parallel
        results = {
            'PPI': proteinNames,
            'proteinA': proteinsA,
            'proteinB':proteinsB,
            coefColumnName: [],
            'p-value': []}

        with mp.Pool(CPUS) as process:
            pararelResults = process.map(calcR, list(zip(X, Y)))

        for corr, pValue in pararelResults:
            results[coefColumnName].append(corr)
            results['p-value'].append(pValue)

        # Extract the results
        index = results.pop('PPI')
        pairwiseCorrData = pd.DataFrame(results, index=index)
        pairwiseCorr = PairwiseCorrMatrix(proteomicsType, None, pairwiseCorrData, [coefColumnName, 'p-value'], [False, True])

        return pairwiseCorr

    def shapiroWilksTest(self, thresh: int = 5, globalPVal:float = 0.01) -> None:
        """Performs the shappiro wilks test for each protein present and stores it in self.normTest and in self.normSummary

        Args:
            thresh (int): Minimum number of samples to perform the test. Defaults to 5.
            globalPVal (float): Global p-value threshold to reject the null hypothesis. Defaults to 0.01.

        Returns:
            Nonetype: None
        """
        data = self.data.copy()
        shapiroResults = {}
        for protein in data:
            proteinData = data[protein].dropna()
            if len(proteinData) >= thresh:
                stat, pVal = shapiro(proteinData)
            else:
                # Too few samples: record NaN so the protein is excluded below
                stat, pVal = np.nan, np.nan
            shapiroResults[protein] = {'stat': stat, 'p-value': pVal}

        shapiroResults = pd.DataFrame(shapiroResults).T
        self.normTest = shapiroResults
        shapiroResults = shapiroResults.dropna(axis=0)
        pValues = shapiroResults['p-value']
        # Benjamini-Hochberg FDR correction across all tested proteins
        rejected, correctedPVals, _, _ = multipletests(pValues, alpha=globalPVal, method='fdr_bh')
        numNonNormal = np.sum(rejected)
        ratioNonNormal = (numNonNormal/shapiroResults.shape[0]) * 100 # The smaller the pValue the more likely it is that the data is not normally distributed, we thence reject the null hypothesis that the data is normally distributed

        try: # If the atribute doesn't exist we create it
            self.normSummary.add((globalPVal, thresh, ratioNonNormal))
        except:
            self.normSummary = set()
            self.normSummary.add((globalPVal, thresh, ratioNonNormal))

        print(self.normSummary)

    def whiteTest(self, thresh: int = 5, ppis:set = None , globalPVal:float = 0.05) -> None:
        """Executes the White test (where H_0 is the homoskedasticity of the residuals, thence if the residuals are invariant to the change of x,
        meaning that for y~x, x explains most of the variability, leaving no confounding factor out of the regression equation), for a global pValue, and with PPIs with at leats thresh samples

        NOTE(review): the statistic actually computed below is statsmodels'
        het_breuschpagan (Breusch-Pagan test), not strictly White's test — confirm intent.

        Args:
            thresh (int, optional): The minimum number of samples required to calculate the test. Defaults to 5.
            ppis (set, optional): pairs to test; all column permutations when None. Defaults to None.
            globalPVal (float, optional): p-value used for the FDR correction. Defaults to 0.05.

        Returns:
            Nonetype: None
        """
        data = self.data.copy()
        if ppis is None:
            ppis: permutations[tuple[str, str]] = permutations(data.columns, 2)
        whiteResults = {}

        for x,y in ppis:
            #Getting and Processing Data
            xData = data[x].dropna()
            yData = data[y].dropna()
            samplesCommon = xData.index.intersection(yData.index)
            if len(samplesCommon) >= thresh:

                xData = xData.loc[samplesCommon]
                yData = yData.loc[samplesCommon]
                #Fitting the model (Classical Linear regression)
                regressor = LinearRegression()
                regressor.fit(xData.values.reshape(-1,1), yData.values.reshape(-1,1))
                yPred = regressor.predict(xData.values.reshape(-1,1))
                #Calculating the residuals
                residuals = yData.values.reshape(-1,1) - yPred
                #Add intercept to the data
                xData = pd.DataFrame({'x':xData, 'intercept':np.ones(len(xData))})
                #Calculating the White test
                stat, pValue,_,_ = het_breuschpagan(residuals, xData)

                whiteResults[(x,y)] = {'stat': stat, 'p-value': pValue}

        whiteResults = pd.DataFrame(whiteResults).T.reset_index(names=['proteinA', 'proteinB']) # This allows for a compatible merging with PaiwiseCorrMatrix objects
        self.homoskeTest = whiteResults
        numPPIs = whiteResults.shape[0]
        whiteResults = whiteResults.dropna(axis=0)
        pValues = whiteResults['p-value']
        # Benjamini-Hochberg FDR correction across all tested pairs
        rejected, _, _, _ = multipletests(pValues, alpha=globalPVal, method='fdr_bh')
        numHeteroske = np.sum(rejected)
        ratioHeteroske = (numHeteroske/numPPIs) * 100 # The smaller the pValue the more likely it is that the residuals are heteroskedastic, we thence reject the null hypothesis that the residuals are invariant when regressed with x, homoskedastic

        try: # If the atribute doesn't exist we create it
            self.homoskeSummary.add((globalPVal, thresh, ratioHeteroske))
        except:
            # NOTE(review): first-time creation stores a 4-tuple (includes numPPIs) while
            # subsequent adds store a 3-tuple — probably unintended; confirm which is wanted.
            self.homoskeSummary = set()
            self.homoskeSummary.add((globalPVal, thresh, ratioHeteroske, numPPIs))

        print(self.homoskeSummary)

    def independenceSamples(self) -> None:
        """Report how close the sample-by-sample covariance matrix is to the identity
        (i.e. how independent the samples are), before and after whitening."""
        data = self.data.copy()
        diagonalMean, nonDiagonalMean = covMatrixAnalysis(data)
        whitenedData, _ = ProteinsMatrix.whitening(self, self, True)
        whiteDiagonalMean, whiteNonDiagonalMean = covMatrixAnalysis(whitenedData)

        self.samplesIndep = f"Percentage of 1's in Diagonal: {diagonalMean}\nPercentage of 0's in non diagonal: {nonDiagonalMean}\nPercentage of 1's in Diagonal, after whitening: {whiteDiagonalMean}\nPercentage of 0's in non diagonal, after whitening: {whiteNonDiagonalMean}"
        print(self.samplesIndep)

    def plotPxPyDrug(self, drug:str, ppi:str, drugResponse: DrugResponseMatrix, filepath:str, **annotationArgs):
        """Scatter Px vs Py expression, colouring each sample by its binarised response
        to *drug*; the figure is saved to *filepath*."""
        drugResponse = drugResponse.binrise(inplace=False).T # The drug response matrix is binarised
        samplesCommon = self.data.index.intersection(drugResponse.index) # We only want to plot the samples that are in both matrices
        assert len(samplesCommon) > 0, 'There are no samples in common between the protein data and the drug response data'
        drugResponse = drugResponse.loc[samplesCommon, drug]
        proteinData = self.data.loc[samplesCommon, :]

        if len(ppi.split('-')) > 0: # NOTE(review): always true; strips any '-suffix' from the ppi id (a no-op when absent) — presumably the intent was > 1
            ppi = ppi.split('-')[0]
        pxName = ppi.split(';')[0]
        px = proteinData[pxName]
        pyName = ppi.split(';')[1]
        py = proteinData[pyName]
        data = pd.DataFrame({'px': px, 'py': py, 'drugResponse': drugResponse})
        colors = {0: 'green', 1: 'red'}
        # Map the binary response to dot colours on the scatter plot
        plt.figure(figsize=(15, 15))
        plt.scatter(data['px'], data['py'], c=data['drugResponse'].map(colors), label=[colors.values(), colors.keys()])
        plt.title('Protein expression \n with Drug Response, >50% [drugMax]')
        plt.xlabel(str(pxName))
        plt.ylabel(str(pyName))
        if annotationArgs is not None:
            plt.annotate(**annotationArgs)
        legend = [plt.Line2D([0], [0], marker='o', color=colors[key], label='Drug Response = ' + str(key)) for key in colors]
        plt.legend(handles = legend, fontsize=20, framealpha=0.2)
        plt.savefig(filepath)
        plt.close()

    def plotPxPy3DimContinous(self, interactionName:str, ppi:str, interactor: pd.DataFrame, typeOfInteraction:str, filepath:str|None, **annotationArgs):
        """Scatter Plot with the protein expression of two proteins and the interactor data as third dim, in a continous manner, unlike plotPxPyDrug.
        Additionally, the plot can be annotated with the arguments passed to the function. And the Interaction data will be represented with a colorbar and
        and an overlaying Kernel Distribution Estimation, and with size.

        Args:
            interactionName (str): name of the interaction present in the interactor Dataframe
            ppi (str): Protein-Protein Interaction (Px;Py)
            interactor (pd.DataFrame): Dataframe that represents the third dim, should have samples as index and interactions as columns
            typeOfInteraction (str): Type of interaction, can be 'drug response' or 'gene essentiality'
            filepath (str): filepath to save the plot; shown interactively when None
        """
        samplesCommon = self.data.index.intersection(interactor.index) # We only want to plot the samples that are in both matrices
        assert len(samplesCommon) > 0, 'There are no samples in common between the protein data and the drug response data'
        interactor = interactor.loc[samplesCommon, [interactionName]]
        # sort drug response so that the smallest shows up first, ascending==True
        interactor = interactor.sort_values(interactionName, ascending=False)
        samplesCommon = interactor.index
        proteinData = self.data.loc[samplesCommon, :]

        if len(ppi.split('-')) > 0: # NOTE(review): always true; strips any '-suffix' from the ppi id (a no-op when absent) — presumably the intent was > 1
            ppi = ppi.split('-')[0]
        pxName = ppi.split(';')[0]
        pyName = ppi.split(';')[1]
        plottingData = proteinData[[pxName, pyName]]
        #standardize the data
        plottingData = pd.DataFrame(StandardScaler().fit_transform(plottingData) ,columns=plottingData.columns, index=plottingData.index)
        #Add interactor
        plottingData = plottingData.join(interactor, how = 'inner')
        print(plottingData)

        plt.figure(figsize=(10, 10))
        scatter = sns.scatterplot(data=plottingData, x=pxName, y=pyName, hue=interactionName, size=interactionName, palette="viridis", alpha=1, edgecolor='none', s=15)
        norm = matplotlib.colors.Normalize(vmin=interactor.min(), vmax=interactor.max())
        # Add Colour Map
        sm = plt.cm.ScalarMappable(cmap="viridis", norm=norm)
        sm.set_array([])
        # Replace the default legend with a colorbar for the continuous variable
        scatter.get_legend().remove()
        scatter.figure.colorbar(sm, label=typeOfInteraction)

        plt.title(f'Protein expression \n with {typeOfInteraction}')
        plt.xlabel(str(pxName))
        plt.ylabel(str(pyName))
        if annotationArgs is not None:
            plt.annotate(**annotationArgs)
        if filepath is not None:
            plt.savefig(filepath)
        else:
            plt.show()
        plt.close()

    def plotPxPySample(self, ppis: list[str], filepath: str, sampleProp: str) -> None:
        """Plots the px against py proteins for each ppi in ppis, and colors the samples according to the sampleProp
        (a column of the internal samplesheet); one subplot per PPI, saved to filepath.
        """
        data = self.data.copy()
        samplesheet = pd.read_csv(PATH + '/internal/samplesheet.csv', index_col=0)
        samplesCommon = data.index.intersection(samplesheet.index)
        if len(samplesCommon) < data.shape[0]:
            print('We have lost samples for proteomics since the samplesheet did not completely overlap with our samples, check for an updated samplesheet')

        data = data.loc[samplesCommon, :]
        samplesheet = samplesheet.loc[samplesCommon, :].fillna('NaN')
        uniquePropValues = list(MatrixData(None, samplesheet).getUniqueSetValues(sampleProp)[0])
        # if len(uniquePropValues) > 400:
        #     print(
        #         f'There are more than 400 unique values for {sampleProp}, which is not very informative, please choose a different sampleProp')
        #     return None
        # One distinct colour per property value
        colors = {value: color for value, color in zip(uniquePropValues, sns.color_palette('dark', len(uniquePropValues)))}

        numPPIs = len(ppis)
        # Setting up plot axes, one row per PPI
        fig, ax = plt.subplots(numPPIs, 1, figsize=(15, 15*numPPIs))
        for index, ppi in enumerate(ppis):
            pxpyNames = ppi.split(';')
            pxName, pyName = pxpyNames[0], pxpyNames[1]
            px, py = data[pxName], data[pyName]
            samplesCommon = px.index.intersection(py.index)
            # Get only relevant samples and data
            px, py = px.loc[samplesCommon], py.loc[samplesCommon]
            try:
                samplePropData = samplesheet.loc[samplesCommon, sampleProp]
            except:
                print("No Samples in Commmon between pxpy and feature")
                continue
            plottingData = pd.DataFrame(
                {'px': px, 'py': py, sampleProp: samplePropData})

            ax[index].scatter(plottingData['px'], plottingData['py'], c=plottingData[sampleProp].map(colors), label=[colors.values(), colors.keys()])
            ax[index].set_title('test')
            ax[index].set_xlabel(pxName)
            ax[index].set_ylabel(pyName)
            legend = [plt.Line2D([0], [0], marker='.', color=colors[key], label=f'{sampleProp} = ' + str(key)) for key in colors]
            ax[index].legend(handles=legend, fontsize=8, framealpha=0.2)

        # Save
        fig.savefig(filepath)
        plt.close()

    def PCA(self, filepath:str='', numPC:int = 10, factorsName:str='', **pcaKwargs) -> "tuple[PCA, DataFrame]":
        """Generate the PCA of the protein data

        Args:
            filepath (str, optional): File name and directory where
            the scree and PC cumulative variance plot will be stored. Defaults to ''.
            numPC (int, optional): number of principal components used. Defaults to 10.
            factorsName (str, optional): Name to give to the factors, appears on the Dataframe's (factors) columns. Defaults to ''.

        Returns:
            PCA: The returned object will be the PCA object with all the relevant atributes
            factors: The new values of each observation on the Principal Component Space

        NOTE(review): `factors` is only assigned when factorsName != '' — with the
        default value the final return raises NameError; confirm and guard upstream.
        """
        # Fit PCA (the name resolves to the module-level PCA import — presumably
        # sklearn.decomposition.PCA; confirm against the file's imports)
        pca = PCA(n_components=numPC, **pcaKwargs).fit(self.data)
        #Get the values of each sample on the new PC space
        if factorsName != '':
            #Change the name of the columns of the factors and scores given by the PCA Object
            newColumns = [f'{factorsName}{i}' for i in range(1, numPC + 1)]
            factors = pd.DataFrame(pca.fit_transform(self.data), index=self.data.index, columns=newColumns)

        # Construct the plot of scree and the cumulative explained variance
        # Get the explained variance and explained variance ratio
        explained_variance = pca.explained_variance_
        explained_variance_ratio = pca.explained_variance_ratio_
        print(f"The sum of explained variance is {sum(explained_variance_ratio)}")

        # Calculate the cumulative explained variance
        # NOTE(review): cumsum of the absolute variances, not of the ratios — the red
        # line therefore is not on the 0-1 scale of the bar axis; confirm intent.
        cumulative_explained_variance = np.cumsum(explained_variance)

        # Bar plot for explained variance ratios
        ax1 = sns.barplot(x=np.arange(1, len(explained_variance_ratio) + 1), y=explained_variance_ratio, color='blue', alpha=0.8, edgecolor='k', linewidth=1, zorder=2)
        ax1.set_xlabel('Principal Component')
        ax1.set_ylabel('Explained Variance Ratio')
        ax1.set_title('Scree Plot and Cumulative Explained Variance')

        # Cumulative explained variance line plot
        ax2 = ax1.twinx()
        ax2.plot(cumulative_explained_variance, marker='o', color='red')
        ax2.set_xlabel('Cumulative Explained Variance')
        ax2.grid(False)
        ax2.set_ylim(0)

        plt.tight_layout(pad=2)
        if filepath != '':
            plt.savefig(filepath)
        plt.close()

        return pca, factors
class PairwiseCorrMatrix(MatrixData):
    """Pairwise correlation of every protein pair, plus recall-curve AUC bookkeeping.

    For each proxy metric (e.g. 'p-value', 'coef') the class can rank all pairs and
    compute the AUC of the cumulative recall of known PPIs from external datasets
    (Corum, Biogrid, String at several confidence levels).
    """

    def __init__(self, proteomicsType: str, filepath: str = None, data: pd.DataFrame = None, proxies: list[str] = None, ascendings: list[bool] = None, **readerKwargs):
        """Initialize the PairwiseCorrMatrix object, where we calculate the correlation between all the proteins in the data, and their p-value.
        Additionally we can use one of these values to calculate the auc of that proxy metric towards recalling PPIs in a specific yColumn Dataset, the ground truth.

        Args:
            proteomicsType (str): Name of the proteomics data used; appears in AUC labels.
            filepath (str, optional): filepath where the Dataframe is stored to instantiate the object. Defaults to None.
            data (pd.DataFrame, optional): Dataframe to instantiate the object. Defaults to None.
            proxies (list[str], optional): Column names used as proxies of PPI existence. Defaults to ['p-value', 'coef'].
            ascendings (list[bool], optional): Sort direction per proxy (True = smaller is better). Defaults to [True, False].
        """
        super().__init__(filepath, data, **readerKwargs)
        # None defaults avoid the shared mutable-default-argument pitfall
        # (the original used list literals as defaults).
        self.proxies = ['p-value', 'coef'] if proxies is None else proxies
        self.ascendings = [True, False] if ascendings is None else ascendings
        self.yColumn = []
        self.labels = {proxy: {} for proxy in self.proxies}
        self.corrCumSums = {proxy: {} for proxy in self.proxies}
        self.indexes = {proxy: {} for proxy in self.proxies}
        self.aucs = {proxy: {} for proxy in self.proxies}
        self.proteomicsType = proteomicsType

    def __str__(self):
        # Accumulate into a local string; the original shadowed the builtin print()
        text = super().__str__()
        for proxy, proxyDict in self.aucs.items():
            text = text + '\n' + str(proxy) + '\n'
            for yColumn, aucVal in proxyDict.items():
                text = text + str(self.labels[proxy][yColumn])
        return text

    def addGroundTruth(self, ppis: set, externalDatasetName: str):
        """Append the binary values of a putative PPI, from an external dataset (e.g Corum), to our pairwise correlation Dataframe.

        Args:
            ppis (set): set of (proteinA, proteinB) tuples of the external ppi dataset used
            externalDatasetName (str): Name to give to the binary column holding the truth value of a PPI seen in that external Dataset

        Returns:
            pd.DataFrame: Data with the added column
        """
        data = self.data.copy()
        # 1 when the (A, B) pair appears in the external dataset, 0 otherwise
        data[externalDatasetName] = [int((pA, pB) in ppis) for pA, pB in zip(data['proteinA'], data['proteinB'])]
        self.data = data
        self.yColumn.append(externalDatasetName)
        return data

    @classmethod
    def addGroundTruths(cls, insts: Iterable[PairwiseCorrMatrix]):
        """Adds all the ground truths of the existence of a PPI in an external PPI dataset to each PairwiseCorrMatrix object.
        The external PPI datasets used are: Corum, Biogrid, String150, String400, String700, String900.

        Args:
            insts (Iterable[PairwiseCorrMatrix]): An iterable of PairwiseCorrMatrix objects
        """
        from .utils import read
        # Load every external PPI dataset once, shared by all instances
        corum: ppiDataset = read(PATH + '/external/ppiDataset/corum.pickle.gz')
        biogrid: ppiDataset = read(PATH + '/external/ppiDataset/biogrid.pickle.gz')
        stringLow: ppiDataset = read(PATH + '/external/ppiDataset/string150.pickle.gz')
        stringMedium: ppiDataset = read(PATH + '/external/ppiDataset/string400.pickle.gz')
        stringHigh: ppiDataset = read(PATH + '/external/ppiDataset/string700.pickle.gz')
        stringHighest: ppiDataset = read(PATH + '/external/ppiDataset/string900.pickle.gz')
        ppis = [corum, biogrid, stringLow, stringMedium, stringHigh, stringHighest]
        for inst in insts:
            # Derive proteinA/proteinB columns from the 'A;B' index if missing
            if 'proteinA' not in set(inst.data.columns) or 'proteinB' not in set(inst.data.columns):
                inst.data['proteinA'] = [ppi.split(';')[0] for ppi in inst.data.index]
                inst.data['proteinB'] = [ppi.split(';')[1] for ppi in inst.data.index]
            for ppi in ppis:
                # if the ppi set was already added to the dataframe, skip it
                if ppi.name in inst.yColumn:
                    continue
                inst.addGroundTruth(ppi.ppis, ppi.name)
            # Persist the updated matrix
            inst.write()

    def aucCalculator(self, yColumnName: str, proteomicsType: str, proxyColumn: str, ascending: bool):
        """Adds the value of AUC of the Recall curve using a specified external PPI dataset with yColumnName.

        Args:
            yColumnName (str): Name of the df column holding the truth value of the PPI in the external dataset
            proteomicsType (str): Name of the proteomics data, used in the label text
            proxyColumn (str): Name of the column of the statistical measure quantifying the probability of PPI existence
            ascending (bool): should the proxyColumn be ordered ascending or not for best AUC calculation

        Returns:
            float: AUC of the recall curve
        """
        pairwiseCorr = self.data.copy()
        # Rank pairs from most to least likely PPI according to the proxy metric
        pairwiseCorr.sort_values(by=proxyColumn, ascending=ascending, inplace=True)
        # Cumulative fraction of true PPIs recovered as we walk down the ranking
        self.corrCumSums[proxyColumn][yColumnName] = np.cumsum(pairwiseCorr[yColumnName]) / np.sum(pairwiseCorr[yColumnName])
        # Fraction of pairs inspected so far (x axis of the recall curve)
        self.indexes[proxyColumn][yColumnName] = np.array(pairwiseCorr.reset_index().index) / pairwiseCorr.shape[0]
        self.aucs[proxyColumn][yColumnName] = auc(self.indexes[proxyColumn][yColumnName], self.corrCumSums[proxyColumn][yColumnName])
        self.labels[proxyColumn][yColumnName] = f" ({proteomicsType} proteomics using {proxyColumn} \n AUC:{self.aucs[proxyColumn][yColumnName]:.2f} recalling {yColumnName})"
        return self.aucs[proxyColumn][yColumnName]

    def aucsCalculator(self, proteomicsType: str, proxyColumnList: list[str], ascendingList: list[bool], filepath: str = None):
        """Get the AUCs of the recall curve for each proxy column (coef, p-value, etc.) and each external PPI dataset (corum, biogrid, etc.).

        Args:
            proteomicsType (str): Name of the PairwiseCorrMatrix object
            proxyColumnList (list[str]): List of proxies for PPI existence
            ascendingList (list[bool]): Sort direction per proxy for best AUC calculation
            filepath (str, optional): Filepath where to save the PairwiseCorrMatrix object. Defaults to None.
        """
        # zip pairs each proxy with its sort direction (clearer than index arithmetic)
        for proxyColumn, ascending in zip(proxyColumnList, ascendingList):
            for yColumn in self.yColumn:
                self.aucCalculator(yColumn, proteomicsType, proxyColumn, ascending)
        if filepath is not None:
            self.write(filepath)
        print(self)

    @classmethod
    def heatmap(cls, insts: list[PairwiseCorrMatrix], columns: list[str], bins: int, proteomics: ProteinsMatrix, filepath: str, title: str):
        """Creates a heatmap of the pairwise correlation between two columns of two PairwiseCorrMatrix objects,
        in order to see the amount of missing values in a specific range of both pairwise proxy correlation ranges.

        Args:
            insts (list[PairwiseCorrMatrix]): Two PairwiseCorrMatrix objects
            columns (list[str]): Names of the proxy columns for each PairwiseCorrMatrix object
            bins (int): Number of bins to use for the heatmap
            proteomics (ProteinsMatrix): Proteomics object to get the missing-value counts
            filepath (str): Path to save the heatmap
            title (str): Title of the heatmap

        Returns:
            tuple: Dataframe with the values of the heatmap, and the number of PPIs in each bin
        """
        dfs = [instance.data[column].copy() for instance, column in zip(insts, columns)]
        df = pd.concat(dfs, join='inner', axis=1)
        # Quantile-bin both proxies so every bin holds the same number of pairs
        df['bin0'] = pd.qcut(df[columns[0]], bins, precision=2)
        df['bin1'] = pd.qcut(df[columns[1]], bins, precision=2)
        intervals0 = sorted(df['bin0'].unique())
        intervals1 = sorted(df['bin1'].unique())
        heatmapData = pd.DataFrame()
        heatmapNumPPIs = pd.DataFrame()
        for interval0, interval1 in list(product(intervals0, intervals1)):
            colData = df.loc[df['bin0'] == interval0]
            rowData = df.loc[df['bin1'] == interval1]
            ppisCommon = set(colData.index.intersection(rowData.index))  # ppis shared by the two bin queries
            mvs = 0  # missing values counter
            for ppi in ppisCommon:
                proteinA = ppi.split(';')[0]
                proteinB = ppi.split(';')[1]
                mv = proteomics.data[[proteinA, proteinB]].isna().sum().sum()
                mvs = mvs + mv
            numPPIs = len(ppisCommon)
            if numPPIs == 0:
                mvsPerPPI = 0
            else:
                # Standardise missing values by the number of ppis in the bin pair
                mvsPerPPI = mvs / numPPIs
            heatmapData.loc[str(interval0), str(interval1)] = mvsPerPPI
            heatmapNumPPIs.loc[str(interval0), str(interval1)] = numPPIs
        plt.figure(figsize=(8, 8))
        sns.heatmap(heatmapData, annot=True, cmap='YlOrRd', fmt=".1f")
        plt.xlabel('Pearson R')
        plt.xticks(rotation=0)
        plt.ylabel('$β_{GLS}$')
        plt.title(title)
        plt.savefig(filepath)
        plt.close()
        # Number of PPIs per range heatmap
        plt.figure(figsize=(8, 8))
        sns.heatmap(heatmapNumPPIs, annot=True, cmap='YlOrRd', fmt=".0f")
        plt.xlabel('Pearson R')
        plt.xticks(rotation=0)
        plt.ylabel('$β_{GLS}$')
        plt.title(title + '\n' + ' Number of PPIs')
        # Bug fix: the original concatenated split('.')[0] + '#PPIs' + split('.')[1],
        # losing the '.' before the extension; rpartition keeps it.
        base, dot, ext = filepath.rpartition('.')
        plt.savefig(base + '#PPIs' + dot + ext)
        plt.close()  # close the second figure too (the original leaked it)
        return heatmapData, heatmapNumPPIs

    @classmethod
    def getAucs(cls, instances: Iterable[PairwiseCorrMatrix]):
        """Calculates the AUCs for a list of PairwiseCorrMatrix objects.

        Args:
            instances (Iterable[PairwiseCorrMatrix]): List of PairwiseCorrMatrix objects
        """
        for instance in instances:
            instance.yColumn = ['corum', 'biogrid', 'stringLow', 'stringMedium', 'stringHigh', 'stringHighest']  # The External Datasets in usage
            instance.aucsCalculator(instance.proteomicsType, instance.proxies, instance.ascendings, instance.filepath)
class DrugResponseMatrix(MatrixData):
    """Class interface and methods for the drug response data"""

    def __init__(self, filepath: str = None, data: pd.DataFrame = None, **readerKwargs):
        super().__init__(filepath, data, **readerKwargs)

    def binrise(self, deathThresh: int = 3, inplace: bool = True):
        """Binarise the drug response matrix: 1 means the drug is efficacious for that
        cell line, 0 means it is not. The cutoff is ln(0.5 * max screened concentration),
        an empirically proven threshold below which an IC50 indicates a responsive line.

        Args:
            deathThresh (int, optional): Number of 'responsive' cell lines required for a
                drug to be kept in the final set. Defaults to 3.
            inplace (bool, optional): When True store the result on self.data, otherwise return it.
        """
        ic50 = self.data.copy()
        maxConc = pd.read_csv(PATH + '/external/drugMaxScreenConcentration.csv', index_col='Unnamed: 0')
        maxConc.index.name = 'drug'
        # Keep only drugs with a known max screen concentration alongside the IC50s
        ic50 = ic50.merge(maxConc, on='drug')
        # Efficacy cutoff per drug: ln(0.5 * MAX_CONC)
        ic50['efficacyThreshold'] = ic50['MAX_CONC'].apply(lambda conc: math.log(conc * 0.5))
        ic50.drop(columns=['MAX_CONC'], inplace=True)
        # Row-wise comparison against the per-drug threshold; booleans become 0/1
        binary: pd.DataFrame = ic50.apply(lambda row: row < row['efficacyThreshold'], axis=1).astype(int)
        # Only keep drugs that kill at least deathThresh cell lines
        relevant = binary.sum(axis=1) >= deathThresh
        filtered = binary.loc[relevant].drop(columns=['efficacyThreshold'])
        if inplace:
            self.data = filtered
        else:
            return filtered
class GeneDependency(MatrixData):
    """Class interface and methods for the gene dependency data"""

    # Lazily-loaded cache of the CRISPR samplesheet used for Broad -> Sanger id mapping.
    # The original re-read the CSV on every convertBroadToSangerId call (once per row).
    _mappingFile = None

    def __init__(self, pValues: pd.DataFrame, fdrDone: bool, filepath: str | None = None, data: pd.DataFrame | None = None, name: str | None = None, **readerKwargs):
        """Initialise the gene dependency matrix.

        Args:
            pValues (pd.DataFrame): p-values matching self.data (samples x genes)
            fdrDone (bool): whether the p-values are already FDR-corrected; if not, correct them now
            filepath (str, optional): filepath where the Dataframe is stored. Defaults to None.
            data (pd.DataFrame, optional): Dataframe to instantiate the object. Defaults to None.
            name (str, optional): Human-readable name of this matrix. Defaults to None.
        """
        super().__init__(filepath, data, **readerKwargs)
        self.name = name
        self.pValues = pValues
        self.genes = self.data.columns  # At first we include all genes
        self.areIdsCorrected = GeneDependency.isCorrectIds(self.data)
        if not fdrDone:
            self.fdrCorrection()

    def resetFilter(self):
        """Resets the filter to include all genes"""
        self.genes = self.data.columns

    def fdrCorrection(self) -> None:
        """Apply FDR (Benjamini-Hochberg) correction to the p-values of the gene dependency matrix, per gene (column)."""
        pValues = self.pValues.copy()
        # multipletests returns (reject, corrected_pvals, ...); we keep the corrected p-values
        pValues = pValues.apply(lambda col: multipletests(col, method='fdr_bh')[1])
        self.pValues = pValues

    @staticmethod
    def convertBroadToSangerId(broadId: str) -> str | None:
        """Converts a Broad Id to a Sanger Id.

        Args:
            broadId (str): Broad Id

        Returns:
            str | None: Sanger Id, or None when no mapping exists
        """
        if GeneDependency._mappingFile is None:
            # Read the samplesheet once and cache it on the class
            GeneDependency._mappingFile = pd.read_csv(PATH + '/internal/geneInteractionModel/CRISPRSamplesheet.csv', index_col=0)
        mappingFile = GeneDependency._mappingFile
        try:
            sangerID = str(mappingFile.loc[mappingFile['BROAD_ID'] == broadId].index[0])
        except IndexError:
            sangerID = None
            print(f"IndexError: {broadId} not found in mappingFile")
        return sangerID

    @staticmethod
    def isCorrectIds(data: pd.DataFrame) -> bool:
        """Check if the gene dependency matrix has the correct ids, which should be Sanger Ids.

        Returns:
            bool: Is the Dataframe indexed by Sanger Ids? (True for an empty index, as before)
        """
        # Sanger model ids all start with 'SIDM'
        return all(index[0:4] == "SIDM" for index in data.index)

    @staticmethod
    def _withSangerIds(data: pd.DataFrame) -> pd.DataFrame:
        """Return a copy of *data* with its Broad-id index renamed to Sanger ids; unmappable rows are dropped."""
        data = data.copy()
        for broadID in list(data.index):
            sangerID = GeneDependency.convertBroadToSangerId(broadID)
            if sangerID is None:  # If we can't find the sanger Id, we drop the row
                data.drop(index=broadID, inplace=True)
            else:
                data.rename(index={broadID: sangerID}, inplace=True)
        return data

    def correctIds(self) -> None:
        """Correct the ids of the gene dependency matrix (and its p-values) so they match
        the protein matrix: Broad ids are translated to Sanger ids."""
        print(f"The Current shape of the gene dependecy is {self.data.shape}")
        if not self.isCorrectIds(self.data):
            self.data = self._withSangerIds(self.data)
        if not self.isCorrectIds(self.pValues):
            self.pValues = self._withSangerIds(self.pValues)
        print(f"After correction of ids, the shape of the gene dependecy is {self.data.shape}")
        self.areIdsCorrected = True

    def scaleSamples(self) -> pd.DataFrame:
        """
        Scale the gene dependency data,
        with geneDependencyData = (geneDependencyData - median(nonEssentialGenes)) / (median(nonEssentialGenes) - median(essentialGenes))
        So that the median of the nonEssentialGenes is 0, and the median of the essential genes is -1, so the more negative the value,
        the more essential the gene is. The more positive the value, the more non essential the gene is.
        """
        data = self.data.copy()
        # Load the control gene sets ONCE (the original re-read both CSVs for every sample row)
        essentialControls = set(pd.read_csv(PATH + '/external/achillesCommonEssentialControls.csv', index_col=0).index)
        nonEssentialControls = set(pd.read_csv(PATH + '/external/achillesNonessentialControls.csv', index_col=0).index)

        def scale(sample: pd.Series) -> pd.Series:
            # Only use control genes which are present in the sample
            essentialGenes = sample.loc[list(set(sample.index) & essentialControls)]
            nonEssentialGenes = sample.loc[list(set(sample.index) & nonEssentialControls)]
            essentialMedian = essentialGenes.median()
            nonEssentialMedian = nonEssentialGenes.median()
            return (sample - nonEssentialMedian) / (nonEssentialMedian - essentialMedian)

        return data.apply(scale, axis=1)

    def filterGenes(self, skewThresh: float = -1.25, medianScallingThresh: float = -0.5):
        """Select interesting genes: (1) after median scaling, depleted below medianScallingThresh
        in at least one sample; (2) left-skewed across samples beyond skewThresh.
        The selection is stored in self.genes."""
        # get median filtered gene dependency data, all samples are equivalent
        data = self.scaleSamples()
        print(f"Finnished scaling samples per median of essential and non essential set of genes and selecting only genes with at least one sample of value less than {medianScallingThresh}")
        genesOfInterest = data.columns[(data < medianScallingThresh).any()]
        print(f"From all the gene data of shape {data.shape[1]}, from the first filtration the set of genes is of size {len(genesOfInterest)}")
        data = data.loc[:, genesOfInterest]
        skewResults = skew(data, axis=0, nan_policy='omit').reshape(1, -1)
        skewResults = pd.DataFrame(skewResults, columns=data.columns)
        genesOfInterest = data.columns[(skewResults < skewThresh).any()]
        print(f"From all the gene data of shape {data.shape[1]}, from the second filtration the set of genes is of size {len(genesOfInterest)}")
        self.genes = genesOfInterest

    def createInteractionModel(
            self,
            ppis: Iterable[tuple[str, str]],
            proteomics: ProteinsMatrix,
            M: pd.DataFrame | pd.Series, **modelKwargs) -> DRInteractionPxModel:
        """Creates an Interaction Model which, instead of the Drug Response (samples*drug),
        uses the gene dependency data (samples*genes).

        Args:
            ppis (Iterable[tuple[str, str]]): PPIs to use in the interaction model
            proteomics (ProteinsMatrix): Proteomics data to use in the interaction model
            M (pd.DataFrame | pd.Series): Possible confounding factors to use in the interaction model

        Returns:
            DRInteractionPxModel: Interaction Model
        """
        if not self.areIdsCorrected:
            self.correctIds()
        # Only input genes in the model that are the important ones, the filtered genes
        geneDependencyData = self.data.loc[:, list(self.genes)]
        print(f"From all the gene data of shape {self.data.shape}, we only use {geneDependencyData.shape}")
        interactionModel = DRInteractionPxModel(ppis, proteomics, geneDependencyData, M, isGeneData=True, **modelKwargs)
        return interactionModel
class ResiduesMatrix(MatrixData):
    """Matrix of per-PPI regression residuals (samples x PPIs)."""

    def __init__(self, filepath: str = None, data: pd.DataFrame = None, **readerKwargs):
        super().__init__(filepath, data, **readerKwargs)

    def getLinearModel(self, drugResponse: DrugResponseMatrix, samplesheet: pd.DataFrame, residualsType: str = 'TLS') -> ResidualsLinearModel:
        """Fit a ResidualsLinearModel of drug response on the residuals held in this matrix,
        using tissue-derived binary confounders from the samplesheet."""
        predictors = self.data.copy()
        # Samples must be rows, so transpose the drug response matrix
        response: pd.DataFrame = drugResponse.data.copy().T
        response = response.fillna(response.mean())
        # Binary confounders: lung and haematopoietic/lymphoid cell lines
        tissue = samplesheet[['tissue']].dropna(axis=0, how='any')
        tissue['lung'] = (tissue['tissue'] == 'Lung').astype(int)
        tissue['hymCellLine'] = (tissue['tissue'] == 'Haematopoietic and Lymphoid').astype(int)
        confounders = tissue.drop(columns=['tissue'])
        model = ResidualsLinearModel(response, predictors, confounders, residualsType=residualsType)
        model.fit_matrix()
        return model
class GeneralLinearModel(MatrixData):
    """Authored by professor Emanuel Gonçalves, this class allows for the computation of the linear regression coefs for a set of features.
    Do check if the response variable is Normally distributed, since this is not a GLM.
    There is no logging and covariate normalisation should be done prior to creating the object.

    For each feature column of X, two nested OLS models are fit against every column of Y:
    a 'small' model with only the covariates M (and optionally M2), and a 'full' model with
    the covariates plus the feature. A log-likelihood-ratio test then scores the feature.
    """

    def __init__(
        self,
        Y,
        X,
        M,
        M2=None,
        fit_intercept=True,
        copy_X=True,
        n_jobs=4,
        verbose=1,
        addNoise: bool = False,
        filepath: str = None,
        data: pd.DataFrame = None,
        **readerKwargs
    ):
        """Align Y, X, M (and M2) on their common samples and standardise X.

        Args:
            Y: response matrix (samples x responses); columns with zero std are dropped.
            X: feature matrix (samples x features); columns with too few observations are dropped.
            M: covariate matrix (samples x covariates).
            M2: optional per-feature covariate matrix aligned to X's columns. Defaults to None.
            fit_intercept, copy_X, n_jobs: forwarded to sklearn's LinearRegression.
            verbose (int): verbosity flag (currently unused; logging is commented out).
            addNoise (bool): if True, tiny gaussian noise is added to the covariates in fit_matrix.
            filepath/data/readerKwargs: forwarded to the MatrixData base class.
        """
        super().__init__(filepath, data, **readerKwargs)
        # Samples present in every input matrix
        self.samples = set.intersection(
            set(Y.index),
            set(X.index),
            set(M.index),
            set(Y.index) if M2 is None else set(M2.index),
        )
        self.samples = list(self.samples)  # make sure it's a list, because indexing by sets is deprecated
        self.X = X.loc[self.samples]
        # Keep only features with more observations than covariates (+ intercept / M2 column)
        self.X = self.X.loc[:, self.X.count() > (M.shape[1] + (1 if M2 is None else 2))]
        # Standardise features; NaNs are preserved through StandardScaler and masked below
        self.X = pd.DataFrame(StandardScaler().fit_transform(self.X), index=self.X.index, columns=self.X.columns)
        # Masked view of X used to drop NaN rows per feature in fit_matrix
        self.X_ma = np.ma.masked_invalid(self.X.values)
        self.Y = Y.loc[self.samples]
        self.Y = self.Y.loc[:, self.Y.std() > 0]  # drop constant responses
        self.M = M.loc[self.samples]
        self.M2 = M2.loc[self.samples, self.X.columns] if M2 is not None else M2
        self.fit_intercept = fit_intercept
        self.copy_X = copy_X
        self.n_jobs = n_jobs
        self.verbose = verbose
        self.addNoise = addNoise

    def model_regressor(self):
        """Build a fresh sklearn LinearRegression configured from the instance flags."""
        regressor = LinearRegression(
            fit_intercept=self.fit_intercept,
            copy_X=self.copy_X,
            n_jobs=self.n_jobs,
        )
        return regressor

    @staticmethod
    def loglike(y_true, y_pred):
        """Gaussian log-likelihood of predictions, using the MLE variance ssr/nobs."""
        nobs = len(y_true)
        nobs2 = nobs / 2.0
        ssr = np.power(y_true - y_pred, 2).sum()
        llf = -nobs2 * np.log(2 * np.pi) - nobs2 * np.log(ssr / nobs) - nobs2
        return llf

    @staticmethod
    def multipletests_per(
        associations, method="fdr_bh", field="pval", fdr_field="fdr", index_cols=None
    ):
        """Apply multiple-testing correction to *field* separately within each group of *index_cols*.

        Args:
            associations (pd.DataFrame): long-format association results.
            method (str): correction method, forwarded to statsmodels' multipletests.
            field (str): p-value column to correct.
            fdr_field (str): name of the resulting corrected column.
            index_cols (list, optional): grouping columns. Defaults to ['y_id'].

        Returns:
            pd.DataFrame: associations with the corrected column added.
        """
        index_cols = ["y_id"] if index_cols is None else index_cols
        d_unique = {tuple(i) for i in associations[index_cols].values}
        df = associations.set_index(index_cols)
        df = pd.concat(
            [
                df.loc[i]
                .assign(fdr=multipletests(df.loc[i, field], method=method)[1])
                .rename(columns={"fdr": fdr_field})
                for i in d_unique
            ]
        ).reset_index()
        return df

    def fit_matrix(self):
        """Fit the nested models for every feature of X against all responses of Y.

        Returns:
            pd.DataFrame: one row per (feature, response) with beta, likelihood-ratio,
            p-value, per-feature FDR, R^2 and the covariate betas, sorted by p-value.
            The result is also stored on self.data.
        """
        lms = []

        for x_idx, x_var in enumerate(self.X):
            # Mask NaNs: rows where this feature is missing are excluded from all matrices
            x_ma = np.ma.mask_rowcols(self.X_ma[:, [x_idx]], axis=0)

            # Build matrices restricted to the feature's observed rows
            x = self.X.iloc[~x_ma.mask.any(axis=1), [x_idx]]
            y = self.Y.iloc[~x_ma.mask.any(axis=1), :]

            # Covariate matrix (optionally extended with the per-feature M2 column)
            m = self.M.iloc[~x_ma.mask.any(axis=1), :]
            if self.M2 is not None:
                m2 = self.M2.iloc[~x_ma.mask.any(axis=1), [x_idx]]
                m = pd.concat([m2, m], axis=1)

            if self.addNoise:
                # Tiny jitter to avoid perfectly collinear/constant covariates
                m += np.random.normal(0, 1e-6, m.shape)

            # Fit covariate-only (small) model
            lm_small = self.model_regressor().fit(m, y)
            lm_small_ll = self.loglike(y, lm_small.predict(m))

            # Fit full model: covariates + feature (feature is the LAST column)
            lm_full_x = np.concatenate([m, x], axis=1)
            lm_full = self.model_regressor().fit(lm_full_x, y)
            betasFeature = lm_full.coef_[:, -1]  # feature betas, one per response
            meanBetasCovariates = lm_full.coef_[:, 0:-1]  # covariate betas
            lm_full_ll = self.loglike(y, lm_full.predict(lm_full_x))

            # Log-likelihood-ratio test with 1 degree of freedom (one extra covariate)
            lr = 2 * (lm_full_ll - lm_small_ll)
            lr_pval = chi2(1).sf(lr)
            rSquared = lm_full.score(lm_full_x, y)

            # Report strong associations as they are found
            if rSquared > 0.65:
                print(f"Association: {x_var} and {y.columns} is significant with R^2 = {rSquared}")

            # Assemble + append results (FDR here is across responses for this single feature)
            res = pd.DataFrame(
                dict(
                    y_id=y.columns,
                    x_id=x_var,
                    n=y.attrs["nan_mask"].loc[y.columns, x.index].sum(1) if "nan_mask" in y.attrs else len(x),
                    beta=betasFeature,
                    lr=lr.values,
                    covs=m.shape[1],
                    pval=lr_pval,
                    fdr=multipletests(lr_pval, method="fdr_bh")[1],
                    rSquared=rSquared
                )
            )
            smallerBetas = pd.DataFrame(meanBetasCovariates)
            smallerBetas.columns = [str(col) + 'Beta' for col in m.columns]
            res = pd.concat([res, smallerBetas], axis=1)

            lms.append(res)

        lms = pd.concat(lms, ignore_index=True).sort_values("pval")
        self.data = lms  # Regression results, of each X towards every Y
        return lms

    @staticmethod
    def lm_residuals(y, x, fit_intercept=True, add_intercept=False):
        """Residuals of an OLS regression of y on x.

        Returns None when there are not enough observations to fit the model.
        """
        # Prepare input matrices: keep only rows observed in both, drop constant features
        ys = y.dropna()
        xs = x.loc[ys.index].dropna()
        xs = xs.loc[:, xs.std() > 0]
        ys = ys.loc[xs.index]

        if ys.shape[0] <= xs.shape[1]:
            return None

        # Linear regression model
        lm = LinearRegression(fit_intercept=fit_intercept).fit(xs, ys)

        # Residuals (the intercept is subtracted as well)
        residuals = ys - lm.predict(xs) - lm.intercept_

        # Optionally add the intercept back
        if add_intercept:
            residuals += lm.intercept_

        return residuals
class TLSRegression():
"""Implements Total Least Squares Regression"""
def __init__(self, Y:pd.DataFrame, X:pd.DataFrame, copy_X = True, fitIntercept=False, standardise:bool = True):
self.Y = Y
self.X = X
self.samples = set.intersection(
set(Y.index),
set(X.index),
)
self.samples = list(self.samples) # make sure it's a list, because indexing by sets is deprecated
self.X = X.loc[self.samples]
if standardise:
self.X = pd.DataFrame(StandardScaler().fit_transform(self.X), columns=self.X.columns, index=self.X.index) # Standardize X
self.X_ma = np.ma.masked_invalid(self.X.values)
self.Y = Y.loc[self.samples]
self.fitIntercept = fitIntercept
@staticmethod
def tlsRegression(X:pd.Series|pd.DataFrame, Y:pd.Series|pd.DataFrame, fitIntercept=True):
"""Calculates the TLS regression of X on Y.
Args:
X (_type_): Covariates, excluding intercept
Y (_type_): Response variable
Returns:
residuals, betas, predY, predX _type_:
(The regression's Residuals calculated with Forbenious norm of the errors of both X and Y, regression coefficients (includes beta0), predicted Y values, predicted X Values)
"""
features = ['intercept'] + list(X.columns)
samples = list(X.index)
assert samples == list(Y.index), "X and Y must have the same samples"
X = X.to_numpy() # Convert to numpy array
Y = Y.to_numpy() # Convert to numpy array
if fitIntercept:
ones = np.ones((X.shape[0], 1))
X = np.concatenate((ones, X), axis=1)
n = X.shape[1] # Get number of covariates
XY = np.column_stack((X, Y)) # Join X and Y
# Calculate the SVD of XY
_, _, VT_XY = np.linalg.svd(XY)
V_XY = VT_XY.T
Vxy = V_XY[0:n, n:]
Vyy = V_XY[n:, n:]
#Calculate the TLS estimator, and predictions
betas = -np.divide(Vxy,Vyy) # The regression coefficients of TLS regression
errorsXY = (-XY @ V_XY[:, n:] @ V_XY[:, n:].T) # The matrix of errors of X and Y
errorX:np.ndarray = errorsXY[:, 0:n]
errorY:np.ndarray = errorsXY[:, n:]
predX = (X + errorX)
predY = predX @ betas
residuals = np.linalg.norm(errorsXY, axis=1)# Given by the frobenius Norm of the matrix of error of X and Y
# print( f"residuals: \n {residuals}\n β_n:\n {betas}\n predictedY:\n {predY} \n predictedX:\n {predX} ")
residuals = pd.DataFrame(residuals, index=samples, columns=['residualsTLS'])
betas = pd.DataFrame(betas, index=features, columns=['betasTLS'])
predY = pd.DataFrame(predY, index=samples, columns=['predYTLS'])
predX = pd.DataFrame(predX, index=samples, columns=[f"{feature}_predXTLS"for feature in features])
return residuals, betas, predY, predX
def fit(self):
"""Fits the TLS regression model.
Returns:
self.residuals, self.betas, self.predY, self.predX
"""
self.residuals, self.betas, self.predY, self.predX = self.tlsRegression(self.X, self.Y)
return self.residuals, self.betas, self.predY, self.predX
# def logLikelihoodTest(self):
# X = self.X.to_numpy() # Load Data
# Y = self.Y.to_numpy()
# M = self.M.to_numpy()
# # large Model
# if self.M2 is not None: # Use M2 if it exists as confounding factor
# M2 = self.M2.to_numpy()
# M = np.concatenate([M, M2], axis=1)
# largeModel = np.concatenate([M, X], axis=1) # Join all covariates
# # Small Model
# if self.M2 is not None: # Use M2 if it exists as confounding factor
# M2 = self.M2.to_numpy()
# M = np.concatenate([M, M2], axis=1)
# smallModel = M
# #TODO: Understand how to calculate the logLikelihoodTest when in TLS regression
class ResidualsLinearModel(GeneralLinearModel):
    """GeneralLinearModel specialised to residual predictors: optionally filters the
    predictor matrix X to one residual type before fitting."""

    def __init__(self, Y, X, M, M2=None, fit_intercept=True, copy_X=True, n_jobs=4, verbose=1, residualsType: str = "TLS"):
        assert residualsType in ["TLS", "malahanobis", None], "residualsType must be either TLS, None or malahanobis"
        if residualsType is not None:
            # Keep only the columns of the requested residual type (2nd level of the column tuples),
            # then flatten the tuple columns into 'a-b' strings
            wanted = [col for col in X.columns if col[1] == residualsType]
            X = X.loc[:, wanted]
            X.columns = ['-'.join(col) for col in X.columns]
        super().__init__(Y, X, M, M2, fit_intercept, copy_X, n_jobs, verbose)

    def volcanoPlot(self, filepath: str, falseDiscoveryRate: float = 0.01, pValHzLine: float = 0.001):
        """
        Volcano plot in order to find statistically relevant relationships.
        Only associations below the given FDR are shown.
        """
        significant = self.data.copy()
        significant = significant.loc[significant['fdr'] < falseDiscoveryRate]
        logPVals = -np.log10(significant['pval'])
        betaVals = significant['beta']

        # Scatter of effect size vs significance
        plt.scatter(betaVals, logPVals, c="k", s=5, alpha=0.5, rasterized=True)

        # Axis labels
        plt.xlabel(r"$\beta$")
        plt.ylabel(r"$-\log_{10}(p-value)$")

        # Guides: zero-effect line and p-value threshold line
        plt.axvline(0, c="k", lw=0.5, ls="--")
        plt.axhline(-np.log10(pValHzLine), c="k", lw=0.5, ls="--", label=f"p-value = {pValHzLine}")

        plt.title("Volcano plot")
        plt.legend()
        self.volcanoPath = filepath
        plt.savefig(filepath, bbox_inches="tight")
        plt.close()

    def plotSignificantAssociation(self, proteomics: ProteinsMatrix, drugResponse: DrugResponseMatrix, filepath: str):
        """Plot the drug~PPI association with the smallest p-value."""
        ranked = self.data.copy().sort_values(by='pval', ascending=True)
        topDrug = ranked['y_id'].iloc[0]
        topPpi = ranked['x_id'].iloc[0]
        proteomics.plotPxPyDrug(topDrug, topPpi, drugResponse, filepath)
class UnbiasedResidualsLinModel(MatrixData):
"""This class is a Linear Model that uses the residuals of the linear regression between X and Y as the new predictor for Drug response.
But it does it by taking into consideration some confounding factors M, in our case growth props.
"""
def __init__(self, ppis:Iterable[tuple[str,str]], proteomics:ProteinsMatrix, drugRes:DrugResponseMatrix, M:pd.DataFrame|pd.Series, fitIntercept=True, copy_X=True, standardisePx = True):
"""Will create linear model, that will be used for the two fits.
Args:
Y (_type_): Protein A expression
X (_type_): Protein B expression
M (_type_): Confounding Factors in Dataframe
drugRes (DrugResponseMatrix): Drug Response Object
fitIntercept (bool, optional): Does the Linear Model have an intercept parameter. Defaults to False.
copy_X (bool, optional): _description_. Defaults to True.
"""
self.ppis = ppis
self.proteomics = proteomics
self.M = M
self.fitIntercept = fitIntercept
self.copy_X = copy_X
self.drugRes = drugRes
self.drugRes.data =drugRes.data.T #Because in this object samples are columns
self.standardisePx = standardisePx
def checkCompleteObservations(self, X:pd.DataFrame, Y:pd.DataFrame):
""" Removes samples that are not in common between X, Y, M and drug response matrices, which won't be useful
Args:
X (pd.DataFrame): Protein A expression
Y (pd.DataFrame): Protein B expression
Returns:
_type_: (X, Y , self.M) with only common samples
"""
X.dropna(axis=0, how='any', inplace=True)
Y.dropna(axis=0, how='any', inplace=True)
M :pd.DataFrame = self.M.dropna(axis=0, how='any')
#Check if M is Categorical
try:
categoricalCols = M.select_dtypes(include=['object']).columns
M = pd.get_dummies(M, columns=categoricalCols)
M.drop(columns=categoricalCols, inplace=True)
except AttributeError as e:
print(f"{e} M is a pd.Series")
M = pd.get_dummies(M)
M = M.iloc[:,1:]
self.samples = set.intersection(set(X.index), set(Y.index), set(M.index), set(self.drugRes.data.index)) # Get common samples
if len(self.samples) < 50:
print(f"Not enough samples, {len(self.samples)}, in common between X, Y, M and drug response matrices \n Skipping")
return None
X = X.loc[self.samples]
Y = Y.loc[self.samples]
M = M.loc[self.samples]
return X, Y, M
def twoFits(self) -> tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]:
""" Performs two fits the first TLS Model with Py ~ M + Px and the second gLM with DrugResponse ~ M + residuals of the first fit
Returns:
tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]: self.firstModelResiduals, self.firstModelBetas, self.secondModelResults
"""
# Get the residuals of the first fit
self.firstModelResiduals, self.firstModelBetas, M = self.fitPxPy() # This will give the coufounding factors M dummified
# Fit the second model using the residuals of the first model as the new predictor for drug response
Y = self.drugRes.data
Y.fillna(Y.mean(), inplace=True)
self.secondModel:GeneralLinearModel = GeneralLinearModel(self.drugRes.data, self.firstModelResiduals, M, fitIntercept=self.fitIntercept, copy_X=self.copy_X)
self.secondModelResults = self.secondModel.fit_matrix()
return self.firstModelResiduals, self.firstModelBetas, self.secondModelResults
def fitPxPy(self):
""" Fits the first model Py ~ M + Px, recursively for every PPI in self.ppis.
It then returns the residuals of this recursion, for every PPI in self.ppis. Along with the coeficients of the regression.
Returns:
self.firstModelResiduals, self.firstModelBetas tuple[pd.DataFrame,pd.DataFrame]: Residuals of the first model and the coeficients of the regression
"""
for XName, YName in self.ppis:
X = self.proteomics.data.loc[:,XName]
X = X.rename('proteinX')
if self.standardisePx:
X = (X - X.mean()) / X.std() # Standardise X
Y = self.proteomics.data.loc[:,YName]
XYMComplete = self.checkCompleteObservations(X, Y)
if XYMComplete is None:
print(f"X is {XName} and Y is {YName}, and there are no common samples between them, Drug Response and M \n Skipping")
continue
else:
X, Y, M = XYMComplete
X = pd.concat([X, M], axis=1)
model:TLSRegression = TLSRegression(Y, X, fitIntercept=self.fitIntercept, copy_X=self.copy_X, standardise=False)
residuals,betas,_,_ = model.fit()
residuals.columns = [f"{YName};{XName}"] # Rename columns to match the PPI
betas.columns = pd.MultiIndex.from_product([[YName], [XName]]) # Rename columns to match the PPI
betas = betas.T
residuals.index.name = "sample"
betas.index.name = "sample"
try:# If residuals already exists, merge them
self.firstModelResiduals = pd.merge(self.firstModelResiduals, residuals, how='outer', on='sample')
except AttributeError:
self.firstModelResiduals = residuals
try:# If betas already exists, concatenate them
self.firstModelBetas = pd.concat([self.firstModelBetas, betas], axis=0)
except AttributeError:
self.firstModelBetas = betas
assert self.firstModelResiduals is not None and self.firstModelBetas is not None, f"There are no Py ~ Px + M that has overlaping samples, so we can't build the first of the models"
return self.firstModelResiduals, self.firstModelBetas, M
def plotSignificantAssociation(self, proteomics: ProteinsMatrix, drugResponse: DrugResponseMatrix, filepath: str):
    """Scatter the single most significant (lowest p-value) drug ~ PPI association
    found by the second model.

    Args:
        proteomics (ProteinsMatrix): Proteomics object used for plotting.
        drugResponse (DrugResponseMatrix): Drug response object passed to the plotter.
        filepath (str): Where to save the resulting plot.
    """
    results = self.secondModelResults.copy()
    results = results.sort_values(by='pval', ascending=True)
    drug = results['y_id'].iloc[0]
    ppi = results['x_id'].iloc[0]
    # Normalise the PPI identifier from 'pX:pY' to 'pX;pY'.
    # Bug fix: the original condition was `> 0`, which is always true
    # (str.split always returns at least one element) and crashed with
    # IndexError on identifiers that contain no ':'.
    parts = ppi.split(':')
    if len(parts) > 1:
        ppi = parts[0] + ';' + parts[1]
    proteomics.plotPxPyDrug(drug, ppi, drugResponse, filepath)
def loglike(y_true, y_pred):
    """Gaussian log-likelihood of a fitted model.

    Uses the concentrated (profiled-variance) normal log-likelihood, with the
    error variance estimated as SSR / n.

    Args:
        y_true: Observed values.
        y_pred: Model predictions, same length as y_true.

    Returns:
        float: The log-likelihood value.
    """
    n = len(y_true)
    half_n = n / 2.0
    sse = np.power(y_true - y_pred, 2).sum()
    return -half_n * np.log(2 * np.pi) - half_n * np.log(sse / n) - half_n
def extraSumSquares(largeNumCov: int, smallNumCov: int, trueY: pd.DataFrame, largePredY: np.ndarray, smallPredY: np.ndarray):
    """Extra-sum-of-squares (partial F) test between a nested pair of models.

    Args:
        largeNumCov (int): Number of covariates in the large model
        smallNumCov (int): Number of covariates in the small model
        trueY (pd.DataFrame): True Y values
        largePredY (np.ndarray): Predicted Y values of the large model
        smallPredY (np.ndarray): Predicted Y values of the small model

    Returns:
        Extra sum of squares p-value (survival function of the F distribution).
    """
    # Residual sums of squares of both models
    sseLarge = np.power(trueY - largePredY, 2).sum()
    sseSmall = np.power(trueY - smallPredY, 2).sum()
    # Degrees of freedom: q extra covariates, n - p residual df of the large model
    numExtraCov = largeNumCov - smallNumCov
    largeModelDF = trueY.shape[0] - largeNumCov
    # F statistic: (reduction in SSE per extra covariate) / (large-model MSE)
    fStatistic = ((sseSmall - sseLarge) / numExtraCov) / (sseLarge / largeModelDF)
    return f.sf(fStatistic, numExtraCov, largeModelDF)
def ppiWrapper(
        ppi: tuple[str, str],
        self: DRPxPyInteractionPxModel) -> dict:
    """Fit, for a single PPI (X, interactor), the interaction model against every
    drug in self.drugRes, and return the accumulated results as a dict of lists.

    Module-level (not a method) so it can be pickled by multiprocessing; `self`
    is passed explicitly by DRPxPyInteractionPxModel.fit via starmap.

    Args:
        ppi (tuple[str, str]): (X protein name, interactor protein name).
        self (DRPxPyInteractionPxModel): The model object carrying proteomics,
            drugRes, M and the regression settings.

    Returns:
        dict: One key per result column, each mapping to a list with one entry
        per fitted drug. NOTE(review): if every drug is skipped (no common
        samples or constant covariates), `dataDict` is never assigned and the
        return raises UnboundLocalError — confirm callers never hit this.
    """
    def modelRegressor(fitIntercept, copyX, nJobs):
        # Factory for a plain sklearn OLS regressor with the caller's settings.
        regressor = LinearRegression(
            fit_intercept=fitIntercept,
            copy_X=copyX,
            n_jobs=nJobs,
        )
        return regressor
    def linearModel(Y: pd.DataFrame, X: pd.DataFrame, M: pd.DataFrame, interactor: pd.DataFrame, fitIntercept: bool, copyX: bool, nJobs: int) -> dict:
        """Fit the large model Y ~ X + M + interactor + X*interactor and three
        nested small models, each dropping one term, and test each dropped term
        with the extra-sum-of-squares F test.

        Args:
            Y (pd.DataFrame): Response (one drug), samples x 1.
            X (pd.DataFrame): Protein X levels, samples x 1.
            M (pd.DataFrame): Confounders, samples x m.
            interactor (pd.DataFrame): Interactor protein levels, samples x 1.
            fitIntercept (bool): Whether the OLS models fit an intercept.
            copyX (bool): sklearn copy_X flag.
            nJobs (int): sklearn n_jobs flag.

        Returns:
            dict: Effect sizes of the large model, one p-value (and per-fit fdr)
            per dropped term, plus identifying info (Y, X, interactor, n, intercept).
        """
        # yChar:str = formula.split('~')[0].strip()
        # xChars:list[str] = formula.split('~')[1].split('+')
        # #find elemnt with * for the interaction term
        # for elem in xChars:
        #     if '*' in elem:
        #         interactionMembers = elem.split('*')
        # Element-wise product X * interactor = the interaction regressor
        interaction = interactor.iloc[:, 0].mul(X.iloc[:, 0], axis=0)
        interaction.name = 'interaction'
        # reordering of expressions to build the smaller and larger models
        # Large Model: Y ~ X + M + interactor + interaction
        xLarge = pd.concat([X, M, interactor, interaction], axis=1)
        # Make sure all columns are strings (sklearn rejects mixed-type column labels)
        xLarge.columns = xLarge.columns.astype(str)
        # 1st small Model : Y ~ X + M + interactor, test interaction
        xSmall = [pd.concat([X, M, interactor], axis=1)]
        # 2nd small Model : Y ~ X + M + interaction, test interactor
        xSmall.append(pd.concat([X, M, interaction], axis=1))
        # 3rd small Model : Y ~ M + interactor + interaction, test X
        xSmall.append(pd.concat([M, interactor, interaction], axis=1))
        tested = ['interactionPValue', 'interactorPValue', 'XPValue']
        lmLarge = modelRegressor(fitIntercept, copyX, nJobs).fit(xLarge, Y)
        # lmLargeLogLike = loglike(Y, lmLarge.predict(xLarge))
        coefs = lmLarge.coef_.tolist()[0]
        columns = ['X'] + M.columns.tolist() + ['interactor'] + ['interaction']
        # One effect-size entry per large-model covariate, keyed "<name>ES"
        res = {f"{col}ES": [coefs[index]] for index, col in enumerate(columns)}
        for index, x in enumerate(xSmall):
            x.columns = x.columns.astype(str)
            lmSmall = modelRegressor(fitIntercept, copyX, nJobs).fit(x, Y)
            # # llr
            # lmSmallLogLike = loglike(Y, lmSmall.predict(x))
            # lmSmallResidualsSq = np.power(Y - lmSmall.predict(x), 2)
            # lr = 2 * (lmLargeLogLike - lmSmallLogLike)
            # LogLikeliRatioPVal = chi2.sf(lr, X.shape[1])
            # Extra sum of squares test
            if fitIntercept:  # If the model has an intercept, then we need to add 1 to the number of covariates in the large and small models, because we are calculating an extra parameter, the intercept
                extraPValue = extraSumSquares(xLarge.shape[1] + 1, M.shape[1] + 1, Y, lmLarge.predict(xLarge), lmSmall.predict(x))
            else:
                extraPValue = extraSumSquares(xLarge.shape[1], M.shape[1], Y, lmLarge.predict(xLarge), lmSmall.predict(x))
            res[tested[index]] = extraPValue.tolist()
            # NOTE(review): multipletests on a single p-value is a no-op (BH of one
            # value returns it unchanged); the real correction presumably happens
            # across all fits downstream — confirm.
            res[f'fdr{tested[index]}'] = list(multipletests(extraPValue, method="fdr_bh")[1])
        res['Y'] = [Y.columns[0]]
        res['X'] = [X.columns[0]]
        res['interactor'] = [interactor.columns[0]]
        res['n'] = [Y.shape[0]]
        res['interceptES'] = [lmLarge.intercept_[0]]
        return res
    for index, drug in enumerate(self.drugRes.columns):
        Y = self.drugRes.loc[:, [drug]]
        X = self.proteomics.loc[:, [ppi[0]]]
        M = self.M
        interactor = self.proteomics.loc[:, [ppi[1]]]
        # Missing drug response is mean-imputed; missing covariates drop the sample
        Y = Y.fillna(Y.mean())
        X = X.dropna()
        M = M.dropna(axis=0)
        interactor = interactor.dropna()
        # get samples common to all dataframes
        samplesCommon = list(set.intersection(
            set(Y.index), set(X.index), set(interactor.index), set(M.index)
        ))  # samples common to all dataframes
        if len(samplesCommon) == 0:
            print(f"No samples in common between X, Y, M and interactor for {ppi} and {drug}, skipping")
            continue
        samplesCommon.sort()  # deterministic sample order across runs
        # subset dataframes to common samples
        Y = Y.loc[samplesCommon]
        X = X.loc[samplesCommon]
        interactor = interactor.loc[samplesCommon]
        M = M.loc[samplesCommon]
        # Constant covariates make the z-scoring below divide by zero, so skip
        if (X.std() == 0).any() or (interactor.std() == 0).any() or (M.std() == 0).any():
            print(f"X std is {X.std()}, interactor std is {interactor.std()} and M std is {M.std()} for {ppi} and {drug}, skipping")
            print(f"X, interactor and M are constant for {ppi} and {drug}, skipping")
            continue
        # Z-score all covariates (response Y is left on its original scale)
        X = (X - X.mean()) / X.std()
        interactor = (interactor - interactor.mean()) / interactor.std()
        M = (M - M.mean()) / M.std()
        res = linearModel(Y, X, M, interactor, self.fitIntercept, self.copyX, self.nJobs)
        if index == 0:  # In the first iteration we create the dictionary
            dataDict = res
        else:
            for key in res:  # Append values of each iteration in the dictionary per key
                dataDict[key] = dataDict[key] + res[key]
    return dataDict
class DRPxPyInteractionPxModel(MatrixData):
    """Linear model designed to find interactions between drug response and
    proteomics data: for each PPI (pX, interactor) and each drug it tests
    whether the pX * interactor term explains drug response beyond the main
    effects and the confounders M.

    Fitting is parallelised over PPIs via the module-level ppiWrapper; the
    accumulated results become self.data.
    """
    def __init__(self, ppis: Iterable[tuple[str, str]],
                 proteomics: ProteinsMatrix,
                 drugGene: pd.DataFrame,
                 M: pd.DataFrame | pd.Series,
                 fitIntercept=True, copyX=True, nJobs: int = 4,
                 filepath: str = None, data: pd.DataFrame = None,
                 **readerKwargs):
        """Store the fitting inputs and regression settings.

        Args:
            ppis (Iterable[tuple[str, str]]): (X, interactor) protein name pairs.
            proteomics (ProteinsMatrix): Proteomics object; only its .data is kept.
            drugGene (pd.DataFrame): Drug response (or gene essentiality), samples x drugs.
            M (pd.DataFrame | pd.Series): Confounder matrix.
            fitIntercept (bool): Whether OLS fits an intercept.
            copyX (bool): sklearn copy_X flag.
            nJobs (int): sklearn n_jobs flag.
            filepath (str): Optional path for MatrixData to load results from.
            data (pd.DataFrame): Optional pre-computed results.
        """
        super().__init__(filepath, data, **readerKwargs)
        self.ppis = ppis
        self.proteomics = proteomics.data
        self.drugRes = drugGene
        self.M = M
        self.fitIntercept = fitIntercept
        self.copyX = copyX
        self.nJobs = nJobs
        self.drugGeneLen = drugGene.shape[1]
        self.lenM = M.shape[1]
    # NOTE: an outdated, fully commented-out correctExtraSS implementation was
    # removed here; see PyPxDrugInteractionModel.correctExtraSS for the
    # maintained version of that correction.
    def fit(self, numOfCores=CPUS) -> pd.DataFrame:
        """Fit each Px and Py pair towards every drug in the drugRes dataframe,
        in parallel (one worker call per PPI via ppiWrapper).

        Returns:
            pd.DataFrame: The accumulated per-(PPI, drug) results; also stored
            in self.data.
        """
        # Each worker gets (ppi, self); ppiWrapper's signature is (ppi, self)
        pararelList = zip(self.ppis, repeat(self))
        with mp.Pool(numOfCores) as process:
            pararelResults = process.starmap(ppiWrapper, pararelList)
        # Merge the per-PPI dict-of-lists results into a single dict
        for index, result in enumerate(pararelResults):
            if index == 0:
                results = result
            else:
                for key in result:
                    results[key] = results[key] + result[key]
        results = pd.DataFrame(results, columns=results.keys())
        self.data = results
        return results
    def resiCorr(self) -> pd.DataFrame:
        """Calculates the correlation between the residuals of the large model and each drug
        and the residuals of the small model and each drug, using one-way ANOVA
        (drug as the categorical factor).

        NOTE(review): this indexes self.data with an ('info', ...) MultiIndex,
        but ppiWrapper produces flat columns ('Y', 'X', 'interactionPValue', ...);
        confirm this method matches this class's result schema.

        Returns:
            pd.DataFrame: The ANOVA coefficients for both residual columns.
        """
        data = self.data.copy()
        # get only relevant columns
        anovaData = pd.DataFrame(columns=['residSqSmall', 'residSqLarge', 'drug'])
        anovaData['drug'] = data['info']['drug']
        anovaData['residSqLarge'] = data['info']['residSqLarge']
        anovaData['residSqSmall'] = data['info']['residSqSmall']
        from resources import Anova
        modelLarge = Anova(anovaData, False)
        modelSmall = Anova(anovaData, False)
        modelLarge = modelLarge.fitOneWay('drug', 'residSqLarge')
        modelSmall = modelSmall.fitOneWay('drug', 'residSqSmall')
        coefsLarge = modelLarge.params
        coefsSmall = modelSmall.params
        # join both Dataframes to get the coefficients when y is the large or small model, with the respective column names
        self.resiCorrResults = pd.concat([coefsLarge, coefsSmall], axis=1, keys=['residSqLarge', 'residSqSmall'])
        return self.resiCorrResults
    def volcanoPlot(
            self,
            filepath: str,
            falseDiscoveryRate: float = 0.001,
            pValHzLine: float = 0.01,
            extraFeatures: bool = False,
            useExtraSS: bool = False,
            diffCutOff: float = 0):
        """Volcano plot in order to find statisticall relevant relationships.

        NOTE(review): indexes self.data with ('info', ...) MultiIndex columns,
        which does not match ppiWrapper's flat result columns — confirm.

        Args:
            filepath (str): Path to save the plot.
            falseDiscoveryRate (float, optional): The corrected p-value at which we start to acknowledge a relevant interaction, independently of how many times an hypothesis was tested. Defaults to 0.001.
            pValHzLine (float, optional): p-value line to draw on the plot, as a reference. Defaults to 0.01. NOTE(review): currently overridden by a hard-coded 0.05 below.
            extraFeatures (bool, optional): If True, will plot the volcano plot with extra features as hue, all in separate files. Defaults to False.
            useExtraSS (bool, optional): If True, will use the extra sum of squares p-value instead of the log likelihood p-value. Defaults to False.
            diffCutOff (float, optional): If not 0, will only plot the points that have a difference in the residuals of the large and small models larger than diffCutOff. Defaults to 0.
        """
        data = self.data.copy()
        # Filter data by false discovery rate
        if useExtraSS:
            data = data.loc[data['info']['fdrExtraSS'] < falseDiscoveryRate]
        else:
            data = data.loc[data['info']['fdrLLR'] < falseDiscoveryRate]
        # Calculate the difference between large and small model's residuals in order to understand what X changes the model the most
        if diffCutOff != 0:
            data.loc[:, ('info', 'residSqDiff')] = data.loc[:, ('info', 'residSqLarge')] - data.loc[:, ('info', 'residSqSmall')]
            data = data.loc[abs(data[('info', 'residSqDiff')]) > diffCutOff]
        # # Replace 0 p-values with the smallest possible value for so that log10 is defined
        # data.loc[:,('info','llrPValue')] = data.loc[:,('info','llrPValue')].apply(lambda x: x if x != 0 else 1e-323)
        if useExtraSS:
            yValues = -np.log10(data['info']['extraSSPValue'])
        else:
            yValues = -np.log10(data['info']['llrPValue'])
        xValues = data['effectSize']['interaction']
        # Matplotlib set main axis font size
        plt.rcParams["axes.titlesize"] = 22
        # Matplotlib set legend font size
        plt.rcParams["legend.fontsize"] = 22
        # Matplotlib set tick label font size
        plt.rcParams["axes.labelsize"] = 22
        # Matplotlib set tick label font size
        plt.rcParams["xtick.labelsize"] = 22
        plt.rcParams["ytick.labelsize"] = 22
        plt.figure(figsize=(20, 20), dpi=300)
        # Plot
        ax = sns.scatterplot(
            x=xValues.values,
            y=yValues.values,
            color="k",
            s=15,
            alpha=0.8,
            edgecolors="none",
            rasterized=True,
        )
        # Labels
        ax.set_xlabel(r"$\beta$")
        ax.set_ylabel(r"$-\log_{10}(p-value)$")
        # Grid
        ax.axvline(0, c="k", lw=0.5, ls="--")
        # NOTE(review): this hard-coded assignment silently overrides the
        # pValHzLine parameter — confirm whether the parameter should win.
        pValHzLine = 0.05  # Replace this value with the desired p-value
        ax.axhline(-np.log10(pValHzLine), c="k", lw=0.5, ls="--", label=f"p-value = {pValHzLine}")
        # Title
        ax.set_title(f"Volcano plot")
        ax.legend()
        self.volcanoPath = filepath
        plt.savefig(filepath, bbox_inches="tight")
        plt.close()
        if extraFeatures:
            hueVars = {}  # Dict to store all the extra vars to be used as hue in the scatter plot
            # 1st feature (Number of samples in common between Px, Py and Drug)
            hueVars['samples'] = {'data': data['info']['n'], 'varType': 'numerical'}
            # 2nd feature (Number of other associations of that PPI with other drug, how much the PPI is tested, how large is the fdr penalty)
            valuesCount = data.loc[:, [('info', 'Py'), ('info', 'Px')]].value_counts()
            hueVars['#tested'] = {'data': data.apply(lambda row: valuesCount[row[('info', 'Py')], row[('info', 'Px')]], axis=1), 'varType': 'numerical'}
            # 3rd feature (Py)
            hueVars['Py'] = {'data': data['info']['Py'], 'varType': 'categorical'}
            # 4th feature (Px)
            hueVars['Px'] = {'data': data['info']['Px'], 'varType': 'categorical'}
            # 5th feature (Drug)
            hueVars['drug'] = {'data': data['info']['drug'], 'varType': 'categorical'}
            # 6th feature (fdr)
            hueVars['fdr'] = {'data': data['info']['fdrLLR'], 'varType': 'numerical'}
            # 7th feature (ppi)
            hueVars['ppi'] = {'data': data['info']['Py'] + ';' + data['info']['Px'], 'varType': 'categorical'}
            for hueVar in hueVars:  # Iterate over all the extra features, and use them as hue in the scatterPlots
                plt.figure(figsize=(20, 20), dpi=300)
                ax = sns.scatterplot(
                    x=xValues,
                    y=yValues,
                    hue=hueVars[hueVar]['data'],
                    palette=sns.color_palette("viridis", as_cmap=True) if hueVars[hueVar]['varType'] == 'numerical' else sns.color_palette("hls", len(hueVars[hueVar]['data'].unique())),
                    legend=hueVars[hueVar]['varType'] == 'numerical',  # Set the legend parameter to False
                    s=15,
                    alpha=0.8,
                    edgecolors="none",
                    rasterized=True,
                )
                # Labels
                ax.set_xlabel(r"$\beta$")
                ax.set_ylabel(r"$-\log_{10}(p-value)$")
                # Grid
                ax.axvline(0, c="k", lw=0.5, ls="--")
                pValHzLine = 0.05  # Replace this value with the desired p-value
                ax.axhline(-np.log10(pValHzLine), c="k", lw=0.5, ls="--", label=f"p-value = {pValHzLine}")
                # Title and Legend
                ax.set_title(f"Volcano plot with {hueVar} as hue")
                ax.legend()
                if hueVars[hueVar]['varType'] == 'numerical':
                    # Numerical hue: replace the discrete legend with a colorbar
                    norm = matplotlib.colors.Normalize(vmin=hueVars[hueVar]['data'].min(), vmax=hueVars[hueVar]['data'].max())
                    sm = plt.cm.ScalarMappable(cmap="viridis", norm=norm)
                    sm.set_array([])
                    ax.get_legend().remove()
                    ax.figure.colorbar(sm, label=str(hueVar))
                # Save the plot
                huePath = filepath.split('.png')[0] + hueVar + '.png'
                plt.savefig(huePath, bbox_inches="tight")
                plt.close()
    def scatterTheTopVolcano(self, filepathMold: str, proteomics: ProteinsMatrix, drugRes: DrugResponseMatrix, typeOfInteraction: str, falseDiscoveryRate: float = 0.10, topNumber: int = 2, threhsQuantile: float = 0):
        """Scatter the topNumber most significant (lowest llr p-value) associations,
        one plot per association, saved as <filepathMold>top<i>.png.

        NOTE(review): relies on the ('info', ...) MultiIndex schema — confirm it
        matches this class's results (see volcanoPlot note).
        """
        data = self.data.copy()
        data = data.loc[data['info']['fdrExtraSS'] < falseDiscoveryRate]
        # betaThresh = data['effectSize']['interaction'].quantile(threhsQuantile) # define a beta threshold based on a quantile given by the use
        # data = data.loc[abs(data['effectSize']['interaction']) > betaThresh] # subset data to only include betas above the threshold
        data = data.sort_values(by=[('info', 'llrPValue')], ascending=[True])
        # Selecting top
        top = data.iloc[0:topNumber, :]
        # reseting index
        top = top.reset_index(drop=True)
        # iterate samples
        for index, row in top.iterrows():
            pValue = row['info']['llrPValue']
            effectSize = row['effectSize']['interaction']
            drug = row['info']['drug']
            anotation = f'p-value: {pValue:.2e}\nβ: {effectSize:.2e} \ndrug: {drug} '
            anotation = {'text': anotation, 'xy': (0.1, 0.8), 'xycoords': 'axes fraction', 'fontsize': 10}
            filepath = filepathMold.split('.png')[0] + 'top' + str(index) + '.png'
            ppi = row['info']['Px'] + ';' + row['info']['Py']
            proteomics.plotPxPy3DimContinous(drug, ppi, drugRes.data, typeOfInteraction, filepath, **anotation)
    def triangulate(
            self,
            volcanoXMin: float,
            volcanoXMax: float,
            volcanoYMin: float,
            volcanoYMax: float,
            typeOfInteraction: str,
            scatter: int = 0,
            filepathMold: str | None = '',
            interactive: bool = False,
            diffCutOff: float = 0,
            falseDiscoveryRate: float = 0.01
    ) -> pd.DataFrame:
        """Triangulate the model results data according to the volcano plot thresholds.

        Args:
            volcanoXMin (float): The minimum interaction effect size value for the x axis
            volcanoXMax (float): The maximum interaction effect size value for the x axis
            volcanoYMin (float): The minimum -np.log10(p-value) value for the y axis
            volcanoYMax (float): The maximum -np.log10(p-value) value for the y axis
            typeOfInteraction (str): Type of interaction, can be 'drug response' or 'gene essentiality'
            scatter (int, optional): The number of associations to scatter. Defaults to 0. (Shadows the scatter method name inside this body.)
            filepathMold (str | None, optional): Filepath template forwarded to self.scatter.
            interactive (bool, optional): If True, shows the volcano plot with the possibility of selecting a point, and scattering it with Drug Response as Color and Size. Defaults to False.
            diffCutOff (float, optional): Residual-difference cutoff, as in volcanoPlot. Defaults to 0.
            falseDiscoveryRate (float, optional): fdrExtraSS cutoff. Defaults to 0.01.

        Returns:
            pd.DataFrame: Data according to the volcano plot thresholds
        """
        data = self.data.copy()
        # Filter data by false discovery rate
        data = data.loc[data['info']['fdrExtraSS'] < falseDiscoveryRate]
        data = data.loc[(data['effectSize']['interaction'] >= volcanoXMin) & (data['effectSize']['interaction'] <= volcanoXMax)]
        data = data.loc[(-np.log10(data['info']['llrPValue']) >= volcanoYMin) & (-np.log10(data['info']['llrPValue']) <= volcanoYMax)]
        if diffCutOff != 0:
            data.loc[:, ('info', 'residSqDiff')] = data.loc[:, ('info', 'residSqLarge')] - data.loc[:, ('info', 'residSqSmall')]
            data = data.loc[abs(data[('info', 'residSqDiff')]) > diffCutOff]
        data = data.sort_values(by=[('info', 'llrPValue')], ascending=[True])
        if interactive:  # It will show the original dark plot, but it will allow the user to select a point, and scatter it with Drug Response as Color and Size
            yValues = -np.log10(data['info']['llrPValue'])
            xValues = data['effectSize']['interaction']
            fig = plt.figure(figsize=(60, 60), dpi=300)
            ax = fig.add_subplot(111)
            ax = sns.scatterplot(
                x=xValues,
                y=yValues,
                color="k",
                s=15,
                alpha=0.8,
                edgecolors="none",
                rasterized=True,
                picker=True,
                ax=ax
            )
            # Labels
            ax.set_xlabel(r"$\beta$")
            ax.set_ylabel(r"$-\log_{10}(p-value)$")
            # Grid
            ax.axvline(0, c="k", lw=0.5, ls="--")
            # Change x and y range according to the volcano plot thresholds
            ax.set_xlim(volcanoXMin, volcanoXMax)
            ax.set_ylim(volcanoYMin, volcanoYMax)
            # Title
            ax.set_title(f"Volcano plot")
            # Function to handle pick events
            def picker(event):
                ind = event.ind[0]  # Get the index of the selected point
                selected = data.iloc[[ind], :]  # The double bracket is so that the retrieved object is a dataframe and not a series
                plt.gcf().canvas.mpl_disconnect(mouseEvent)  # Disconnect the pick event handler
                plt.close(fig)  # Close the figure
                print(selected)
                # Scatter the selected point
                self.scatter(1, filepathMold, typeOfInteraction, selected)
            fig.show()
            # Connect the pick event handler to the scatter plot
            mouseEvent = plt.gcf().canvas.mpl_connect("pick_event", picker)
        else:
            if scatter > 0:
                self.scatter(scatter, filepathMold, typeOfInteraction, data)
        return data
    def scatter(
            self,
            topNumber: int,
            filepathMold: str | None,
            typeOfInteraction: str,
            data: pd.DataFrame = None,
            drugRes: DrugResponseMatrix = read(PATH + '/internal/drugResponses/drugResponse.pickle.gz'),
            proteomics: ProteinsMatrix = read(PATH + '/internal/proteomics/ogProteomics.pickle.gz')):
        """Scatter the first topNumber associations in data or self.data.

        NOTE(review): the drugRes/proteomics defaults call read(...) once at
        class-definition time (file I/O at import) and the objects are shared
        across all calls — confirm this is intended.

        Args:
            topNumber (int): Number of associations to scatter
            filepathMold (str | None): Filepath template to save the scatter plots; None plots without saving
            typeOfInteraction (str): Type of interaction, can be 'drug response' or 'gene essentiality'
            data (pd.DataFrame, optional): Data to use instead of the object's full result matrix. Defaults to None.
            drugRes (DrugResponseMatrix, optional): Drug response object. Note the .T below — this method transposes drugRes.data, unlike scatterTheTopVolcano.
            proteomics (ProteinsMatrix, optional): Proteomics object used for the scatter.
        """
        if data is None:
            data = self.data.copy()
        top = data.iloc[0:topNumber, :]
        # reseting index
        top = top.reset_index(drop=True)
        # iterate samples
        for index, row in top.iterrows():
            pValue = row['info']['llrPValue']
            effectSize = row['effectSize']['interaction']
            drug = row['info']['drug']
            anotation = f'p-value: {pValue:.2e}\nβ: {effectSize:.2e} \ndrug: {drug} '
            anotation = {'text': anotation, 'xy': (0.1, 0.8), 'xycoords': 'axes fraction', 'fontsize': 10}
            if filepathMold is not None:
                filepath = filepathMold.split('.png')[0] + 'top' + str(index) + '.png'
            else:
                filepath = None
            ppi = row['info']['Px'] + ';' + row['info']['Py']
            proteomics.plotPxPy3DimContinous(drug, ppi, drugRes.data.T, typeOfInteraction, filepath, **anotation)
def processPPIWrapper(self, ppi: tuple[str, str]) -> dict:
    """Fit, for one PPI, the Py ~ Px (+ drug response + interaction) model pair
    against every drug in self.drugRes; module-level so multiprocessing can
    pickle it (PyPxDrugInteractionModel.fit passes self explicitly via starmap).

    Args:
        self (PyPxDrugInteractionModel): The model object with proteomics,
            drugRes, M and regression settings.
        ppi (tuple[str, str]): Names of (Py, Px).

    Returns:
        dict: Accumulated results keyed by ('effectSize'|'info', column) tuples,
        one list entry per drug.
    """
    def getLinearModels(self, YName, XName, drugName) -> tuple[LinearRegression, LinearRegression, dict]:
        """Get the Linear Models (Larger and Smaller) for the given Protein X and Y names.

        Subsets proteomics, drugRes, and M to samples common to all dataframes,
        fits the small (confounders-only) and large (confounders + tested
        features) OLS models, and runs both the likelihood-ratio and
        extra-sum-of-squares tests.

        Args:
            YName (str): Protein Y name
            XName (str): Protein X name
            drugName (str): Drug name

        Returns:
            LinearRegression: larger linear model
            LinearRegression: smaller linear model
            dict: Results keyed by ('effectSize'|'info', column) tuples
        """
        Py = self.proteomics.loc[:, YName]
        Py = Py.dropna()
        Px = self.proteomics.loc[:, XName]
        Px = Px.dropna()
        M = self.M
        M = M.dropna(axis=0)
        drugRes = self.drugRes.loc[:, drugName]
        drugRes = drugRes.fillna(drugRes.mean())  # mean-impute missing drug response
        # get samples common to all dataframes
        samplesCommon = list(set.intersection(
            set(Py.index), set(Px.index), set(drugRes.index), set(M.index)
        ))  # samples common to all dataframes
        samplesCommon.sort()  # deterministic ordering
        # number of samples in common, n
        n = len(samplesCommon)
        # subset dataframes to common samples
        Py = Py.loc[samplesCommon]
        Px = Px.loc[samplesCommon]
        drugRes = drugRes.loc[samplesCommon]
        M = M.loc[samplesCommon]
        if self.standardisePx:  # Zscore Px and drug response if standardisePx is True
            Px = (Px - Px.mean()) / Px.std()
            drugRes = (drugRes - drugRes.mean()) / drugRes.std()
        pxInteractionDR = drugRes.mul(Px, axis=0)  # dR * Px
        pxInteractionDR.name = "interaction"  # rename the column to be the interaction term
        # reordering of expressions to build the smaller and larger models
        # Small Model: Py ~ (Px + M)  [+ dR when isDrugResSmall]
        # Large Model: Py ~ (Px + M) + (dR + Px:dR)
        if self.isDrugResSmall:
            X = pd.concat([pxInteractionDR], axis=1)
            M = pd.concat([Px, M, drugRes], axis=1)
        else:
            X = pd.concat([drugRes, pxInteractionDR], axis=1)
            M = pd.concat([Px, M], axis=1)
        # Fit Confounding, small model
        # NOTE(review): modelRegressor is not defined at module scope in this
        # chunk, and the nested helper in ppiWrapper takes
        # (fitIntercept, copyX, nJobs), not self — confirm a compatible
        # module-level modelRegressor(self) exists elsewhere in the file.
        lmSmall = modelRegressor(self).fit(M, Py)
        lmSmallLogLike = loglike(Py, lmSmall.predict(M))
        # Fit Confounding + features, Large model
        xLarge = pd.concat([M, X], axis=1)
        # Make sure all columns are strings
        xLarge.columns = xLarge.columns.astype(str)
        lmLarge = modelRegressor(self).fit(xLarge, Py)
        lmLargeLogLike = loglike(Py, lmLarge.predict(xLarge))
        # Calculating Residuals (Small model)
        lmSmallResidualsSq = np.power(Py - lmSmall.predict(M), 2)
        # Calculating Residuals (Large model)
        lmLargeResidualsSq = np.power(Py - lmLarge.predict(xLarge), 2)
        # Log-ratio test
        lr = 2 * (lmLargeLogLike - lmSmallLogLike)
        LogLikeliRatioPVal = chi2.sf(lr, X.shape[1])
        # Extra sum of squares test
        if self.fitIntercept:  # If the model has an intercept, then we need to add 1 to the number of covariates in the large and small models, because we are calculating an extra parameter, the intercept
            extraPValue = extraSumSquares(xLarge.shape[1] + 1, M.shape[1] + 1, Py, lmLarge.predict(xLarge), lmSmall.predict(M))
        else:
            extraPValue = extraSumSquares(xLarge.shape[1], M.shape[1], Py, lmLarge.predict(xLarge), lmSmall.predict(M))
        coefs = lmLarge.coef_
        columns = ['Px'] + self.M.columns.tolist() + ['drug'] + ['interaction']
        columns = [('effectSize', col) for col in columns]
        res = {col: [coefs[index]] for index, col in enumerate(columns)}
        res[('info', 'Py')] = [YName]
        res[('info', 'Px')] = [XName]
        res[('info', 'drug')] = [drugName]
        res[('info', 'n')] = [n]
        res[('info', 'llrPValue')] = [LogLikeliRatioPVal]
        res[('info', 'extraSSPValue')] = [extraPValue]
        res[('info', 'llStatistic')] = [lr]
        res[('info', 'intercept')] = [lmLarge.intercept_]
        res[('info', 'residSqLarge')] = [lmLargeResidualsSq.sum()]
        res[('info', 'residSqSmall')] = [lmSmallResidualsSq.sum()]
        # NOTE(review): BH correction of a single p-value is a no-op; the
        # effective multiple-testing correction presumably happens downstream.
        res[('info', 'fdrLLR')] = list(multipletests(res[('info', 'llrPValue')], method="fdr_bh")[1])
        res[('info', 'fdrExtraSS')] = list(multipletests(res[('info', 'extraSSPValue')], method="fdr_bh")[1])
        return lmLarge, lmSmall, res
    for index, drugName in enumerate(self.drugRes):
        YName = ppi[0]
        XName = ppi[1]
        _, _, res1 = getLinearModels(self, YName, XName, drugName)
        if index == 0:  # If first drug, then we want to create the dictionary that will be used to save the results from all other drugs
            results = res1  # Create dictionary, results, that will be used to save the results from all other drugs
        else:
            for key in results:
                results[key] = results[key] + res1[key]
    return results
class PyPxDrugInteractionModel(MatrixData):
"""Linear Model Designed to find interactions between drug response and proteomics data, so the goal is to see what Drug Responses are impacted by a certain ppi (pY, pX)
Args:
MatrixData (_type_): _description_
Returns:
_type_: _description_
"""
def __init__(self, ppis: Iterable[tuple[str, str]],
             proteomics: ProteinsMatrix,
             interactor: pd.DataFrame,
             M: pd.DataFrame | pd.Series,
             isDrugResSmall: bool = True,
             fitIntercept=True, copyX=True,
             standardisePx=True, nJobs: int = 4,
             filepath: str = None, data: pd.DataFrame = None,
             **readerKwargs):
    """Store the fitting inputs and regression settings.

    The PPI set is symmetrised: for every (a, b) the reversed (b, a) is also
    added, so both directions Py ~ Px and Px ~ Py are fitted.

    Args:
        ppis (Iterable[tuple[str, str]]): (Py, Px) protein name pairs.
        proteomics (ProteinsMatrix): Proteomics object; only its .data is kept.
        interactor (pd.DataFrame): Drug response (or other interactor), samples x drugs; stored as self.drugRes.
        M (pd.DataFrame | pd.Series): Confounder matrix.
        isDrugResSmall (bool): If True, drug response is part of the small (null) model and only the interaction is tested.
        fitIntercept (bool): Whether OLS fits an intercept.
        copyX (bool): sklearn copy_X flag.
        standardisePx (bool): If True, Px and drug response are z-scored before fitting.
        nJobs (int): sklearn n_jobs flag.
        filepath (str): Optional path for MatrixData to load results from.
        data (pd.DataFrame): Optional pre-computed results.
    """
    super().__init__(filepath, data, **readerKwargs)
    # Symmetrise the PPI pairs so both orientations are tested
    newSet = set()
    for pair in ppis:
        newSet.add(pair)
        newSet.add(tuple(reversed(pair)))  # Add the reverse of the pair, so that we can check for one way relationships
    ppis = newSet
    self.ppis = ppis
    self.proteomics = proteomics.data
    self.drugRes = interactor
    self.M = M
    self.isDrugResSmall = isDrugResSmall
    self.fitIntercept = fitIntercept
    self.copyX = copyX
    self.standardisePx = standardisePx
    self.nJobs = nJobs
    self.drugResLen = interactor.shape[1]
    self.lenM = M.shape[1]
def correctExtraSS(self):
    """Recompute the extra-sum-of-squares p-values in self.data from the stored
    residual sums of squares, using covariate counts derived from the model
    configuration, and overwrite ('info', 'extraSSPValue') in place.

    Returns:
        The element-wise difference between the corrected and the previous
        p-values (corrected - previous).
    """
    data = self.data['info'].copy()
    smallModelSSE = data['residSqSmall']
    largeModelSSE = data['residSqLarge']
    largeModelNumCov = 1 + self.lenM + 1 + 1  # 1 for drug response, lenM for M, 1 for Px, 1 for Px:drugResponse
    if self.isDrugResSmall:
        smallModelNumCov = 1 + self.lenM + 1  # 1 for drug response, lenM for M, 1 for Px
    else:
        smallModelNumCov = self.lenM + 1  # lenM for M, 1 for Px
    if self.fitIntercept:  # The num of params estimated increases by one if we calculate the intercept
        largeModelNumCov += 1
        smallModelNumCov += 1
    statistic = smallModelSSE - largeModelSSE
    q = largeModelNumCov - smallModelNumCov  # number of extra covariates tested
    n = self.data[('info', 'n')]
    largeDF = n - largeModelNumCov  # residual degrees of freedom of the large model
    statisticNumerator = statistic / q
    statisticDenominator = largeModelSSE / largeDF
    statistic = statisticNumerator / statisticDenominator  # partial F statistic
    previousPValue = data['extraSSPValue']
    # Calculate p-value according to F distribution
    pValue = f.sf(statistic, q, largeDF)
    self.data.loc[:, ('info', 'extraSSPValue')] = pValue
    # difference in change of the pValues
    pValueDiff = pValue - previousPValue
    print("Finnished Correcting the p-values")
    return pValueDiff
def fit(self, numOfCores=CPUS) -> pd.DataFrame:
    """Fit each Px and Py pair towards every drug in the drugRes dataframe,
    in parallel (one worker call per PPI via processPPIWrapper).

    Calculates the log-likelihood-ratio (Wilks) p-value for the null
    hypothesis that the smaller model suffices, alongside the
    extra-sum-of-squares test computed inside the worker.

    Returns:
        pd.DataFrame: The results with a ('effectSize'|'info', column)
        MultiIndex; also stored in self.data.
    """
    # Argument order matches processPPIWrapper(self, ppi)
    pararelList = zip(repeat(self), self.ppis)
    with mp.Pool(numOfCores) as process:
        pararelResults = process.starmap(processPPIWrapper, pararelList)
    # Merge the per-PPI dict-of-lists results into a single dict
    for index, result in enumerate(pararelResults):
        if index == 0:
            results = result
        else:
            for key in result:
                results[key] = results[key] + result[key]
    # The dict keys are ('effectSize'|'info', name) tuples -> MultiIndex columns
    results = pd.DataFrame(results, columns=pd.MultiIndex.from_tuples(results.keys()))
    self.data = results
    return results
def resiCorr(self) -> pd.DataFrame:
    """Calculates the correlation between the residuals of the large model and
    each drug, and the residuals of the small model and each drug, using
    one-way ANOVA with the drug as the categorical explanatory variable.

    Returns:
        pd.DataFrame: ANOVA coefficients for both residual columns, joined
        side by side under keys 'residSqLarge' / 'residSqSmall'; also stored
        in self.resiCorrResults.
    """
    data = self.data.copy()
    # get only relevant columns
    anovaData = pd.DataFrame(columns=['residSqSmall', 'residSqLarge', 'drug'])
    anovaData['drug'] = data['info']['drug']
    anovaData['residSqLarge'] = data['info']['residSqLarge']
    anovaData['residSqSmall'] = data['info']['residSqSmall']
    # Imported here (not at module top) — keeps the heavier resources import lazy
    from resources import Anova
    modelLarge = Anova(anovaData, False)
    modelSmall = Anova(anovaData, False)
    modelLarge = modelLarge.fitOneWay('drug', 'residSqLarge')
    modelSmall = modelSmall.fitOneWay('drug', 'residSqSmall')
    coefsLarge = modelLarge.params
    coefsSmall = modelSmall.params
    # join both Dataframes to get the coefficients when y is the large or small model, with the respective column names
    self.resiCorrResults = pd.concat([coefsLarge, coefsSmall], axis=1, keys=['residSqLarge', 'residSqSmall'])
    return self.resiCorrResults
def volcanoPlot(
self,
filepath:str,
falseDiscoveryRate:float=0.001,
pValHzLine:float = 0.01,
extraFeatures:bool = False,
useExtraSS:bool = False,
diffCutOff:float=0):
"""Volcano plot in order to find statisticall relevant relationships.
Args:
filepath (str): Path to save the plot.
falseDiscoveryRate (float, optional): The corrected p-value at which we start to acknowledge a relevant interaction, independently of how many times an hypothesis was tested . Defaults to 0.01.
pValHzLine (float, optional): p-value line to draw on the plot, as a reference. Defaults to 0.001.
extraFeatures (bool, optional): If True, will plot the volcano plot with extra features as hue. All in separare files. The features are: Number of samples in common between Px, Py and Drug, how much the PPI is tested, how large is the fdr penalty, the PPI. Defaults to False.
useExtraSS (bool, optional): If True, will use the extra sum of squares p-value instead of the log likelihood p-value. Defaults to False.
diffCutOff (float, optional): If not 0, will only plot the points that have a difference in the residuals of the large and small models larger than diffCutOff. Defaults to 0.
"""
data = self.data.copy()
# Filter data by false discovery rate
if useExtraSS:
data = data.loc[data['info']['fdrExtraSS'] < falseDiscoveryRate]
else:
data = data.loc[data['info']['fdrLLR'] < falseDiscoveryRate]
# Calculate the difference between large and small model's residuals in order to understand what X changes the model the most
if diffCutOff != 0:
data.loc[:,('info','residSqDiff')] = data.loc[:,('info','residSqLarge')] - data.loc[:,('info','residSqSmall')]
data = data.loc[abs(data[('info','residSqDiff')]) > diffCutOff]
# # Replace 0 p-values with the smallest possible value for so that log10 is defined
# data.loc[:,('info','llrPValue')] = data.loc[:,('info','llrPValue')].apply(lambda x: x if x != 0 else 1e-323)
if useExtraSS:
yValues = -np.log10(data['info']['extraSSPValue'])
else:
yValues = -np.log10(data['info']['llrPValue'])
xValues = data['effectSize']['interaction']
# Matplotlib set main axis font size
plt.rcParams["axes.titlesize"] = 22
# Matplotlib set legend font size
plt.rcParams["legend.fontsize"] = 22
# Matplotlib set tick label font size
plt.rcParams["axes.labelsize"] = 22
# Matplotlib set tick label font size
plt.rcParams["xtick.labelsize"] = 22
plt.rcParams["ytick.labelsize"] = 22
plt.figure(figsize=(20, 20), dpi=300)
# Plot
ax = sns.scatterplot(
x=xValues.values,
y=yValues.values,
color="k",
s=15,
alpha=0.8,
edgecolors="none",
rasterized=True,
)
# Labels
ax.set_xlabel(r"$\beta$")
ax.set_ylabel(r"$-\log_{10}(p-value)$")
# Grid
ax.axvline(0, c="k", lw=0.5, ls="--")
pValHzLine = 0.05 # Replace this value with the desired p-value
ax.axhline(-np.log10(pValHzLine), c="k", lw=0.5, ls="--", label=f"p-value = {pValHzLine}")
# Title
ax.set_title(f"Volcano plot")
ax.legend()
self.volcanoPath = filepath
plt.savefig(filepath, bbox_inches="tight")
plt.close()
if extraFeatures:
hueVars = {} # List to store all the extra vars to be used as hue in the scatter plot
#1st feature (Number of samples in common between Px, Py and Drug)
hueVars['samples'] = {'data': data['info']['n'], 'varType': 'numerical'}
#2nd feature (Number of other associations of that PPI with other drug, how much the PPI is tested, how large is the fdr penalty)
valuesCount = data.loc[:,[('info','Py'),('info', 'Px')]].value_counts()
hueVars['#tested']= {'data': data.apply(lambda row: valuesCount[row[('info','Py')], row[('info', 'Px')]], axis=1), 'varType': 'numerical'}
#3rd feature (Py)
hueVars['Py'] = {'data': data['info']['Py'], 'varType': 'categorical'}
#4th feature (Px)
hueVars['Px'] = {'data': data['info']['Px'], 'varType': 'categorical'}
#5th feature (Drug)
hueVars['drug'] = {'data': data['info']['drug'], 'varType': 'categorical'}
#6th feature (fdr)
hueVars['fdr'] = {'data': data['info']['fdrLLR'], 'varType': 'numerical'}
#7th feature (ppi)
hueVars['ppi'] = {'data': data['info']['Py'] + ';' + data['info']['Px'], 'varType': 'categorical'}
for hueVar in hueVars: # Iterate over all the extra features, and used them as hue in the scatterPlots
plt.figure(figsize=(20, 20), dpi=300)
ax = sns.scatterplot(
x=xValues,
y=yValues,
hue=hueVars[hueVar]['data'],
palette= sns.color_palette("viridis", as_cmap=True) if hueVars[hueVar]['varType'] == 'numerical' else sns.color_palette("hls", len(hueVars[hueVar]['data'].unique())) ,
legend= hueVars[hueVar]['varType'] == 'numerical', # Set the legend parameter to False
s=15,
alpha=0.8,
edgecolors="none",
rasterized=True,
)
# Labels
ax.set_xlabel(r"$\beta$")
ax.set_ylabel(r"$-\log_{10}(p-value)$")
# Grid
ax.axvline(0, c="k", lw=0.5, ls="--")
pValHzLine = 0.05 # Replace this value with the desired p-value
ax.axhline(-np.log10(pValHzLine), c="k", lw=0.5, ls="--", label=f"p-value = {pValHzLine}")
# Title and Legend
ax.set_title(f"Volcano plot with {hueVar} as hue")
ax.legend()
if hueVars[hueVar]['varType'] == 'numerical':
norm = matplotlib.colors.Normalize(vmin=hueVars[hueVar]['data'].min(), vmax=hueVars[hueVar]['data'].max())
sm = plt.cm.ScalarMappable(cmap="viridis", norm=norm)
sm.set_array([])
ax.get_legend().remove()
ax.figure.colorbar(sm, label=str(hueVar))
# Save the plot
huePath = filepath.split('.png')[0] + hueVar + '.png'
plt.savefig(huePath, bbox_inches="tight")
plt.close()
    def scatterTheTopVolcano(self, filepathMold:str, proteomics:ProteinsMatrix, drugRes:DrugResponseMatrix, typeOfInteraction:str, falseDiscoveryRate:float=0.10, topNumber:int=2, threhsQuantile:float=0):
        """Scatter-plot the `topNumber` most significant associations (lowest llr p-value) among those passing the extra-sum-of-squares FDR cut-off.

        Args:
            filepathMold (str): Template path; 'topN' is inserted before the '.png' extension for each plot.
            proteomics (ProteinsMatrix): Proteomics object whose plotting helper draws Px vs Py.
            drugRes (DrugResponseMatrix): Drug response object; its `.data` is forwarded to the plotting helper.
            typeOfInteraction (str): Interaction type forwarded to the plotting helper.
            falseDiscoveryRate (float, optional): Threshold on 'fdrExtraSS'. Defaults to 0.10.
            topNumber (int, optional): Number of top associations to plot. Defaults to 2.
            threhsQuantile (float, optional): Currently unused while the beta-threshold filter below is commented out.

        NOTE(review): this passes `drugRes.data` while `scatter()` passes
        `drugRes.data.T` -- confirm which orientation the helper expects.
        """
        data = self.data.copy()
        data = data.loc[data['info']['fdrExtraSS'] < falseDiscoveryRate]
        # betaThresh = data['effectSize']['interaction'].quantile(threhsQuantile) # define a beta threshold based on a quantile given by the use
        # data = data.loc[abs(data['effectSize']['interaction']) > betaThresh] # subset data to only include betas above the threshold
        data = data.sort_values(by=[('info','llrPValue')], ascending=[True])
        #Selecting top
        top = data.iloc[0:topNumber,:]
        #reseting index
        top = top.reset_index(drop=True)
        #iterate samples
        for index, row in top.iterrows():
            pValue = row['info']['llrPValue']
            effectSize = row['effectSize']['interaction']
            drug = row['info']['drug']
            # Annotation box drawn inside the axes of each scatter plot
            anotation = f'p-value: {pValue:.2e}\nβ: {effectSize:.2e} \ndrug: {drug} '
            anotation = {'text':anotation, 'xy':(0.1, 0.8), 'xycoords':'axes fraction', 'fontsize':10}
            filepath = filepathMold.split('.png')[0] + 'top'+ str(index) +'.png'
            ppi = row['info']['Px'] + ';' + row['info']['Py']
            proteomics.plotPxPy3DimContinous(drug, ppi, drugRes.data, typeOfInteraction,filepath, **anotation)
    def triangulate(
        self,
        volcanoXMin:float,
        volcanoXMax:float,
        volcanoYMin:float,
        volcanoYMax:float,
        typeOfInteraction:str,
        scatter:int = 0,
        filepathMold:str|None='',
        interactive:bool = False,
        diffCutOff:float = 0,
        falseDiscoveryRate:float = 0.01
        )->pd.DataFrame:
        """Triangulate the model results data according to the volcano plot thresholds.

        Args:
            volcanoXMin (float): The minimum interaction effect size value for the x axis
            volcanoXMax (float): The maximum interaction effect size value for the x axis
            volcanoYMin (float): The minimum -np.log10(p-value) value for the y axis
            volcanoYMax (float): The maximum -np.log10(p-value) value for the y axis
            typeOfInteraction (str): Type of interaction, can be 'drug response' or 'gene essentiality'
            scatter (int, optional): The number of associations to scatter. Defaults to 0.
            filepathMold (str|None, optional): Filepath template forwarded to `self.scatter`. Defaults to ''.
            interactive (bool, optional): If True, it will show the volcano plot with the possibility of selecting a point, and scattering it with Drug Response as Color and Size. Defaults to False.
            diffCutOff (float, optional): If not 0, keep only rows whose large-vs-small squared-residual difference exceeds this. Defaults to 0.
            falseDiscoveryRate (float, optional): Threshold on 'fdrExtraSS'. Defaults to 0.01.
        Returns:
            pd.DataFrame: Data according to the volcano plot thresholds
        """
        data = self.data.copy()
        # Filter data by false discovery rate
        data = data.loc[data['info']['fdrExtraSS'] < falseDiscoveryRate]
        # Keep only the requested effect-size (x) and -log10(p-value) (y) window
        data = data.loc[(data['effectSize']['interaction'] >= volcanoXMin) & (data['effectSize']['interaction'] <= volcanoXMax)]
        data = data.loc[(-np.log10(data['info']['llrPValue']) >= volcanoYMin) & (-np.log10(data['info']['llrPValue']) <= volcanoYMax)]
        if diffCutOff != 0:
            data.loc[:,('info','residSqDiff')] = data.loc[:,('info','residSqLarge')] - data.loc[:,('info','residSqSmall')]
            data = data.loc[abs(data[('info','residSqDiff')]) > diffCutOff]
        data = data.sort_values(by=[('info','llrPValue')], ascending=[True])
        if interactive: #It will show the original dark plot, but it will allow the user to select a point, and scatter it with Drug Response as Color and Size
            yValues = -np.log10(data['info']['llrPValue'])
            xValues = data['effectSize']['interaction']
            fig = plt.figure(figsize=(60,60), dpi=300)
            ax = fig.add_subplot(111)
            # picker=True makes the points selectable with the mouse
            ax = sns.scatterplot(
                x=xValues,
                y=yValues,
                color="k",
                s=15,
                alpha=0.8,
                edgecolors="none",
                rasterized=True,
                picker=True,
                ax=ax
            )
            # Labels
            ax.set_xlabel(r"$\beta$")
            ax.set_ylabel(r"$-\log_{10}(p-value)$")
            # Grid
            ax.axvline(0, c="k", lw=0.5, ls="--")
            #Change x and y range according to the volcano plot thresholds
            ax.set_xlim(volcanoXMin, volcanoXMax)
            ax.set_ylim(volcanoYMin, volcanoYMax)
            # Title
            ax.set_title(f"Volcano plot")
            # Function to handle pick events: scatter the picked association, then tear the figure down
            def picker(event):
                ind = event.ind[0] # Get the index of the selected point
                selected = data.iloc[[ind],:] # The double bracket is so that the retrieved object is a dataframe and not a series
                plt.gcf().canvas.mpl_disconnect(mouseEvent) # Disconnect the pick event handler
                plt.close(fig) # Close the figure
                print(selected)
                # Scatter the selected point
                self.scatter(1, filepathMold, typeOfInteraction,selected)
            fig.show()
            # Connect the pick event handler to the scatter plot
            mouseEvent = plt.gcf().canvas.mpl_connect("pick_event", picker)
        else:
            if scatter > 0:
                self.scatter(scatter, filepathMold, typeOfInteraction,data)
        return data
    def scatter(
        self,
        topNumber:int,
        filepathMold:str|None,
        typeOfInteraction:str,
        data:pd.DataFrame|None = None,
        drugRes:DrugResponseMatrix = read(PATH + '/internal/drugResponses/drugResponse.pickle.gz'),
        proteomics:ProteinsMatrix = read(PATH + '/internal/proteomics/ogProteomics.pickle.gz')):
        """ Scatter the first topNumber associations in data or self.data

        NOTE(review): the `drugRes`/`proteomics` defaults are call expressions, so
        both pickles are read from disk ONCE, when this class definition is executed
        at import time -- confirm this eager load is intended.

        Args:
            topNumber (int): Number of associations to scatter
            filepathMold (str): Filepath template to save the scatter plots; if None, plots are not saved to a file
            data (pd.DataFrame|None, optional): Data to use instead of the objects full result matrix, comming out of the linear Model. Defaults to None.
            drugRes (DrugResponseMatrix, optional): Drug response Object. Defaults to read(PATH + '/internal/drugResponses/drugResponse.pickle.gz').
            proteomics (ProteinsMatrix, optional): Proteomics Object used for scatter. Defaults to read(PATH + '/internal/proteomics/ogProteomics.pickle.gz').
            typeOfInteraction (str): Type of interaction, can be 'drug response' or 'gene essentiality'
        """
        if data is None:
            data = self.data.copy()
        # First topNumber rows; assumes the frame is already sorted by relevance upstream -- TODO confirm
        top = data.iloc[0:topNumber,:]
        #reseting index
        top = top.reset_index(drop=True)
        #iterate samples
        for index, row in top.iterrows():
            pValue = row['info']['llrPValue']
            effectSize = row['effectSize']['interaction']
            drug = row['info']['drug']
            # Annotation box drawn inside the axes of each scatter plot
            anotation = f'p-value: {pValue:.2e}\nβ: {effectSize:.2e} \ndrug: {drug} '
            anotation = {'text':anotation, 'xy':(0.1, 0.8), 'xycoords':'axes fraction', 'fontsize':10}
            if filepathMold is not None:
                filepath = filepathMold.split('.png')[0] + 'top'+ str(index) +'.png'
            else:
                filepath = None
            ppi = row['info']['Px'] + ';' + row['info']['Py']
            proteomics.plotPxPy3DimContinous(drug, ppi, drugRes.data.T, typeOfInteraction, filepath, **anotation) | QuantitativeBiology/MiguelThesisPrecisionMed | src/resources/classes.py | classes.py | py | 135,744 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sys.getsizeof",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "sys.path.append",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "gzip.open",
"line_num... |
18672719269 | import json
import logging
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.webdriver import WebDriver
from selenium.webdriver.chrome.options import Options
from selenium import webdriver
from selenium.webdriver.remote.webelement import WebElement
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import configparser
def init_driver():
    """Create a Chrome WebDriver configured from core_framework/util/config.ini.

    Returns:
        WebDriver: a newly started selenium Chrome driver.
    """
    config = configparser.ConfigParser()
    config.read('./core_framework/util/config.ini')
    default_setting = config['default_setting']
    options = Options()
    # NOTE(review): this assigns the whole [default_setting] SectionProxy, not a
    # path string, to chrome_executable_path -- confirm the intended key/value.
    options.chrome_executable_path = default_setting
    driver = webdriver.Chrome(options=options)
    return driver
class CoreDriver():
    """Thin convenience wrapper around a selenium WebDriver.

    Attributes:
        driver: the wrapped selenium WebDriver instance.
        delay: default explicit-wait timeout, in seconds.
    """
    driver: WebDriver
    delay = 3

    def __init__(self, driver=None) -> None:
        """Wrap *driver* (when given) and set up a module-named logger."""
        if driver is not None:
            self.driver = driver
        self.log = logging.getLogger(__name__)

    def get_element(self, name=None, xpath=None) -> WebElement:
        """Wait up to `delay` seconds for the element located by *xpath*.

        Returns:
            WebElement: the located element, or None if it never appeared
            (the failure is logged instead of raised).
        """
        self.log.info(f"Get {name} xpath: {xpath}")
        try:
            element = WebDriverWait(self.driver, self.delay).until(
                EC.presence_of_element_located((By.XPATH, xpath)))
        except Exception:  # BUGFIX: was a bare `except:`, which also swallowed SystemExit/KeyboardInterrupt
            self.log.error(f'Could not find {name}, xpath is {xpath}')
            element = None
        return element

    def load_cookie(self):
        """Load cookies from ./token.json into the current browser session."""
        with open("token.json") as f:
            cookies = json.load(f)
        for cookie in cookies:
            self.driver.add_cookie(cookie)

    def page_refresh(self):
        """Reload the current page."""
        self.driver.refresh()

    def wait_page_loading(self):
        """Block until the loading indicator (class 'qZrMO2') disappears."""
        WebDriverWait(self.driver, self.delay).until_not(
            EC.presence_of_element_located((By.XPATH, "//*[@class='qZrMO2']")))
| rnjacky777/automation_framework | core_framework/core_framework.py | core_framework.py | py | 1,742 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "configparser.ConfigParser",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.chrome.options.Options",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 19,
"usage_type": "call"
... |
16521824851 | ###read training peptide input
import sys, os, re
from collections import OrderedDict
def readtrainfile(length, trainfile_peptide):
    """Read and validate a labelled training-peptide file.

    Expected layout: the first row carries the label '1', followed by positive
    peptides; an optional row containing '-1' then introduces negative peptides.
    Peptides must use only the 20 standard amino acids and be exactly `length`
    residues long. Any violation prints an error message and exits the process.

    Args:
        length: Expected peptide length (compared via int(length)).
        trainfile_peptide: Path to the training file.

    Returns:
        tuple: (OrderedDict peptide->label, 'NO') when no '-1' label occurs,
        otherwise (OrderedDict with a 'pos' peptide list and a nested 'neg'
        dict keyed '0', 'YES').
    """
    if os.path.exists(trainfile_peptide) == False:
        print('Error: "' + trainfile_peptide + '" does not exist.')
        sys.exit(1)
    # The 20 standard amino acids (upper case) and the rejected unusual codes
    upscaleAA = ['A', 'R', 'N', 'D', 'V', 'Q', 'E', 'G', 'H', 'I', 'L', 'K', 'M', 'F', 'P', 'S', 'T', 'W', 'Y', 'C']
    unusualAA = ['B', 'J', 'O', 'U', 'X', 'Z', 'b', 'j', 'o', 'u', 'x', 'z']
    peptides, labels = [], []
    # Read the file twice: as lines for parsing and as raw text for the AA scan
    with open(trainfile_peptide, 'rt') as f:
        content = f.readlines()
        f.seek(0, 0)
        content_txt = f.read()
    ###check unusual amino acid
    for eachAA in unusualAA:
        if eachAA in content_txt:
            print('Peptides contain unnatural amino acid, eg: B, J, O, U, X, Z')
            sys.exit(1)
    # rownum flags the first row; posstart/negstart record which label rows were seen
    rownum, posstart, numpos, negstart = 1, 0, 0, 0
    for eachline in content:
        ###check pos label at first row
        if rownum == 1:
            if '1' in eachline:
                posstart += 1
            else:
                print("Please put the label '1' at the first row.")
                sys.exit(1)
            rownum -= 1
        elif '-1' in eachline:
            negstart += 1
        elif posstart == 1 and negstart == 0:
            # Positive section: before any '-1' row has been seen
            peptide = (eachline.split('\n')[0]).upper()
            ###check unrecognised character
            for eachAA in peptide:
                if eachAA not in upscaleAA:
                    print('Unrecognized character exists in your training file')###check characters
                    sys.exit(1)
            ###check peptide length
            if len(peptide) == int(length):
                peptides.append(peptide)
                labels.append('1')
                numpos += 1
            elif len(peptide) == 0:
                continue
            else:
                print('The length of training peptide is not same with the length you choose')
                sys.exit(1)
        else:
            # Negative section: after a '-1' label row
            peptide = (eachline.split('\n')[0]).upper()
            ###check unrecognised character
            for eachAA in peptide:
                if eachAA not in upscaleAA:
                    print('Unrecognised character exists in your training file')###check characters
                    sys.exit(1)
            ###check peptide length
            if len(peptide) == int(length):
                peptides.append(peptide)
                labels.append('-1')
            elif len(peptide) == 0:
                continue
            else:
                print('The length of training peptide is not same with the length you choose')
                sys.exit(1)
    ###check number of training data
    if len(set(peptides)) < 5:
        print('The number of peptide in training set should be >= 5 unique instances')
        sys.exit(1)
    dict_labelpep = OrderedDict()
    for items in enumerate(peptides):
        dict_labelpep[items[1]] = labels[items[0]]
    if '-1' not in labels:
        return(dict_labelpep, 'NO')
    else:
        # Split peptides into a 'pos' list and a nested 'neg' dict keyed '0'
        dict_possetneg = OrderedDict()
        dict_possetneg['neg'] = OrderedDict()
        for labelitems in enumerate(labels):
            if labelitems[1] == '1':
                dict_possetneg.setdefault('pos', []).append(peptides[labelitems[0]])
            elif labelitems[1] == '-1':
                dict_possetneg['neg'].setdefault('0', []).append(peptides[labelitems[0]])
        return(dict_possetneg, 'YES') | 17shutao/Anthem | bin/sware_j_newreadfiletrain.py | sware_j_newreadfiletrain.py | py | 3,542 | python | en | code | 7 | github-code | 36 | [
{
"api_name": "os.path.exists",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 23,
... |
38633646472 | #imports
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from main import number_of_pendulums
class Plotting():
    """Plot per-pendulum time series from a tidy dataframe of simulation results."""

    def __init__(self, pendulum_number, pendulum_list, dataframe, y):
        # pendulum_number: how many pendulums the dataframe contains
        # pendulum_list: list that will hold one sub-frame per pendulum
        # dataframe: frame with (at least) 'pendulum', 'time', 'damping' and `y` columns
        # y: name of the column to plot on the y axis
        self.pendulum_number = pendulum_number
        self.pendulum_list = pendulum_list
        self.dataframe = dataframe
        self.y = y

    # here a list is created where each item is the full data for one pendulum
    # (pendulum ids are assumed to run from 1 to pendulum_number -- TODO confirm)
    def create_pendulum_data_list(self):
        for k in range(1, self.pendulum_number+1):
            p = self.dataframe.loc[self.dataframe['pendulum'] == k]
            self.pendulum_list.append(p)

    # this loop plots the correct data inside each item of the pendulum list,
    # labelling each curve with its damping value
    def create_plotting_data(self):
        for i in range(0, self.pendulum_number):
            damping = list((self.pendulum_list[i])['damping'])[0]
            plt.plot(list((self.pendulum_list[0])['time']), list((self.pendulum_list[i])[self.y]), label=damping)

    # same as above, but labelled with the plotted quantity (for energy curves)
    def create_plotting_data_for_energies(self):
        for i in range(0, self.pendulum_number):
            plt.plot(list((self.pendulum_list[0])['time']), list((self.pendulum_list[i])[self.y]), label=self.y) | EdwardTAllen/Damped-pendulum | plotting_methods.py | plotting_methods.py | py | 1,213 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "matpl... |
34219146493 | from __future__ import absolute_import, unicode_literals, division, print_function
"""Scrape contacts from a Google email account.
Usage:
- First, you need an OAuth2 access token. Run e.g.
$ python2 oauth.py --client_id="..." --client_secret="..." --user=jdoe@example.com --generate_oauth2_token
- Open the URL in a web browser, log in as normal, and then copy and paste the code you're given in the script window.
You will get, among other things, an access token.
- Run e.g.
$ imap-get-correspondents --username=jdoe@example.com --access-token="..."
Features:
- Caches message information in a local SQLite database so that the program can be restarted if it exits and so that
subsequent runs of the program do not have to start from the beginning; only new messages will need to be processed.
- Detection of automated messages (by headers).
- Detection of mailing list messages (by headers).
- Heuristic detection of addresses that "look automated" based on the local or domain parts of the email address.
- Heuristic for selecting a name when multiple names have been seen associated with a single email address.
Implementation notes:
- We can't directly connect to Google's IMAP servers with a username and password unless we enable "access from
less-secure applications". Instead, we need to use the "OAuth2 for Installed Applications" flow:
https://developers.google.com/identity/protocols/OAuth2InstalledApp
"""
import sys
import imaplib
import argparse
import email
import email.header
from .util import build_xoauth2_auth_string, is_automated, merge_addresses
from .models import open_db, populate_message, Message
# Py2/Py3 compatibility aliases for text vs binary string types.
PY2 = sys.version_info < (3,)
if PY2:
    text_type = unicode
    binary_type = str
else:
    text_type = str
    binary_type = bytes
# Gmail "All Mail" mailbox name, pre-quoted for the IMAP protocol.
GMAIL_ALL_MAIL = '"[Gmail]/All Mail"'
# Python 2 is detected but deliberately rejected: fail fast at import time.
if PY2:
    raise Exception('Has not been tested with Python 2!')
class ImapConnection(object):
    """Minimal wrapper around imaplib.IMAP4_SSL for a Gmail mailbox."""

    def __init__(self, auth_string, debug=False):
        self.auth_string = auth_string
        self.debug = debug
        self.imap_conn = None

    def connect(self):
        """Open the TLS connection to Gmail and authenticate via XOAUTH2."""
        conn = imaplib.IMAP4_SSL('imap.gmail.com')
        if self.debug:
            conn.debug = 4
        conn.authenticate('XOAUTH2', lambda _challenge: self.auth_string)
        self.imap_conn = conn

    # Yields raw mailbox-listing strings such as
    #   (\All \HasNoChildren) "/" "[Gmail]/All Mail"
    # (the result is not fully parsed).
    def iter_directories(self):
        status, listing = self.imap_conn.list()
        assert status == 'OK'  # "response type"; usually "OK" or "NO"
        for raw in listing:
            yield raw.decode('utf-8')

    # Yields (uid, email.message.Message) pairs, at most `limit` of them.
    def iter_messages(self, dir_name=None, limit=None):
        for count, uid in enumerate(self.iter_uids(dir_name=dir_name)):
            if limit is not None and count >= limit:
                break
            yield (uid, self.fetch_message(uid=uid))

    # Selects `dir_name` and yields every message UID in it as an int.
    def iter_uids(self, dir_name=None):
        self.imap_conn.select(dir_name)
        status, payload = self.imap_conn.uid('SEARCH', None, '(ALL)')
        assert status == 'OK'
        assert len(payload) == 1
        for token in payload[0].decode('utf-8').split():
            yield int(token)

    # Fetches one full message body by integer UID; raises KeyError if absent.
    def fetch_message(self, uid):
        assert isinstance(uid, int)
        status, payload = self.imap_conn.uid('FETCH', text_type(uid), '(BODY.PEEK[])')
        assert status == 'OK'
        if payload[0] is None:
            raise KeyError('No message with UID {}'.format(uid))
        return email.message_from_bytes(payload[0][1])  # `policy` arg left default
def build_parser():
    """Return the argparse parser for the correspondents CLI."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--username', help='e.g. jdoe@gmail.com')
    parser.add_argument('--access-token')
    return parser
def imap_connect(args):
    """Open and return an authenticated ImapConnection for the parsed CLI *args*."""
    auth = build_xoauth2_auth_string(args.username, args.access_token)
    connection = ImapConnection(auth_string=auth, debug=True)
    connection.connect()
    return connection
def main_get_correspondents():
    """CLI entry point: cache every Gmail message into the local SQLite DB,
    then print the merged, filtered list of (name, email) correspondents."""
    args = build_parser().parse_args()
    c = imap_connect(args)
    db = open_db('./correspondents.db')
    # Pull all of the data that we will need into our local cache. We deliberately defer filtering or cleaning up data
    # so that we have the original data; that way we can tweak filters/etc. locally.
    #
    # N.B.: This use of UIDs is probably fine for my purposes, but there are a lot of tricky, sharp edges around IMAP
    # UIDs! Ref.: https://www.limilabs.com/blog/unique-id-in-imap-protocol
    for uid in c.iter_uids(GMAIL_ALL_MAIL):
        try:
            # Have we already inspected this message?
            db_msg = db.query(Message).get(uid)
            if db_msg is None:
                print('Need to fetch message UID=={}'.format(uid))
                imap_msg = c.fetch_message(uid)
                db_msg = populate_message(db, uid, imap_msg)
        except:
            # NOTE(review): bare except -- rolls back and re-raises, but also
            # fires on KeyboardInterrupt/SystemExit; `except Exception` may be safer.
            db.rollback()
            raise
        else:
            # Commit only when the fetch/populate succeeded
            db.commit()
    # Merge all of the data in the local cache, apply our filters/data-scrubbing/heuristics/etc., and produce our
    # result.
    correspondents = set()
    for db_msg in db.query(Message).all():
        # https://www.iana.org/assignments/auto-submitted-keywords/auto-submitted-keywords.xhtml
        if db_msg.header_auto_submitted is not None and db_msg.header_auto_submitted.lower().startswith('auto-'):
            print('** ignoring message with Auto-Submitted header')
            continue
        # https://www.ietf.org/rfc/rfc2369.txt
        if db_msg.header_list_unsubscribe is not None and db_msg.header_list_unsubscribe != '':
            print('** ignoring message with List-Unsubscribe header')
            continue
        # Discard individual automated senders.
        for msg_addr in db_msg.message_addresses:
            addr = msg_addr.address.as_tuple()
            if is_automated(addr):
                print('** ignoring address that looks automated: {}'.format(addr))
                continue
            correspondents.add(addr)
    # Display our list!
    for name, email_addr in sorted(merge_addresses(correspondents)):
        print((name, email_addr))
| kelleyk/imap-scripts | imap_scripts/get_correspondents.py | get_correspondents.py | py | 6,133 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sys.version_info",
"line_number": 37,
"usage_type": "attribute"
},
{
"api_name": "imaplib.IMAP4_SSL",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "email.message_from_bytes",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "arg... |
72717961063 | #!/usr/bin/env python3
"""
Experiment with different parameters the CNN training and collect accuracy
Quick and dirty adaptation from train.py.
Search `TUNING` in this code for hard coded parameters to try.
[Code left as is, not cleaned, to explicit what I've tried...]
"""
import os
import pickle
import argparse
import datetime
import numpy as np
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.optim as optim
from train import test, train, get_data, hybrid_model
def main():
    """Grid-search CNN training hyper-parameters (learning rate x batch size,
    optionally repeated `iter` times) and append per-epoch accuracies to a
    timestamped CSV under experiment_stats/."""
    # Pre-trained model
    VALID_ARCH_CHOICES = ("vgg16", "vgg13", "densenet121")
    # Parse command line arguments
    ap = argparse.ArgumentParser()
    ap.add_argument("data_dir",
                    help="Directory containing the dataset (default: data)",
                    default="data",
                    nargs="?")
    ap.add_argument("--arch",
                    help="Model architecture from 'torchvision.models' (default: vgg16)",
                    choices=VALID_ARCH_CHOICES, default=VALID_ARCH_CHOICES[0])
    # ap.add_argument("--hidden-units",
    #                 help="Number of units the hidden layer should consist of (default: 512)",
    #                 default=512,
    #                 type=int)
    ap.add_argument("--cpu",
                    help="Use CPU (else GPU) for training (default if not set: GPU)",
                    action="store_true")
    args = ap.parse_args()
    device = "cpu" if args.cpu else "cuda"
    args.device = device
    # Fixed (non-searched) hyper-parameters attached to args for train()
    args.noise = 0.25
    args.clip = 1.0
    args.batch_size = 64
    args.hidden_units = 256
    args.delta = 1e-4
    # Build model: chose loss function, optimizer, processor support
    # # Done later to reset the model
    # model = hybrid_model(arch=args.arch, hidden_units=args.hidden_units)
    criterion = nn.NLLLoss()
    device = "cpu" if args.cpu else "cuda"  # NOTE(review): redundant, already computed above
    # ===== TUNING ===========================================================
    # Hyperparameters to test
    lr_range = [1e-4] ##### <== choice (enumeration)
    batch_size_range = [32, 16, 8, 2] #, 32, 128, 8, 4, 1] ##### <== choice (enumeration)
    epochs = 30 ##### <== choice (1 value=max)
    # Number of iteration for each parameter (NOTE(review): `iter` shadows the builtin)
    iter = 1 ##### <== choice (single value)
    # DP or not DP, that is the question
    args.disable_dp = False ##### <== choice (boolean)
    # ========================================================================
    # File to export results (name encodes DP choice and a timestamp)
    dp_or_not = "noDP_" if args.disable_dp else "DP_"
    file = "experiment_stats/accuracy_data_" + dp_or_not
    file += str(datetime.datetime.today()).replace(' ','_') + ".csv"
    steps = len(lr_range) * len(batch_size_range) * iter
    step = 0
    # Write column titles
    with open(file, 'w') as f:
        f.write('learning_rate, batch_size, n_epochs, accuracy, n_times_for_avg\n')
    # Experiment loops
    for lr in lr_range:
        args.learning_rate = lr
        for bs in batch_size_range:
            args.batch_size = bs
            # Load the dataset into a dataloader ### default test batch size ###
            trainloader, testloader, mapping = get_data(data_folder=args.data_dir,
                                                        batch_size=bs)
            args.sample_size = len(trainloader.dataset)
            #for epochs in epochs_range:
            accuracy_sum = []
            for _ in range(iter):
                # Reset the model
                model, optimizer = hybrid_model(arch=args.arch,
                                                hidden_units=args.hidden_units,
                                                args=args)
                step += 1
                _, acc = train(model=model,
                               trainloader=trainloader,
                               testloader=testloader,
                               epochs=epochs,
                               print_every=None,
                               criterion=criterion,
                               optimizer=optimizer,
                               device=device,
                               arch=args.arch,
                               model_dir='',
                               serialize=False,
                               detail=False,
                               args=args,
                               )
                # Convert fractional accuracies (one per epoch) to percentages
                acc = np.multiply(acc, 100)
                accuracy_sum.append(acc)
                print(f' {step}/{steps}\tlr={lr}, bs={bs},')
                # One CSV row per epoch of this single run
                for n_epoch, accur in enumerate(acc, start=1):
                    line = f'{lr}, {bs}, {n_epoch}, {accur:.2f}, 1\n'
                    with open(file, 'a') as f:
                        f.write(line)
                    print(f'\t. ×{n_epoch} epoch{"s" if n_epoch > 1 else " "}'
                          f' => accuracy = {accur:.2f}%')
            # Sum up for identical settings, repeted `iter` times
            if iter > 1:
                acc_avg = np.average(accuracy_sum, axis=0)
                for n_epoch, accur in enumerate(acc_avg, start=1):
                    line = f'{lr}, {bs}, {n_epoch}, {accur:.2f}, {iter}\n'
                    with open(file, 'a') as f:
                        f.write(line)
                    print(f'\t\t>>> Average on {iter} iterations >>>\tlr={lr}, bs={bs},'
                          f' ×{n_epoch} epoch{"s" if n_epoch > 1 else " "}'
                          f' => accuracy = {accur:.2f}%')
# Script entry point: run the tuning experiment when executed directly.
if __name__ == "__main__":
    main()
| jmg-74/exam | flowers/stats.py | stats.py | py | 5,713 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "torch.nn.NLLLoss",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.t... |
9587814547 | import os
import subprocess
from plugin import LINUX, WINDOWS, plugin, require
# Battery attributes that get_specific_info() knows how to report.
VALID_OPTIONS = ['status', 'vendor', 'energy', 'technology', 'remaining']
@require(platform=WINDOWS)
@plugin('battery')
def battery_WIN32(jarvis, s):
    """
    Provides basic info about battery for win32
    """
    # https://stackoverflow.com/a/41988506/6771356
    import psutil

    def to_clock(total_seconds):
        # Render a number of seconds as H:MM:SS
        minutes, seconds = divmod(total_seconds, 60)
        hours, minutes = divmod(minutes, 60)
        return "%d:%02d:%02d" % (hours, minutes, seconds)

    state = psutil.sensors_battery()
    if state.power_plugged:
        jarvis.say("Battery is charging: %s%%" % state.percent)
    else:
        jarvis.say("charge = %s%%, time left = %s" %
                   (state.percent, to_clock(state.secsleft)))
@require(platform=LINUX, native='upower')
@plugin('battery')
def battery_LINUX(jarvis, s):
    """
    Provides battery status eg: percentage
    """
    # Delegate to the upower-based helper; `s` selects the attribute to show
    requested = s.lower()
    jarvis.say(get_specific_info(requested))
def get_specific_info(info_required):
    """
    Return one battery attribute, as reported by `upower` and filtered
    through grep.

    Example:
        get_specific_info("vendor") - Returns the vendor of the battery
        get_specific_info("status") - Returns the status of the battery
    """
    # Map each user-facing option onto the grep -E pattern that extracts it
    patterns = {
        'status': "state|to full|percentage",
        'vendor': "vendor",
        'energy': "energy",
        'technology': "technology",
        'remaining': "time to empty",
    }
    pattern = patterns.get(info_required)
    if pattern is None:
        # User has entered something invalid
        return "Invalid option given. Here's a list of options:\n" + \
            ', '.join(VALID_OPTIONS)
    grep_command = ["grep", "-E", pattern]
    # Full battery report from upower...
    upower_process = subprocess.Popen(
        ["upower", "-i", "/org/freedesktop/UPower/devices/battery_BAT0"],
        stdout=subprocess.PIPE
    )
    # ...piped through grep to keep only the requested attribute
    grep_process = subprocess.Popen(grep_command,
                                    stdin=upower_process.stdout,
                                    stdout=subprocess.PIPE
                                    )
    upower_process.stdout.close()
    # Get the output after piping both the commands
    output = grep_process.communicate()[0]
    return output.decode("utf-8")
@require(platform=LINUX, native='!upower')
@plugin('battery')
def battery_linux_fallback(jarvis, s):
    """
    Provides battery status like battery percentage
    and if the battery is charging or not
    """
    # Probe /sys for the first battery directory that exists
    # https://askubuntu.com/a/309146
    battery_dir = False
    for index in range(10):
        candidate = "/sys/class/power_supply/BAT{}/".format(str(index))
        if os.path.exists(candidate):
            battery_dir = candidate
            break
    if battery_dir is False:
        jarvis.say("No Battery found!")
        return

    def read_attribute(attribute):
        # `cat` the sysfs file and strip the trailing newline
        raw = subprocess.check_output(["cat", battery_dir + attribute])
        return raw.decode("utf-8")[:-1]

    report = [
        "Status: " + read_attribute("status"),
        "Charge: " + read_attribute("capacity") + "%",
    ]
    jarvis.say('\n'.join(report))
| sukeesh/Jarvis | jarviscli/plugins/battery.py | battery.py | py | 3,633 | python | en | code | 2,765 | github-code | 36 | [
{
"api_name": "psutil.sensors_battery",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "plugin.require",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "plugin.WINDOWS",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "plugin.plugin",
... |
25985906594 | import stanza
import json
from tqdm import tqdm
import os
def save_as_json(objects, fname):
    """Serialize *objects* as pretty-printed JSON into data/<fname>.

    The output directory is created on demand so the script does not
    fail with FileNotFoundError on a fresh checkout; the file is written
    as UTF-8 regardless of the platform default encoding.
    """
    os.makedirs('data', exist_ok=True)
    with open('data/' + fname, 'w', encoding='utf-8') as output:
        json.dump(objects, output, indent=4)
# Build the stanza annotation pipeline once (tokenization, multi-word
# tokens, POS tagging, lemmatization); DEBUG logging surfaces model
# download/initialization progress.
nlp = stanza.Pipeline(lang='en', processors='tokenize,mwt,pos,lemma', logging_level='DEBUG')
# Process every plain-text file found in the input directory.
fiction_list = [_ for _ in os.listdir("input") if _.endswith(".txt")]
for fiction in tqdm(fiction_list):
    list_for_json = []
    # Each line of the file is treated as one independent document.
    with open("input/" + fiction, "r") as f:
        documents = f.readlines()
    in_docs = [stanza.Document([], text=d) for d in documents] # Wrap each document with a stanza.Document object
    out_docs = nlp(in_docs) # Call the neural pipeline on this list of documents
    for d in out_docs:
        list_for_json.append(d.to_dict())
    # Output keeps the input file's name, written under data/ by save_as_json.
    save_as_json(list_for_json, fiction)
| scarletcho/coca-fiction-search | sentence-tokenize.py | sentence-tokenize.py | py | 793 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "json.dump",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "stanza.Pipeline",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 13... |
27232603168 | import curses
from ..config import CORRECT_LETTER_COLOR_PAIR_INDEX, EXISTS_LETTER_COLOR_PAIR_INDEX, INCORRECT_LETTER_COLOR_PAIR_INDEX, NORMAL_LETTER_COLOR_PAIR_INDEX, TILE_SIZE
from .screen import apply_color_pair, remove_color_pair
def generate_game_element(letter, correct=False, exists_in_answer=False, incorrect=False):
    """Build the dict describing one letter tile and its marking flags."""
    element = dict(
        value=letter,
        correct=correct,
        exists_in_answer=exists_in_answer,
        incorrect=incorrect,
    )
    return element
def pad_letter_with_spaces_to_match_tile_size(letter: str):
    """Center *letter* inside a TILE_SIZE-wide field of spaces."""
    return "{:^{width}}".format(letter, width=TILE_SIZE)
def get_counting_dict(word):
    """Map each distinct character of *word* to its number of occurrences."""
    counts = {}
    for char in word:
        counts[char] = counts.get(char, 0) + 1
    return counts
def get_word_letters_marked(word_letters_elements, chosen_word):
    """Mark guessed letter elements against *chosen_word* (Wordle rules).

    Each element dict ends up with exactly one flag set:
    - ``correct``: right letter in the right position (green),
    - ``exists_in_answer``: letter occurs elsewhere in the answer and
      still has an unmatched occurrence left (yellow),
    - ``incorrect``: letter absent or all its occurrences already used.

    The element dicts are mutated in place; the returned list holds the
    same dict objects as the input.
    """
    marked_word_letters = [element for element in word_letters_elements]
    word_letters = [element['value'] for element in word_letters_elements]
    # Remaining unmatched occurrences of each answer letter
    # (inlined character count so this function is self-contained).
    letters_count_in_chosen_word = {char: chosen_word.count(char) for char in set(chosen_word)}

    # First pass: exact positional matches (green) consume an occurrence.
    for index, chosen_word_letter in enumerate(chosen_word):
        if word_letters[index] == chosen_word_letter and letters_count_in_chosen_word[chosen_word_letter] > 0:
            marked_word_letters[index]['correct'] = True
            letters_count_in_chosen_word[chosen_word_letter] -= 1

    # Second pass: misplaced letters (yellow). Indices already marked
    # correct must be skipped; otherwise a green tile would consume a
    # second occurrence and steal the yellow mark from a later duplicate
    # (e.g. answer "aab", guess "axa": the final 'a' must be yellow).
    for index, letter in enumerate(word_letters):
        if marked_word_letters[index]['correct']:
            continue
        if letter in chosen_word and letters_count_in_chosen_word[letter] > 0:
            marked_word_letters[index]['exists_in_answer'] = True
            letters_count_in_chosen_word[letter] -= 1

    # Whatever is neither green nor yellow is grey.
    for index, marked_letter in enumerate(marked_word_letters):
        if not marked_letter['correct'] and not marked_letter['exists_in_answer']:
            marked_word_letters[index]['incorrect'] = True
    return marked_word_letters
def draw_square(stdscr: 'curses._CursesWindow', i, j, element, board_size, offset):
    """Draw one letter tile at board column *i* / row *j*, centred on screen.

    The colour pair is chosen from the element's marking flags; *offset*
    shifts the whole board horizontally (in characters). Raises a generic
    Exception when the computed cell falls outside the terminal bounds.
    """
    height, width = stdscr.getmaxyx()
    value, correct, exists_in_answer, incorrect = element['value'], element['correct'], element['exists_in_answer'], element['incorrect']
    # Flag precedence as coded: correct, then incorrect, then exists.
    if correct:
        apply_color_pair(stdscr, CORRECT_LETTER_COLOR_PAIR_INDEX)
    elif incorrect:
        apply_color_pair(stdscr, INCORRECT_LETTER_COLOR_PAIR_INDEX)
    elif exists_in_answer:
        apply_color_pair(stdscr, EXISTS_LETTER_COLOR_PAIR_INDEX)
    else:
        apply_color_pair(stdscr, NORMAL_LETTER_COLOR_PAIR_INDEX)
    # Centre the board: horizontal pitch is 2 * TILE_SIZE columns per
    # tile index, vertical pitch is 2 rows per tile index.
    x = int((width / 2) - TILE_SIZE * (board_size - 0.5 - 2 * i)) + offset
    y = int((height / 2) - (board_size - 0.5 - 2 * j))
    if y <= 0 or x <= 0 or x >= width or y >= height:
        raise Exception('board can\'t fit inside terminal size')
    padded_letter = pad_letter_with_spaces_to_match_tile_size(value)
    stdscr.addstr(y, x, padded_letter)
    # Reset every colour pair that may have been applied above.
    remove_color_pair(stdscr, NORMAL_LETTER_COLOR_PAIR_INDEX)
    remove_color_pair(stdscr, CORRECT_LETTER_COLOR_PAIR_INDEX)
    remove_color_pair(stdscr, EXISTS_LETTER_COLOR_PAIR_INDEX)
    remove_color_pair(stdscr, INCORRECT_LETTER_COLOR_PAIR_INDEX)
def draw_game_board(stdscr: 'curses._CursesWindow', game_board, offset = 0):
    """Render every tile of *game_board* onto the curses screen."""
    board_size = len(game_board)
    for y, board_row in enumerate(game_board):
        for x, tile in enumerate(board_row):
            draw_square(stdscr, x, y, tile, board_size, offset)
| Samoray-l337/terminal-wordle | src/utils/game.py | game.py | py | 3,334 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "config.TILE_SIZE",
"line_number": 12,
"usage_type": "argument"
},
{
"api_name": "screen.apply_color_pair",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "config.CORRECT_LETTER_COLOR_PAIR_INDEX",
"line_number": 51,
"usage_type": "argument"
},
... |
73478586985 | from pprint import pformat
from six import iteritems
class PITimedValues(object):
    """Swagger-generated model holding a list of PI timed values."""

    # Attribute name -> swagger type, used by the (de)serializer.
    swagger_types = {
        'items': 'list[PITimedValue]',
        'units_abbreviation': 'str',
        'web_exception': 'PIWebException',
    }

    # Attribute name -> JSON key used on the wire.
    attribute_map = {
        'items': 'Items',
        'units_abbreviation': 'UnitsAbbreviation',
        'web_exception': 'WebException',
    }

    def __init__(self, items=None, units_abbreviation=None, web_exception=None):
        """Create the model; any argument left as None stays unset."""
        self._items = None
        self._units_abbreviation = None
        self._web_exception = None
        if items is not None:
            self.items = items
        if units_abbreviation is not None:
            self.units_abbreviation = units_abbreviation
        if web_exception is not None:
            self.web_exception = web_exception

    @property
    def items(self):
        """The timed values contained in this collection."""
        return self._items

    @items.setter
    def items(self, items):
        self._items = items

    @property
    def units_abbreviation(self):
        """Abbreviation of the engineering units, e.g. 'degC'."""
        return self._units_abbreviation

    @units_abbreviation.setter
    def units_abbreviation(self, units_abbreviation):
        self._units_abbreviation = units_abbreviation

    @property
    def web_exception(self):
        """Web exception information returned by the PI Web API, if any."""
        return self._web_exception

    @web_exception.setter
    def web_exception(self, web_exception):
        self._web_exception = web_exception

    def to_dict(self):
        """Return the model's properties as a plain dict, recursing into
        nested models (anything exposing a to_dict method)."""
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    entry.to_dict() if hasattr(entry, "to_dict") else entry
                    for entry in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()
                }
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return a pretty-printed string representation of the model."""
        return pformat(self.to_dict())

    def __repr__(self):
        return self.to_str()

    def __eq__(self, other):
        if not isinstance(other, PITimedValues):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self == other
| dcbark01/PI-Web-API-Client-Python | osisoft/pidevclub/piwebapi/models/pi_timed_values.py | pi_timed_values.py | py | 2,018 | python | en | code | 39 | github-code | 36 | [
{
"api_name": "six.iteritems",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "pprint.pformat",
"line_number": 76,
"usage_type": "call"
}
] |
34321829332 | #-*- coding: utf-8 -*-
from django.db import models
from django.db.models import Max
from .category import Category
class Product(models.Model):
    """Catalog product, ordered inside its category via ``position``."""

    # Category is optional; deleting the category keeps its products.
    category = models.ForeignKey(Category, related_name="products", null=True, blank=True, on_delete=models.SET_NULL)
    name = models.CharField(db_index=True, max_length=255)
    description = models.TextField(null=True, blank=True)
    image = models.CharField(max_length=255, null=True, blank=True)
    thumbnail = models.CharField(max_length=255, null=True, blank=True)
    # Sort order within the category; auto-assigned when saved as None.
    position = models.IntegerField(default=0)
    active = models.BooleanField(default=False)

    def save(self, force_insert=False, force_update=False, using=None,
             update_fields=None):
        """Assign a position (when missing) before persisting."""
        self.__before_save(force_insert, force_update, using, update_fields)
        super().save(force_insert, force_update, using, update_fields)

    def __before_save(self, force_insert=False, force_update=False, using=None,
                      update_fields=None):
        # Place products with no explicit position after the current last
        # one in their category.
        if self.position is None:
            self.position = self.__get_max_position(self.category) + 1

    def __get_max_position(self, category):
        """Return the highest position used in *category* (0 when empty).

        *category* may be a Category instance, its primary key, or None
        (meaning products without a category).

        :raises ValueError: for any other argument type.
        """
        if category is None:
            category_id = None
        elif isinstance(category, int):
            category_id = category
        elif isinstance(category, Category):
            category_id = category.pk
        else:
            raise ValueError('Unsupported category value given.')
        aggregated = Product.objects.values('position').filter(
            category=category_id).aggregate(Max('position'))
        # aggregate(Max('position')) returns {'position__max': value};
        # the value is None when the category has no products yet.
        # (The previous code read aggregated['position'], which raises
        # KeyError -- that key is never produced by aggregate().)
        position = aggregated['position__max']
        return position if position is not None else 0

    def __str__(self):
        return self.name
{
"api_name": "django.db.models.Model",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": ... |
27550324610 | import datetime
from imap_tools import EmailAddress
# Expected parse results for the RFC 2822 example message no. 14; each key
# mirrors the message attribute of the same name checked by the test suite.
DATA = dict(
    subject='Re: TESTテストテスト',
    from_='atsushi@example.com',
    to=('rudeboyjet@gmail.com',),
    cc=(),
    bcc=(),
    reply_to=('rudeboyjet@gmail.com',),
    date=datetime.datetime(2011, 8, 19, 10, 47, 17, tzinfo=datetime.timezone(datetime.timedelta(0, 32400))),
    date_str='Fri, 19 Aug 2011 10:47:17 +0900',
    text='Hello\r\n',
    html='',
    headers={'date': ('Fri, 19 Aug 2011 10:47:17 +0900',), 'from': ('Atsushi Yoshida <atsushi@example.com>',), 'reply-to': ('rudeboyjet@gmail.com',), 'subject': ('Re: TEST\r\n \r\n\t=?ISO-2022-JP?B?GyRCJUYlOSVIGyhC?=\r\n =?ISO-2022-JP?B?GyRCJUYlOSVIGyhC?=',), 'to': ('rudeboyjet@gmail.com',), 'message-id': ('<0CC5E11ED2C1D@example.com>',), 'in-reply-to': ('<rid_5582199198@msgid.example.com>',), 'mime-version': ('1.0',), 'content-type': ('text/plain; charset=iso-2022-jp',), 'content-transfer-encoding': ('7bit',)},
    attachments=[],
    from_values=EmailAddress(name='Atsushi Yoshida', email='atsushi@example.com'),
    to_values=(EmailAddress(name='', email='rudeboyjet@gmail.com'),),
    cc_values=(),
    bcc_values=(),
    reply_to_values=(EmailAddress(name='', email='rudeboyjet@gmail.com'),),
)
) | ikvk/imap_tools | tests/messages_data/rfc2822/example14.py | example14.py | py | 1,236 | python | en | code | 608 | github-code | 36 | [
{
"api_name": "datetime.datetime",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "datetime.timezone",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "imap_tools.Ema... |
37662338422 | import os
import setuptools
requirement_files = []
# Walk the requirements directory (recursively) and gather every
# requirements file found in it.
for root, dirs, files in os.walk('requirements'):
    for requirements_file in files:
        requirements_file_path = os.path.join(root, requirements_file)
        requirement_files.append(requirements_file_path)
# Exclude the top-level requirements.txt (it aggregates the others).
# NOTE(review): os.path.join produces backslash-separated paths on
# Windows, so this '/'-based exclusion would silently not match there.
if 'requirements/requirements.txt' in requirement_files:
    requirement_files.remove('requirements/requirements.txt')
# Parse all requirement files, skipping blank lines and comments.
requirements = []
for requirement_file in requirement_files:
    with open(requirement_file, "r") as f:
        for req in f.read().splitlines():
            if len(req) > 0 and not req.startswith('#'):
                requirements.append(req)
setuptools.setup(
    install_requires=requirements,
    setup_requires=['pbr==0.11.0', 'testrepository'],
    pbr=True)
| rackerlabs/poppy | setup.py | setup.py | py | 874 | python | en | code | null | github-code | 36 | [
{
"api_name": "os.walk",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "setuptools.setup",
"line_number"... |
70376883625 | import requests
from random import randrange
from randomWalk import dateIncrease
from olivier import find_arrival
def chooseDestination_new (depart , budget , date , dayspercity=2 , country = 'CH' , currency = 'CHF', locale = 'en-GB' , destination = 'anywhere' ) :
    """Pick a random Skyscanner browse quote from *depart* within *budget*.

    Returns (True, quote_dict) on success, where the quote dict is
    augmented with an 'arrived' place record and an 'arrivalTime' string,
    or (False, []) when no quote exists or none fits the budget.
    NOTE(review): the API key is hard-coded in the URL below -- it should
    live in configuration, not in source control.
    """
    url = 'http://partners.api.skyscanner.net/apiservices/browsequotes/v1.0/'+str(country)+'/'+str(currency)+'/'+str(locale)+'/'+str(depart)+'/'+str(destination)+'/'+str(date)+'/?apiKey=ha712564909427747796218296388326'
    r = requests.get(url)
    rjson = r.json()
    trips = rjson['Quotes']
    if(trips == []) :
        return False , []
    # Draw random quotes until one fits the budget; give up after
    # 5 * len(trips) attempts.
    price = budget + 1
    count = 0
    maxIter = len(trips)*5
    while (price > budget) :
        goodTrip = (trips[randrange(len(trips))])
        price = goodTrip['MinPrice']
        count += 1
        if ( count > maxIter ) :
            return False ,[]
    # Attach the full place record matching the quote's destination id.
    arrived = goodTrip['OutboundLeg']['DestinationId']
    for place in rjson["Places"] :
        if (place['PlaceId'] == arrived) :
            goodTrip['arrived'] = place
            break
    # NOTE(review): despite the key name, this stores the outbound leg's
    # *departure* date; confirm which one callers actually need.
    goodTrip['arrivalTime'] = str(goodTrip['OutboundLeg']['DepartureDate'])
    return True ,goodTrip
def fast_getter(depart, budget, date, country = 'CH' , currency = 'CHF', locale = 'en-GB', dayspercity=2, marge=0):
    """Greedily build a round trip from *depart* within *budget*.

    Extends the route with random affordable legs until the budget is
    exhausted, then backtracks leg by leg (refunding each price) until a
    flight home to *depart* can be afforded, allowing an overdraft of
    budget * marge. Returns a list of leg dicts, or [] on total failure.
    NOTE(review): if the very first leg cannot be found, destlist is empty
    and destlist[-1] below raises IndexError; also `fromcode` is not
    rewound while backtracking, so the homebound leg's 'CodeBeginning'
    can be stale -- `position` looks like the intended value. Verify.
    """
    destlist=[]
    price=0
    fromcode=depart
    currdate=date
    enoughBudget = True
    current_budget = budget
    while (enoughBudget): #find route until no money
        dico={}
        (enoughBudget, destination) = chooseDestination_new(fromcode, current_budget, currdate, )
        if(enoughBudget):
            dico['CodeBeginning'] = fromcode
            dico['DepartureDate']= currdate
            dico['Price']=destination['MinPrice']
            dico['CodeArrival'] = destination['arrived']['IataCode']
            dico['NameEnding'] = destination['arrived']['CityName']
            current_budget -= dico['Price']
            print(destination['MinPrice'], "to go to",dico['CodeArrival'], dico['NameEnding'])
            fromcode=dico['CodeArrival']
            # One day of travel, then `dayspercity` days in the city.
            currdate=dateIncrease(currdate, 1)
            dico['ArrivalDate'] = currdate
            currdate=dateIncrease(currdate, dayspercity)
            destlist += [dico]
    canGoHome = False
    print("Trying to go home.....\n")
    while (not canGoHome) : #come back until you can go home
        position = destlist[-1]['CodeBeginning']
        print("Going from",position,"to",depart,"with budget",current_budget,"(adjusted to",current_budget+budget*marge,") at", currdate)
        (canGoHome , goHome) = chooseDestination_new (position , current_budget+budget*marge , currdate , country=country , currency=currency , locale=locale , destination=depart )
        if ( canGoHome) :
            # print(goHome)
            dico={}
            dico['CodeBeginning'] = fromcode
            dico['DepartureDate']= currdate
            dico['Price']=goHome['MinPrice']
            dico['CodeArrival'] = goHome['arrived']['IataCode']
            dico['NameEnding'] = goHome['arrived']['CityName']
            # current_budget -= dico['Price']
            # print(destination['MinPrice'], "to go to",dico['CodeArrival'], dico['NameEnding'])
            # currdate=dateIncrease(currdate, 1)
            dico['ArrivalDate'] = goHome['arrivalTime'][:10]
            # goHome ['NameEnding'] = goHome['arrived']['CityName']
            destlist += [dico]
        else:
            # Backtrack: drop the last leg, refund its price, retry.
            position = destlist[-1]['CodeBeginning']
            current_budget += destlist[-1]['Price']
            destlist = destlist [ : -1]
            if (destlist == []) :
                # print("RETURNING NONE")
                return []
    print("I am done")
    return destlist
def heavy_getter(vols, country='CH', currency='CHF', locale='en-GB', dayspercity=2):
    """Resolve each leg in *vols* to a detailed itinerary via find_arrival,
    carrying over the leg's 'NameEnding' into the detailed record."""
    detailed_legs = []
    for leg in vols:
        print("\n\n", leg)
        first_option = find_arrival(leg['CodeBeginning'], leg['CodeArrival'], leg['DepartureDate'])[0]
        first_option['NameEnding'] = leg['NameEnding']
        detailed_legs.append(first_option)
    return detailed_legs
if __name__ == '__main__':
    # Demo: keep retrying a 400 CHF round trip from Geneva (10% overdraft
    # allowed for the homebound leg) until the fast planner succeeds,
    # then resolve the route with the detailed (slow) API.
    dest = []
    while(True):
        dest = fast_getter("GVA", 400, "2017-12-20",marge=0.1)
        if(dest == []):
            print("I FAILED !! I AM NOT ROOT !! i will come back...")
        else:
            print(dest)
            break
    print("Now confirming, with heavy...")
    print(heavy_getter(dest))
| ttreyer/Skyscanner-CONpagnons | newgetters.py | newgetters.py | py | 4,427 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "random.randrange",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "randomWalk.dateIncrease",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "randomWalk.dateI... |
37432459091 | import pygame as pg
import sys
from settings import *
from player import *
from world_object_manager import *
from ray import *
from ray_manager import *
from object_renderer import *
class Game:
    """Top-level pygame application: owns the window, clock and all game
    sub-systems, and drives the main loop (events -> update -> draw)."""
    def __init__(self):
        pg.init()
        pg.mouse.set_visible(False)
        self.screen = pg.display.set_mode(RES)
        self.clock = pg.time.Clock()
        # Milliseconds elapsed in the previous frame (updated each tick).
        self.delta_time = 1
        self.new_game()
    def new_game(self):
        # (Re)create all sub-systems; the ray manager ties them together.
        self.player = Player(self)
        self.world_object_manager = WorldObjectManager(self)
        self.world_object_manager.add_random_object_number(5)
        self.object_renderer = ObjectRenderer(self)
        self.ray_manager = RayManager(self, self.player, self.world_object_manager, self.object_renderer)
    def update(self):
        # Advance the simulation, then present the frame and cap at FPS.
        self.player.update()
        self.ray_manager.update()
        pg.display.flip()
        self.delta_time = self.clock.tick(FPS)
        # Show the current frame rate in the window title.
        pg.display.set_caption(f'{self.clock.get_fps() :.1f}')
    def draw(self):
        self.screen.fill('black')
        #self.draw_2d()
        self.draw_3d()
    def draw_2d(self):
        # Debug top-down view (currently disabled in draw()).
        self.player.draw()
        self.world_object_manager.draw()
    def draw_3d(self):
        self.object_renderer.render()
    def check_events(self):
        self.global_trigger = False
        # Quit on window close or the Escape key.
        for event in pg.event.get():
            if event.type == pg.QUIT or (event.type == pg.KEYDOWN and event.key == pg.K_ESCAPE):
                pg.quit()
                sys.exit()
    def run(self):
        """Run the main loop forever (exit happens via check_events)."""
        while True:
            self.check_events()
            self.update()
            self.draw()
| makaempffer/game_2d_DOS | Dawn_Of_The_system/src/main.py | main.py | py | 1,826 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pygame.init",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "pygame.mouse.set_visible",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "pygame.mouse",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "pygame.display... |
23958671415 | from pytorch_lightning import LightningDataModule
from pytorch_lightning.utilities.types import EVAL_DATALOADERS, TRAIN_DATALOADERS
from torch.utils.data import DataLoader, Dataset
class BasicDataModule(LightningDataModule):
    """Minimal LightningDataModule wrapping pre-built train/val datasets."""

    def __init__(
        self,
        train_dataset: Dataset,
        val_dataset: Dataset,
        batch_size: int = 256,
        num_workers: int = 0,
        pin_memory: bool = False,
    ) -> None:
        super().__init__()
        self.train_dataset = train_dataset
        self.val_dataset = val_dataset
        self.batch_size = batch_size
        self.num_workers = num_workers
        self.pin_memory = pin_memory

    def _shared_loader_kwargs(self) -> dict:
        # DataLoader settings common to both loaders.
        return dict(
            batch_size=self.batch_size,
            num_workers=self.num_workers,
            pin_memory=self.pin_memory,
        )

    def train_dataloader(self) -> TRAIN_DATALOADERS:
        """DataLoader over the training set, reshuffled every epoch."""
        return DataLoader(
            self.train_dataset,
            shuffle=True,
            **self._shared_loader_kwargs(),
        )

    def val_dataloader(self) -> EVAL_DATALOADERS:
        """DataLoader over the validation set, in fixed order."""
        return DataLoader(
            self.val_dataset,
            **self._shared_loader_kwargs(),
        )
| adamcasson/minT5 | mint5/datamodule.py | datamodule.py | py | 1,165 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pytorch_lightning.LightningDataModule",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "torch.utils.data.Dataset",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "torch.utils.data.Dataset",
"line_number": 10,
"usage_type": "name"
},
{
... |
33197438917 | from __future__ import print_function
import sys
import time
from optparse import OptionParser
import ezdxf
__version__ = "0.1"
class Stitcher(object):
    """Reads a DXF full of loose LINE segments and stitches them back into
    polylines (and circles where possible), writing a new DXF file."""
    def __init__(self, input_name, output_name):
        self.old = ezdxf.readfile(input_name)
        self.new = ezdxf.new()
        self.output_name = output_name
        # Open point chains, grown as matching segments are found.
        self.partial_loops = []
        # Chains whose first and last point coincide.
        self.closed_loops = []
        # This can reconstruct polylines that are stored in the correct order.
        # It does not destroy the direction, and works even if the segments are
        # randomly ordered.
        # NOTE(review): `stats` is a local that is never used (all updates
        # to it are commented out below); leftover instrumentation.
        stats = {
            "last_match": 0,
            "insert": 0,
            "random_order": 0,
            "stitch_reversed": 0,
            "approx_match": 0,
        }
        self.load_partial_loops()
    def load_partial_loops(self):
        """Group LINE entities into chains by appending each segment whose
        start matches the tail of an existing chain."""
        t0 = time.time()
        for line in self.old.modelspace().query("LINE"):
            for i, p in enumerate(self.partial_loops):
                if p[-1] == line.dxf.start:
                    p.append(line.dxf.end)
                    # if i == 0:
                    #     stats['last_match'] += 1
                    break
            else:
                # No chain ends at this segment's start: begin a new chain.
                self.partial_loops.insert(0, [line.dxf.start, line.dxf.end])
                # stats['insert'] += 1
        t1 = time.time()
        print("Grouped segments in", t1 - t0)
    def promote_closed_loops(self):
        """Move chains whose endpoints coincide into closed_loops.
        Iterates backwards so pop(i) cannot disturb unvisited indices."""
        t0 = time.time()
        for i in range(len(self.partial_loops) - 1, -1, -1):
            if (
                self.partial_loops[i][0] == self.partial_loops[i][-1]
                and len(self.partial_loops[i]) > 2
            ):
                self.closed_loops.append(self.partial_loops.pop(i))
        t1 = time.time()
        print("promote", t1 - t0)
    def promote_circles(self):
        """Promotes closed loops that are made of almost identical length
        segments to be circles based on the *point* distance (matching openscad
        behavior for polygonal approximations."""
        for i in range(len(self.closed_loops) - 1, -1, -1):
            c = find_circle_center(self.closed_loops[i])
            print("closed loops", len(self.closed_loops[i]), "center", c)
            if c is not None:
                r = dist(c, self.closed_loops[i][0])
                self.new.modelspace().add_circle(c, r)
                del self.closed_loops[i]
    def reconstruct_loops(self):
        """Concatenate chains whose tail matches another chain's head.
        NOTE(review): pop(j) shrinks the list while the outer range was
        fixed up front, so partial_loops[i] can raise IndexError once
        enough chains have merged; verify against real inputs."""
        t0 = time.time()
        for i in range(len(self.partial_loops) - 1):
            for j in range(len(self.partial_loops) - 1, i, -1):
                if self.partial_loops[i][-1] == self.partial_loops[j][0]:
                    self.partial_loops[i].extend(self.partial_loops.pop(j))
        t1 = time.time()
        print("reconstruct", t1 - t0)
        # The triple-quoted block below is dead Python-2-era code kept as a
        # bare string literal; it documents an older stitching strategy.
        """
        t1 = time.time()
        print "De-randomized in", t1-t0
        t0 = time.time()
        for i in range(len(partial_loops)-1, -1, -1):
            if (partial_loops[i][0] == partial_loops[i][-1] and
                len(partial_loops[i]) > 2):
                closed_loops.append(partial_loops.pop(i))
        for i in range(len(partial_loops)-1):
            for j in range(len(partial_loops)-1, i, -1):
                if partial_loops[i][-1] == partial_loops[j][-1]:
                    partial_loops[i].extend(partial_loops.pop(j)[::-1])
                    stats['stitch_reversed'] += 1
        t1 = time.time()
        print "Corrected order in", t1-t0
        for i in range(len(partial_loops)-1, -1, -1):
            if (partial_loops[i][0] == partial_loops[i][-1] and
                len(partial_loops[i]) > 2):
                closed_loops.append(partial_loops.pop(i))
        stats['closed'] = len(closed_loops)
        stats['partial'] = len(partial_loops)
        print stats
        if partial_loops:
            print partial_loops[0]
        """
    def save(self):
        """Write remaining loops/chains as 2D polylines into the new DXF."""
        for c in self.closed_loops:
            self.new.modelspace().add_polyline2d(c)
        for p in self.partial_loops:
            self.new.modelspace().add_polyline2d(p)
        self.new.filename = self.output_name
        self.new.save()
def bounds_elementwise(lst):
    """Given a non-empty list, returns (mins, maxes) each of which is the same
    length as the list items.

    >>> bounds_elementwise([[0,6,0], [5,0,7]])
    ([0,0,0], [5,6,7])
    """
    columns = list(zip(*lst))
    mins = [min(column) for column in columns]
    maxes = [max(column) for column in columns]
    return (mins, maxes)
def boundingbox(polyline):
    """Return the inclusive bounding box as [min_x, min_y, max_x, max_y]."""
    lower, upper = bounds_elementwise(polyline)
    return [lower[0], lower[1], upper[0], upper[1]]
def bounding_box_intersect(b1, b2):
    """Return True when boxes b1 and b2 ([x0, y0, x1, y1]) overlap on both
    the x and the y axis (boxes that merely touch at an edge generally
    do not count)."""
    def axis_overlaps(lo1, hi1, lo2, hi2):
        # Tag each endpoint with its owning box and sort; the intervals
        # overlap when the two smallest endpoints come from different boxes.
        endpoints = sorted([(lo1, 1), (hi1, 1), (lo2, 2), (hi2, 2)])
        return endpoints[0][1] != endpoints[1][1]
    return axis_overlaps(b1[0], b1[2], b2[0], b2[2]) and axis_overlaps(b1[1], b1[3], b2[1], b2[3])
def dist(pt1, pt2):
    """Euclidean distance between two 2-D points."""
    delta_x = pt2[0] - pt1[0]
    delta_y = pt2[1] - pt1[1]
    return (delta_x ** 2 + delta_y ** 2) ** 0.5
def close(a, b, e=0.01):
    """Return True when a and b differ by less than the tolerance e."""
    difference = a - b
    if difference < 0:
        difference = -difference
    return difference < e
def find_arc_center(pts, start):
    """Finds the center of an arc starting with pts[start:].
    Returns ((x,y), radius, count), or None.
    The count will never be fewer than 3.
    All the middle segments will have the same length, and their perpendicular
    bisectors will all point at the same `center`, which is `radius` units away
    from the points (not the bisected line).
    The first and last may be shorter, but if they were the same length, their
    perpendicular bisectors would satisfy the same requirement.
    TODO: Does this work with a circle? (It should not accidentally pick
    segments a half-circle away from each other.)
    NOTE(review): this function looks unfinished -- see inline notes; as
    written it always returns None.
    """
    # NOTE(review): this guard appears inverted -- it bails out whenever at
    # least three points remain, exactly when an arc *could* be found;
    # `if start >= len(pts) - 2` was probably intended. Confirm.
    if start < len(pts) - 2:
        return None
    ideal_dist = dist(pts[start + 1], pts[start + 2])
    i = start + 2
    # NOTE(review): `pts[i + 1]` can raise IndexError at i == len(pts) - 1
    # (bound should likely be len(pts) - 1); `end` is assigned but never
    # used, and no center/radius is ever computed or returned.
    while i < len(pts) and close(ideal_dist, dist(pts[i], pts[i + 1])):
        end = i + 1
        i += 1
    # pts[start+1:end] inclusive are segments the same length
# pts[start+1:end] inclusive are segments the same length
def find_circle_center(polyline):
    """Return the center of *polyline* if it approximates a circle, else None.

    NOTE(review): the two branches return different shapes -- the odd-length
    branch yields an (x, y) tuple while the even-length branch returns
    whatever find_arc_center() produces (currently always None; per its
    docstring a ((x, y), radius, count) triple). Callers such as
    Stitcher.promote_circles treat the result as a plain point; verify.
    """
    # Closed polylines will have first point duplicated, so odd are actually an
    # even number of unique points.
    if len(polyline) % 2 == 1:
        box = boundingbox(polyline)
        # Candidate center: the middle of the bounding box.
        center = ((box[0] + box[2]) / 2, (box[1] + box[3]) / 2)
        # Accept only if all unique points are (nearly) equidistant from it.
        dists = [dist(center, pt) for pt in polyline[:-1]]
        m, n = min(dists), max(dists)
        if abs(m - n) > 0.01:
            return None
    else:
        center = find_arc_center(polyline, 0)
        # TODO crosscheck using some other points
    return center
def main(args=None):
    """Command-line entry point: read a DXF, stitch segments, write output.

    NOTE(review): the *args* parameter is ignored -- parse_args() reads
    sys.argv and rebinds `args`; passing it through was probably intended.
    """
    parser = OptionParser()
    parser.add_option("-o", "--output-file", action="store", help="Name of output file")
    (options, args) = parser.parse_args()
    if not args:
        print("Input filename is required")
        sys.exit(1)
    # Default output name: <input>.new.dxf alongside the input file.
    s = Stitcher(args[0], options.output_file or (args[0] + ".new.dxf"))
    s.reconstruct_loops()
    s.promote_closed_loops()
    s.promote_circles()
    s.save()
if __name__ == "__main__":
    main()
| thatch/dxf_fix | dxf_fix/__init__.py | __init__.py | py | 7,096 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "ezdxf.readfile",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "ezdxf.new",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 46,... |
36771356783 | # -*- coding: utf-8 -*-
"""Training KGE models based on the sLCWA."""
import logging
from typing import Any, Mapping, Optional, Type
import torch
from torch.optim.optimizer import Optimizer
from pykeen.training.training_loop import TrainingLoop
from pykeen.training.utils import apply_label_smoothing
from pykeen.losses import CrossEntropyLoss
from pykeen.models.base import Model
from pykeen.sampling import BasicNegativeSampler, NegativeSampler
from pykeen.triples import SLCWAInstances
from pykeen.typing import MappedTriples
from pykeen.datasets import TriplesFactory
__all__ = [
'InductiveSLCWATrainingLoop',
]
logger = logging.getLogger(__name__)
class InductiveSLCWATrainingLoop(TrainingLoop):
    """A training loop that uses the stochastic local closed world assumption training approach."""
    # Negative sampler instance created in __init__ from the given class.
    negative_sampler: NegativeSampler
    # Losses that are incompatible with this loop (need LCWA-style labels).
    loss_blacklist = [CrossEntropyLoss]
    def __init__(
        self,
        model: Model,
        optimizer: Optional[Optimizer] = None,
        negative_sampler_cls: Optional[Type[NegativeSampler]] = None,
        negative_sampler_kwargs: Optional[Mapping[str, Any]] = None,
    ):
        """Initialize the training loop.

        :param model: The model to train
        :param optimizer: The optimizer to use while training the model
        :param negative_sampler_cls: The class of the negative sampler
            (defaults to BasicNegativeSampler when None)
        :param negative_sampler_kwargs: Keyword arguments to pass to the
            negative sampler class on instantiation
        """
        super().__init__(
            model=model,
            optimizer=optimizer,
        )
        if negative_sampler_cls is None:
            negative_sampler_cls = BasicNegativeSampler
        # Negatives are sampled from the transductive (training) factory.
        self.negative_sampler = negative_sampler_cls(
            triples_factory=self.triples_factory,
            **(negative_sampler_kwargs or {}),
        )
    @property
    def triples_factory(self) -> TriplesFactory:  # noqa: D401
        """The triples factory in the model (its transductive factory)."""
        return self.model.transductive_triples_factory
    @property
    def num_negs_per_pos(self) -> int:
        """Return number of negatives per positive from the sampler.

        Property for API compatibility
        """
        return self.negative_sampler.num_negs_per_pos
    def _create_instances(self, use_tqdm: Optional[bool] = None) -> SLCWAInstances:  # noqa: D102
        return self.triples_factory.create_slcwa_instances()
    @staticmethod
    def _get_batch_size(batch: MappedTriples) -> int:  # noqa: D102
        return batch.shape[0]
    def _process_batch(
        self,
        batch: MappedTriples,
        start: int,
        stop: int,
        label_smoothing: float = 0.0,
        slice_size: Optional[int] = None,
    ) -> torch.FloatTensor:  # noqa: D102
        # Slicing is not possible in sLCWA training loops
        if slice_size is not None:
            raise AttributeError('Slicing is not possible for sLCWA training loops.')
        # Send positive batch to device
        positive_batch = batch[start:stop].to(device=self.device)
        # Create negative samples
        neg_samples = self.negative_sampler.sample(positive_batch=positive_batch)
        # Ensure they reside on the device (should hold already for most simple
        # negative samplers, e.g. BasicNegativeSampler, BernoulliNegativeSampler)
        negative_batch = neg_samples.to(self.device)
        # Make it negative batch broadcastable (required for num_negs_per_pos > 1).
        negative_batch = negative_batch.view(-1, 3)
        # Compute negative and positive scores
        positive_scores = self.model.score_hrt(positive_batch)
        negative_scores = self.model.score_hrt(negative_batch)
        # _loss_helper is bound by the parent loop according to the loss type.
        loss = self._loss_helper(
            positive_scores,
            negative_scores,
            label_smoothing,
        )
        return loss
    def _mr_loss_helper(
        self,
        positive_scores: torch.FloatTensor,
        negative_scores: torch.FloatTensor,
        _label_smoothing=None,
    ) -> torch.FloatTensor:
        # Repeat positives scores (necessary for more than one negative per positive)
        if self.num_negs_per_pos > 1:
            positive_scores = positive_scores.repeat(self.num_negs_per_pos, 1)
        return self.model.compute_mr_loss(
            positive_scores=positive_scores,
            negative_scores=negative_scores,
        )
    def _self_adversarial_negative_sampling_loss_helper(
        self,
        positive_scores: torch.FloatTensor,
        negative_scores: torch.FloatTensor,
        _label_smoothing=None,
    ) -> torch.FloatTensor:
        """Compute self adversarial negative sampling loss."""
        return self.model.compute_self_adversarial_negative_sampling_loss(
            positive_scores=positive_scores,
            negative_scores=negative_scores,
        )
    def _label_loss_helper(
        self,
        positive_scores: torch.FloatTensor,
        negative_scores: torch.FloatTensor,
        label_smoothing: float,
    ) -> torch.FloatTensor:
        # Stack predictions: positives first, then negatives.
        predictions = torch.cat([positive_scores, negative_scores], dim=0)
        # Create binary targets matching that ordering.
        ones = torch.ones_like(positive_scores, device=self.device)
        zeros = torch.zeros_like(negative_scores, device=self.device)
        labels = torch.cat([ones, zeros], dim=0)
        if label_smoothing > 0.:
            labels = apply_label_smoothing(
                labels=labels,
                epsilon=label_smoothing,
                num_classes=self.model.num_entities,
            )
        # Normalize the loss to have the average loss per positive triple
        # This allows comparability of sLCWA and LCWA losses
        return self.model.compute_label_loss(
            predictions=predictions,
            labels=labels,
        )
    def _slice_size_search(
        self,
        batch_size: int,
        sub_batch_size: int,
        supports_sub_batching: bool,
    ) -> None:  # noqa: D102
        # Slicing is not possible for sLCWA, so this search can never succeed.
        if supports_sub_batching:
            report = "This model supports sub-batching, but it also requires slicing, which is not possible for sLCWA"
        else:
            report = "This model doesn't support sub-batching and slicing is not possible for sLCWA"
        logger.warning(report)
        raise MemoryError("The current model can't be trained on this hardware with these parameters.")
{
"api_name": "logging.getLogger",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "pykeen.training.training_loop.TrainingLoop",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "pykeen.sampling.NegativeSampler",
"line_number": 30,
"usage_type": "name"
... |
36082506802 | """
Gradient Penalty implementation for WGAN-GP
"""
import torch
import torch.nn
# Gradient penalty term for WGAN-GP (Gulrajani et al., 2017): penalizes the
# critic when the gradient norm at points interpolated between real and fake
# samples deviates from 1.
def gradient_penalty(critic, real, fake, device):
    """Compute the WGAN-GP gradient penalty.

    :param critic: callable mapping a (N, C, H, W) batch to one score per sample
    :param real: batch of real images, shape (N, C, H, W)
    :param fake: batch of generated images, same shape as ``real``
    :param device: device on which the interpolation coefficients are drawn
    :return: scalar tensor ``mean((||grad critic(x_hat)|| - 1) ** 2)``
    """
    BATCH_SIZE, C, H, W = real.shape
    # One mixing coefficient per sample; broadcasting covers the C/H/W
    # dimensions, so the former explicit .repeat(1, C, H, W) is unnecessary.
    epsilon = torch.rand((BATCH_SIZE, 1, 1, 1), device=device)
    interpolated_images = (real * epsilon) + (fake * (1 - epsilon))
    # autograd.grad below differentiates w.r.t. the interpolates. When
    # real/fake carry no autograd history (e.g. fake was detached for the
    # critic step) the interpolates are grad-less leaves and the call would
    # fail, so enable grad explicitly. When they already require grad they
    # are non-leaf tensors and the flag must not be touched.
    if not interpolated_images.requires_grad:
        interpolated_images.requires_grad_(True)

    # Calculate the critic scores on the interpolated samples.
    mixed_scores = critic(interpolated_images)

    gradient = torch.autograd.grad(
        inputs=interpolated_images,
        outputs=mixed_scores,
        grad_outputs=torch.ones_like(mixed_scores),
        create_graph=True,
        retain_graph=True
    )[0]
    # Per-sample L2 norm of the gradient.
    gradient = gradient.view(gradient.shape[0], -1)
    gradient_norm = gradient.norm(2, dim=1)
    # Named `penalty` (not `gradient_penalty`) to avoid shadowing this function.
    penalty = torch.mean((gradient_norm - 1) ** 2)
    return penalty
| EoinM96/pokeGAN | utils.py | utils.py | py | 885 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "torch.rand",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "torch.autograd.grad",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "torch.autograd",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "torch.ones_like",
... |
70996234665 | import json
from datetime import datetime, timezone
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import AccessMixin
from django.db.models import Count, Q, F
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import redirect
from django.views.generic import TemplateView
from rest_framework.authentication import SessionAuthentication
from rest_framework.decorators import api_view, authentication_classes, permission_classes
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from account.models import Account, Company
from account.functions import sms_text_replace, checkPhone, sendSmsOneContact
from board.models import Lead, LeadAction, Task, Telegram_user, LeadPoles, SMSTemplate, SMS_template_choise, \
UrlRedirect, NoteForm, FormQuestion, FormAnswer, AnswerQuestion, Product, Payment_type, Shopping
from board.serializers import LeadSerializer, TaskSerializer, CompanySerializer, Telegram_userSerializer
import xlwt
@api_view(['GET'])
@authentication_classes([SessionAuthentication])
@permission_classes([IsAuthenticated])
def export_excel(request):
    """Export the company's board leads to an .xls workbook.

    One worksheet per board column (LeadPoles); each sheet lists that column's
    open (status=0) leads, and under every lead that has sales a nested
    "sold products" table is appended. Returns the workbook as an
    ``application/ms-excel`` attachment.
    """
    response = HttpResponse(content_type='application/ms-excel')
    wb = xlwt.Workbook(encoding='utf-8')
    # Bold style used for all header rows.
    style = xlwt.XFStyle()
    font = xlwt.Font()
    font.bold = True
    style.font = font
    lead_poles = LeadPoles.objects.filter(company=request.user.company)
    for pole in lead_poles:
        row_num = 1
        ws = wb.add_sheet(pole.name)
        # Lead header labels (Uzbek) and the nested sales-table header.
        columns = ["Ro'yxatga olingan sana", "Nomi", "Telefon", "Qo'shimcha telefon", "Ro'yxatga olgan user"]
        columnsNested = ["", "Sana", "Mahsulot", "Soni", "Narxi", "Summa", "To'lov turi", "Izoh"]
        for col_num in range(len(columns)):
            ws.write(row_num, col_num, columns[col_num], style=style)
        leads = Lead.objects.filter(status=0, pole=pole, created_user__company=request.user.company)
        shoppings = Shopping.objects.filter(lead__created_user__company=request.user.company)
        for lead in leads:
            row_num += 1
            ws.write(row_num, 0, str(lead.date.strftime("%Y-%m-%d %H:%M:%S")))
            ws.write(row_num, 1, lead.name)
            ws.write(row_num, 2, lead.phone)
            ws.write(row_num, 3, lead.phone2)
            ws.write(row_num, 4, lead.created_user.first_name)
            nestedBor = False  # becomes True once the nested header is written for this lead
            for shop in shoppings:
                if shop.lead == lead:
                    if not nestedBor:
                        # First sale for this lead: emit the nested table header.
                        row_num += 1
                        ws.write(row_num, 3, "Sotilgan mahsulotlar", style=style)
                        row_num += 1
                        for col_num in range(len(columnsNested)):
                            ws.write(row_num, col_num, columnsNested[col_num], style=style)
                        nestedBor = True
                    row_num += 1
                    ws.write(row_num, 1, shop.date.strftime("%Y-%m-%d %H:%M"))
                    ws.write(row_num, 2, shop.product.name)
                    ws.write(row_num, 3, shop.count)
                    ws.write(row_num, 4, shop.price)
                    ws.write(row_num, 5, shop.amount)
                    ws.write(row_num, 6, shop.payment_type.name)
                    ws.write(row_num, 7, shop.comment)
    # Timestamped download filename.
    response['Content-Disposition'] = f'attachment; filename="Leads ' \
                                      f'{datetime.today().strftime("%Y-%m-%d %H-%M-%S")}.xls"'
    wb.save(response)
    return response
def register_lead_send_sms(lead: Lead):
    """Send the company's active 'lead registered' SMS template to a new lead."""
    template = SMSTemplate.objects.filter(
        active=True, company__active=True, type=SMS_template_choise[0][0],
        company=lead.created_user.company
    ).first()
    if not template:
        # No active template configured for this company - nothing to send.
        return
    body = sms_text_replace(template.text, lead)
    phone_ok, _normalized = checkPhone(lead.phone)
    if phone_ok:
        sendSmsOneContact(template.company, lead.phone, body)
# Telegram bot uchun boshlanishi <<<<<<<<<
@api_view(['GET'])
def telegram_bot_get_company(request):
    """Resolve a Telegram bot token to its company and register the chat user.

    Creates a Telegram_user record the first time a chat_id is seen for the
    given bot token; afterwards the already-registered user is returned.
    Responds 404 on any failure (missing/invalid query parameters, etc.).
    """
    try:
        token = request.GET.get('token')
        chat_id = int(request.GET.get('chat_id'))
        company = Company.objects.filter(tg_token=token).first()
        # NOTE: lookup is scoped by token as well, so the same chat_id talking
        # to a different company's bot gets its own record.
        user = Telegram_user.objects.filter(chat_id=chat_id, token=token).first()
        if user is None:
            user = Telegram_user.objects.create(
                chat_id=chat_id,
                token=token
            )
        return Response({
            "company": CompanySerializer(company).data,
            "user": Telegram_userSerializer(user).data
        })
    except Exception:
        return Response({"message": "Error"}, 404)
@api_view(['GET'])
def telegram_bot_add_phone(request):
    """Store the phone number a Telegram user submitted; advance them to step 2."""
    try:
        phone = request.GET.get('phone')
        token = request.GET.get('token')
        chat_id = int(request.GET.get('chat_id'))
        user = Telegram_user.objects.filter(chat_id=chat_id, token=token).first()
        user.step = 2
        user.phone = phone
        user.save()
        return Response({
            "user": Telegram_userSerializer(user).data
        })
    except Exception:
        # Covers bad query params and an unknown chat_id/token pair (user is None).
        return Response({"message": "Error"}, 404)
@api_view(['GET'])
def telegram_bot_add_name(request):
    """Store the Telegram user's name; advance them to step 3."""
    try:
        name = request.GET.get('name')
        token = request.GET.get('token')
        chat_id = int(request.GET.get('chat_id'))
        user = Telegram_user.objects.filter(chat_id=chat_id, token=token).first()
        user.step = 3
        user.name = name
        user.save()
        return Response({
            "user": Telegram_userSerializer(user).data
        })
    except Exception:
        # Covers bad query params and an unknown chat_id/token pair (user is None).
        return Response({"message": "Error"}, 404)
@api_view(['GET'])
def telegram_bot_add_company(request):
    """Store the Telegram user's company name; advance them to step 4."""
    try:
        company = request.GET.get('company')
        token = request.GET.get('token')
        chat_id = int(request.GET.get('chat_id'))
        user = Telegram_user.objects.filter(chat_id=chat_id, token=token).first()
        user.step = 4
        user.company = company
        user.save()
        return Response({
            "user": Telegram_userSerializer(user).data
        })
    except Exception:
        # Covers bad query params and an unknown chat_id/token pair (user is None).
        return Response({"message": "Error"}, 404)
@api_view(['GET'])
def telegram_bot_add_company_address(request):
    """Store the Telegram user's company address; advance them to step 5."""
    try:
        companyadd = request.GET.get('companyaddress')
        token = request.GET.get('token')
        chat_id = int(request.GET.get('chat_id'))
        user = Telegram_user.objects.filter(chat_id=chat_id, token=token).first()
        user.step = 5
        user.companyAddress = companyadd
        user.save()
        return Response({
            "user": Telegram_userSerializer(user).data
        })
    except Exception:
        # Covers bad query params and an unknown chat_id/token pair (user is None).
        return Response({"message": "Error"}, 404)
@api_view(['POST'])
def create_lead_by_tg(request):
    """Create a lead from Telegram bot data (at most one lead per chat per bot).

    The lead is attributed to the company's director account; returns 501 when
    this chat already produced a lead for this bot token, 404 on bad input.
    """
    try:
        data = request.data
        phone = str(data['phone']).replace('+', '')
        tg_token = data['token']
        tg_chatid = int(data['tg_chatid'])
        # One lead per Telegram chat per bot token.
        if Lead.objects.filter(tg_chatid=tg_chatid, created_user__company__tg_token=tg_token).exists():
            return Response({"message": "Error"}, 501)
        companyCOm = Company.objects.filter(tg_token=tg_token).first()
        created_user = Account.objects.filter(company=companyCOm, is_director=True).first()
        lead = Lead.objects.create(
            name=data['name'],
            company=data['company'],
            phone=phone,
            companyAddress=data['address'],
            created_user=created_user,
            joinBy=1,  # joined via Telegram
            tg_chatid=tg_chatid
        )
        LeadAction.objects.create(lead=lead, changer=created_user)
        register_lead_send_sms(lead)
        return Response({
            "user": LeadSerializer(lead).data
        })
    except Exception:
        return Response({"message": "Error"}, 404)
# Telegram bot uchun tugashi >>>>>>>>
def is_B2B(request):
    """Return True when the requesting user's company operates in B2B mode."""
    company_type = request.user.company.type
    return company_type == "B2B"
@api_view(['POST'])
@authentication_classes([SessionAuthentication])
@permission_classes([IsAuthenticated])
def create_lead(request):
    """Create a new lead in the company's first board column.

    B2B companies store company name/address on the lead; others store a phone.
    Also logs a LeadAction and fires the registration SMS. 404 on bad input.
    """
    try:
        data = request.data
        pole = LeadPoles.objects.filter(company=request.user.company).first()
        user = int(data['user'])
        fields = dict(
            name=data['name'],
            price=int(data['price']),
            pole=pole,
            created_user_id=user,
        )
        # B2B leads carry company info; B2C leads carry a phone number.
        if is_B2B(request):
            fields['company'] = data['company']
            fields['companyAddress'] = data['address']
        else:
            fields['phone'] = data['phone']
        lead = Lead.objects.create(**fields)
        LeadAction.objects.create(lead=lead, changer_id=user)
        register_lead_send_sms(lead)
        return Response(LeadSerializer(lead).data)
    except Exception:
        return Response({"message": "Error"}, 404)
@api_view(['POST'])
@authentication_classes([SessionAuthentication])
@permission_classes([IsAuthenticated])
def edit_lead(request):
    """Update an existing lead's basic fields and log the edit action."""
    try:
        data = request.data
        user = int(data['user'])
        lead = Lead.objects.get(id=int(data['lead']))
        lead.name = data['name']
        lead.price = int(data['price'])
        # B2B leads carry company info; B2C leads carry a phone number.
        if is_B2B(request):
            lead.company = data['company']
            lead.companyAddress = data['address']
        else:
            lead.phone = data['phone']
        LeadAction.objects.create(lead=lead, changer_id=user, status=1)  # status=1: edit action
        lead.save()
        return Response(LeadSerializer(lead).data)
    except Exception:
        return Response({"message": "Error"}, 404)
@api_view(['POST'])
@authentication_classes([SessionAuthentication])
@permission_classes([IsAuthenticated])
def change_lead_status(request):
    """Move a lead to another board column, recording old/new column and a note."""
    try:
        data = request.data
        user = int(data['user'])
        new_pole = int(data['status'])
        lead = Lead.objects.get(id=int(data['lead']))
        # Log the transition before mutating the lead (status=4: column change).
        LeadAction.objects.create(lead=lead, changer_id=user, note=data['izoh'],
                                  oldpole_id=lead.pole_id,
                                  newpole_id=new_pole, status=4)
        lead.pole_id = new_pole
        lead.save()
        return Response(LeadSerializer(lead).data)
    except Exception:
        return Response({"message": "Error"}, 404)
@api_view(['POST'])
@authentication_classes([SessionAuthentication])
@permission_classes([IsAuthenticated])
def lead_finished(request):
    """Mark a lead as won (status 5), storing the final price and finish time."""
    try:
        data = request.data
        user = int(data['user'])
        lead = Lead.objects.get(id=int(data['lead']))
        # Log the status transition (status=2: lifecycle change).
        LeadAction.objects.create(lead=lead, changer_id=user, oldStatus=lead.status, newStatus=5, status=2)
        lead.status = 5
        lead.finishedPrice = int(data['price'])
        lead.finishedDate = datetime.now(timezone.utc)  # timezone-aware timestamp
        lead.save()
        return Response(LeadSerializer(lead).data)
    except Exception:
        return Response({"message": "Error"}, 404)
@api_view(['POST'])
@authentication_classes([SessionAuthentication])
@permission_classes([IsAuthenticated])
def lead_losed(request):
    """Mark a lead as lost (status 4), with an explanatory note."""
    try:
        data = request.data
        user = int(data['user'])
        lead = Lead.objects.get(id=int(data['lead']))
        # Log the status transition (status=2: lifecycle change).
        LeadAction.objects.create(lead=lead, changer_id=user, note=data['izoh'],
                                  oldStatus=lead.status, newStatus=4, status=2)
        lead.status = 4
        lead.save()
        return Response(LeadSerializer(lead).data)
    except Exception:
        return Response({"message": "Error"}, 404)
@api_view(['POST'])
@authentication_classes([SessionAuthentication])
@permission_classes([IsAuthenticated])
def create_task(request):
    """Create a personal task for the given user id."""
    try:
        data = request.data
        task = Task.objects.create(name=data['name'], created_user_id=int(data['user']))
        return Response(TaskSerializer(task).data)
    except Exception:
        return Response({"message": "Error"}, 404)
@api_view(['POST'])
@authentication_classes([SessionAuthentication])
@permission_classes([IsAuthenticated])
def change_task_status(request):
    """Move a task between columns; stamp finishedDate when it reaches done (2)."""
    try:
        data = request.data
        new_status = int(data['status'])
        task = Task.objects.get(id=int(data['task']))
        task.status = new_status
        if new_status == 2:
            task.finishedDate = datetime.now(timezone.utc)  # timezone-aware timestamp
        task.save()
        return Response(TaskSerializer(task).data)
    except Exception:
        return Response({"message": "Error"}, 404)
@api_view(['POST'])
@authentication_classes([SessionAuthentication])
@permission_classes([IsAuthenticated])
def note_task(request):
    """Attach or replace the free-text note on a task."""
    try:
        data = request.data
        task = Task.objects.get(id=int(data['task']))
        task.note = data['note']
        task.save()
        return Response(TaskSerializer(task).data)
    except Exception:
        return Response({"message": "Error"}, 404)
@api_view(['POST'])
@authentication_classes([SessionAuthentication])
@permission_classes([IsAuthenticated])
def get_lead_count(request):
    """Count the company's leads matching the given statuses or board columns.

    'status' and 'pole_list' arrive as JSON-encoded lists. Leads with status=0
    live on the board, so they are matched through their column (pole) id.
    """
    try:
        data = request.data
        statuses = list(json.loads(data['status']))
        pole_ids = list(json.loads(data['pole_list']))
        count = Lead.objects \
            .filter(created_user__company=request.user.company) \
            .filter(Q(status__in=statuses) | (Q(status=0) & Q(pole_id__in=pole_ids))) \
            .count()
        return Response({"count": count})
    except Exception:
        return Response({"message": "Error"}, 404)
@api_view(['GET'])
@authentication_classes([SessionAuthentication])
@permission_classes([IsAuthenticated])
def check_pole_can_delete(request):
    """Report whether a board column is empty (status 200) or, if not, list
    how many leads each owner still has in it (status 500)."""
    try:
        pole_id = int(request.GET['pole_id'])
        leads = list(Lead.objects.filter(pole_id=pole_id).values(
            'created_user__username'
        ).annotate(
            count=Count('pole_id')
        ))
        if leads:
            return Response({"data": leads, "status": 500})
        return Response({"data": [], "status": 200})
    except Exception:
        return Response({"data": [], "status": 505})
@api_view(['POST'])
@authentication_classes([SessionAuthentication])
@permission_classes([IsAuthenticated])
def delete_pole(request):
    """Delete a board column, but only when no leads remain in it.

    Note: responds 200 even when the column was kept because it still holds
    leads — callers are expected to run check_pole_can_delete first.
    """
    try:
        pole_id = int(request.POST['pole_id'])
        if not Lead.objects.filter(pole_id=pole_id).exists():
            LeadPoles.objects.get(id=pole_id).delete()
        return Response({"status": 200})
    except Exception:
        return Response({"status": 505})
@login_required
def add_pole(request):
    """Director-only: add a new, non-empty-named board column for the company."""
    try:
        if request.user.is_director and request.POST['name'] != "":
            LeadPoles.objects.create(company=request.user.company, name=request.POST['name'])
    except Exception:
        pass  # best-effort: fall through to the board either way
    return redirect('board')
@login_required
def edit_pole(request):
    """Director-only: rename an existing board column."""
    try:
        if request.user.is_director:
            pole = LeadPoles.objects.get(id=int(request.POST['id']))
            pole.name = request.POST['name']
            pole.save()
    except Exception:
        pass  # best-effort: fall through to the board either way
    return redirect('board')
class Board(TemplateView, AccessMixin):
    """Kanban-style lead board: serializes the company's open leads per column.

    Open means status < 4 (not yet lost/won). The JSON payload differs by
    company type: B2B boards expose company name/address, B2C boards a phone.
    """
    template_name = 'Board.html'

    def get_context_data(self, *args, **kwargs):
        super(Board, self).get_context_data(**kwargs)
        company = self.request.user.company
        leads = Lead.objects.filter(status__lt=4, created_user__company=company)
        lead_poles = LeadPoles.objects.filter(company=company)
        is_b2b = company.type == "B2B"
        all_lead = []
        for lead in leads:
            entry = {
                "id": lead.id,
                "name": lead.name,
                "date": lead.date.strftime("%Y-%m-%d, %H:%M"),
                "price": lead.price,
            }
            # B2B entries carry company info, B2C entries carry the phone.
            if is_b2b:
                entry["company"] = lead.company
                entry["address"] = lead.companyAddress
            else:
                entry["phone"] = lead.phone
            all_lead.append(entry)
        return {
            "Board": "active",
            "leads": leads,
            "all_leads": json.dumps(all_lead),
            "lead_poles": lead_poles,
        }

    def dispatch(self, request, *args, **kwargs):
        # Anonymous users are bounced to the login flow.
        if not request.user.is_authenticated:
            return self.handle_no_permission()
        return super().dispatch(request, *args, **kwargs)
class TaskClass(TemplateView, AccessMixin):
    """Personal task board with 'todo' (status 0) and 'in progress' (status 1)."""
    template_name = 'task.html'

    def get_context_data(self, *args, **kwargs):
        super(TaskClass, self).get_context_data(**kwargs)
        tasks = Task.objects.filter(status__lt=2, created_user=self.request.user)
        serialized = [
            {
                "id": task.id,
                "name": task.name,
                "date": task.date.strftime("%Y-%m-%d, %H:%M"),
                "note": task.note,
            }
            for task in tasks
        ]
        return {
            "Task": "active",
            "group1": tasks.filter(status=0),
            "group2": tasks.filter(status=1),
            "all_tasks": json.dumps(serialized),
        }

    def dispatch(self, request, *args, **kwargs):
        # Anonymous users are bounced to the login flow.
        if not request.user.is_authenticated:
            return self.handle_no_permission()
        return super().dispatch(request, *args, **kwargs)
class Redirect_class(TemplateView, AccessMixin):
    """Lists the company's URL redirects and note-form redirects."""
    template_name = 'redirect/list.html'

    def get_context_data(self, *args, **kwargs):
        super(Redirect_class, self).get_context_data(**kwargs)
        company = self.request.user.company
        return {
            "redirect": "active",
            "urlRedirects": UrlRedirect.objects.filter(company=company),
            "formRedirects": NoteForm.objects.filter(company=company),
        }

    def dispatch(self, request, *args, **kwargs):
        # Anonymous users are bounced to the login flow.
        if not request.user.is_authenticated:
            return self.handle_no_permission()
        return super().dispatch(request, *args, **kwargs)
class NewUrlRedirect_class(TemplateView, AccessMixin):
    """Create a new social-media URL redirect for the user's company."""
    template_name = 'redirect/add.html'

    def post(self, *args, **kwargs):
        try:
            url = UrlRedirect()
            url.company = self.request.user.company
            url.name = self.request.POST['name']
            url.next_url = self.request.POST['next_url']
            url.save()
        except Exception:
            pass  # best-effort: always return to the list page
        return redirect('redirect_list')

    def dispatch(self, request, *args, **kwargs):
        # Anonymous users are bounced to the login flow.
        if not request.user.is_authenticated:
            return self.handle_no_permission()
        return super().dispatch(request, *args, **kwargs)
class EditUrlRedirect_class(TemplateView, AccessMixin):
    """Edit an existing URL redirect owned by the user's company."""
    template_name = 'redirect/edit.html'
    url_redirect = None  # resolved in dispatch()

    def post(self, *args, **kwargs):
        try:
            self.url_redirect.name = self.request.POST['name']
            self.url_redirect.next_url = self.request.POST['next_url']
            self.url_redirect.save()
        except Exception:
            pass  # best-effort: always return to the list page
        return redirect('redirect_list')

    def get_context_data(self, *args, **kwargs):
        context = super(EditUrlRedirect_class, self).get_context_data(**kwargs)
        context['url_redirect'] = self.url_redirect
        return context

    def dispatch(self, request, *args, **kwargs):
        if not request.user.is_authenticated:
            return self.handle_no_permission()
        pk = kwargs['pk']
        try:
            # Scope by company so users cannot edit other companies' redirects.
            self.url_redirect = UrlRedirect.objects.get(company=self.request.user.company, id=pk)
        except UrlRedirect.DoesNotExist:
            return redirect("redirect_list")
        return super().dispatch(request, *args, **kwargs)
def redirect_view(request):
    """Count a click for the given social network and forward to the target URL.

    Expects GET params: pk (UrlRedirect id), next_url, social. Unknown socials
    still redirect but increment nothing; any error falls back to 'home'.
    """
    socials = ("telegram", "instagram", "facebook", "youtube", "tiktok")
    try:
        pk = int(request.GET['pk'])
        next_url = request.GET['next_url']
        social = request.GET['social']
        if social in socials:
            counter = f'{social}_count'
            # F() expressions make both increments atomic at the DB level.
            UrlRedirect.objects.filter(id=pk) \
                .update(total_count=F('total_count') + 1,
                        **{counter: F(counter) + 1})
        return HttpResponseRedirect(next_url)
    except Exception:
        return redirect('home')
def social_correct(text):
    """Return True when *text* matches one of the known social-network choices."""
    return any(item[0] == text for item in FormAnswer.social_choise)
class PublicNoteForm_class(TemplateView, AccessMixin):
    """Public (unauthenticated) lead-capture form.

    GET renders the form identified by ?pk= with its questions; POST validates
    that every question was answered, stores a FormAnswer with one
    AnswerQuestion per question, and bumps the per-social click counters.
    """
    template_name = 'NoteForm/PublicNoteForm.html'

    def post(self, *args, **kwargs):
        try:
            pk = int(self.request.POST['noteForm'])
            noteForm = NoteForm.objects.get(pk=pk)
            social = self.request.POST['social']
            if social_correct(social):
                questions = FormQuestion.objects.filter(form=noteForm)
                # The submission is accepted only if every question has a value
                # (inputs are named name_<question id>).
                success = True
                for question in questions:
                    if self.request.POST.get(f'name_{question.id}', None) is None:
                        success = False
                if success:
                    formAnswer = FormAnswer.objects.create(
                        form=noteForm,
                        join_by=social
                    )
                    for question in questions:
                        AnswerQuestion.objects.create(
                            question=question,
                            answer=formAnswer,
                            text=self.request.POST[f'name_{question.id}']
                        )
                    # Denormalized per-social submission counters on the form.
                    if social == "telegram":
                        noteForm.telegram_count += 1
                    if social == "instagram":
                        noteForm.instagram_count += 1
                    if social == "facebook":
                        noteForm.facebook_count += 1
                    if social == "youtube":
                        noteForm.youtube_count += 1
                    if social == "tiktok":
                        noteForm.tiktok_count += 1
                    noteForm.total_count += 1
                    noteForm.save()
                    # User-facing Uzbek message: "Your data has been sent!"
                    messages.success(self.request, "Ma'lumotlaringiz yuborildi!")
        except:
            # User-facing Uzbek message: "Error"
            messages.error(self.request, "Xatolik")
        return redirect('PublicNoteForm_class')

    def get_context_data(self, *args, **kwargs):
        context = super(PublicNoteForm_class, self).get_context_data(**kwargs)
        try:
            pk = int(self.request.GET['pk'])
            social = self.request.GET['social']
            noteForm = NoteForm.objects.get(id=pk)
            questions = FormQuestion.objects.filter(form=noteForm)
            context['social'] = social
            context['noteForm'] = noteForm
            context['questions'] = questions
            context['success'] = True
        except:
            # Missing/invalid pk or social: template renders a failure state.
            context['success'] = False
        return context
class NewFormRedirect_class(TemplateView, AccessMixin):
    """Create a note form (with its questions) used as a social lead magnet."""
    template_name = 'NoteForm/add.html'

    def post(self, *args, **kwargs):
        try:
            noteForm = NoteForm()
            noteForm.name = self.request.POST['name']
            noteForm.image = self.request.FILES['image']
            noteForm.company = self.request.user.company
            noteForm.save()
            # One FormQuestion per submitted 'field' input, in submission order.
            for question in self.request.POST.getlist('field'):
                FormQuestion.objects.create(
                    form=noteForm,
                    name=question
                )
        except Exception:
            pass  # best-effort: always return to the list page
        return redirect('redirect_list')

    def dispatch(self, request, *args, **kwargs):
        # Anonymous users are bounced to the login flow.
        if not request.user.is_authenticated:
            return self.handle_no_permission()
        return super().dispatch(request, *args, **kwargs)
class ShowFormResults_class(TemplateView, AccessMixin):
    """Tabular view of all submissions collected by one note form.

    Builds, for every FormAnswer, a row whose 'answers' list is aligned with
    the form's question order (empty string where a question went unanswered).
    """
    template_name = 'NoteForm/showResults.html'
    noteForm = None  # resolved in dispatch()

    def get_context_data(self, *args, **kwargs):
        context = super(ShowFormResults_class, self).get_context_data(**kwargs)
        questions = FormQuestion.objects.filter(form=self.noteForm)
        answers = FormAnswer.objects.filter(form=self.noteForm)
        # Fetch all answer cells once; matched in Python below to avoid a
        # query per (answer, question) pair.
        answerQuestions = list(AnswerQuestion.objects.filter(answer__form=self.noteForm).values(
            'id', 'question', 'answer', 'text'
        ))
        formAnswers = []
        for answer in answers:
            # Cells belonging to this submission.
            answersQs = []
            for item in answerQuestions:
                if item['answer'] == answer.id:
                    answersQs.append(item)
            answerDic = {
                "id": answer.id,
                "date": answer.date.strftime("%Y-%m-%d %H:%M"),
                "join_by": answer.join_by,
                "answers": []
            }
            # Align cell texts with the question order; blank when missing.
            for question in questions:
                answerQ = None
                for item in answersQs:
                    if item['question'] == question.id:
                        answerQ = item
                if answerQ is not None:
                    answerDic['answers'].append(answerQ['text'])
                else:
                    answerDic['answers'].append("")
            formAnswers.append(answerDic)
        context['formAnswers'] = formAnswers
        context['questions'] = questions
        return context

    def dispatch(self, request, *args, **kwargs):
        if not request.user.is_authenticated:
            return self.handle_no_permission()
        try:
            pk = kwargs['pk']
            self.noteForm = NoteForm.objects.get(id=pk)
        except:
            return redirect('redirect_list')
        return super().dispatch(request, *args, **kwargs)
class AddShopping_class(TemplateView, AccessMixin):
    """Record a product sale (Shopping row) against a lead."""
    template_name = 'shopping/add.html'
    lead = None  # resolved in dispatch()

    def post(self, *args, **kwargs):
        try:
            data = self.request.POST
            count = int(data['count'])
            price = int(data['price'])
            Shopping.objects.create(
                company=self.request.user.company,
                lead=self.lead,
                product_id=int(data['product']),
                count=count,
                price=price,
                payment_type_id=int(data['payment_type']),
                comment=data['comment'],
                amount=count * price,  # denormalized row total
                account=self.request.user,
            )
        except Exception:
            pass  # best-effort: return to the lead either way
        # Redirect back to the lead's edit page with its id as a query param.
        response = redirect('edit')
        response['Location'] += f'?id={self.lead.id}'
        return response

    def get_context_data(self, *args, **kwargs):
        context = super(AddShopping_class, self).get_context_data(**kwargs)
        context['products'] = Product.objects.filter(company=self.request.user.company)
        context['payment_types'] = Payment_type.objects.filter(company=self.request.user.company)
        context['lead'] = self.lead
        return context

    def dispatch(self, request, *args, **kwargs):
        if not request.user.is_authenticated:
            return self.handle_no_permission()
        try:
            self.lead = Lead.objects.get(id=kwargs['pk'])
        except Exception:
            return redirect('board')
        return super().dispatch(request, *args, **kwargs)
class EditShopping_class(TemplateView, AccessMixin):
    """Edit an existing sale (Shopping row) attached to a lead."""
    template_name = 'shopping/edit.html'
    lead = None      # resolved in dispatch()
    shopping = None  # resolved in dispatch()

    def post(self, *args, **kwargs):
        try:
            data = self.request.POST
            count = int(data['count'])
            price = int(data['price'])
            self.shopping.product_id = int(data['product'])
            self.shopping.count = count
            self.shopping.price = price
            self.shopping.payment_type_id = int(data['payment_type'])
            self.shopping.comment = data['comment']
            self.shopping.amount = count * price  # keep denormalized total in sync
            self.shopping.account = self.request.user
            self.shopping.save()
        except Exception:
            pass  # best-effort: return to the lead either way
        # Redirect back to the lead's edit page with its id as a query param.
        response = redirect('edit')
        response['Location'] += f'?id={self.lead.id}'
        return response

    def get_context_data(self, *args, **kwargs):
        context = super(EditShopping_class, self).get_context_data(**kwargs)
        context['products'] = Product.objects.filter(company=self.request.user.company)
        context['payment_types'] = Payment_type.objects.filter(company=self.request.user.company)
        context['lead'] = self.lead
        context['shopping'] = self.shopping
        return context

    def dispatch(self, request, *args, **kwargs):
        if not request.user.is_authenticated:
            return self.handle_no_permission()
        try:
            # Shopping rows are company-scoped so foreign rows cannot be edited.
            self.lead = Lead.objects.get(id=kwargs['pk'])
            self.shopping = Shopping.objects.get(id=kwargs['shop_pk'], company=self.request.user.company)
        except Exception:
            return redirect('board')
        return super().dispatch(request, *args, **kwargs)
@login_required
def delete_simple_redirect(request, pk):
    """Delete a URL redirect, but only when it belongs to the user's company."""
    try:
        urlRedirect = UrlRedirect.objects.get(id=pk)
        if urlRedirect.company == request.user.company:
            urlRedirect.delete()
    except Exception:
        pass  # unknown id: just go back to the list
    return redirect('redirect_list')
@login_required
def delete_form_redirect(request, pk):
    """Delete a note form plus its collected answers (company-scoped)."""
    try:
        noteForm = NoteForm.objects.get(id=pk)
        if noteForm.company == request.user.company:
            # Remove dependent answer rows first to avoid orphans.
            AnswerQuestion.objects.filter(answer__form=noteForm).delete()
            noteForm.delete()
    except Exception:
        pass  # unknown id: just go back to the list
    return redirect('redirect_list')
| fnabiyevuz/crm | board/views.py | views.py | py | 32,513 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.http.HttpResponse",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "xlwt.Workbook",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "xlwt.XFStyle",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "xlwt.Font",
"... |
2022759755 | import logging as log
import math
from itertools import islice
import torch
from nltocode.dataset import EdgePathTransform
from nltocode.grammar.grammargraphvisitor import GrammarGraphVisitor
class BeamSearch():
    def __init__(self,
                 model,
                 grammargraph,
                 codegenerator,
                 num_beams,
                 disable_constraint_mask,
                 max_beam_length,
                 max_num_predicted_results,
                 batch_size=None,
                 mode='full',
                 treat_empty_results_as_invalid=False,
                 keep_invalid_results=False,
                 report_every_n_tokens=None,
                 report_k_best_beams=None,
                 ):
        '''
        Grammar-constrained beam search over the model's AST token vocabulary.

        For default beam search behaviour, set beam_search_mode = 'reduced' and max_num_predicted_results == num_beams
        batch_size defaults to num_beams and needs to be reduced if out-of-memory errors occur
        '''
        super().__init__()
        self.model = model
        self.grammargraph = grammargraph
        # Special token ids stored as graph-level attributes on the grammar graph.
        self.sos_id = self.grammargraph.graph['sentence_start_id']
        self.eos_id = self.grammargraph.graph['sentence_end_id']
        self.list_end_id = self.grammargraph.graph['list_end_id']
        self.grammargraphvisitor = GrammarGraphVisitor(self.grammargraph)
        self.codegenerator = codegenerator
        self.k_beams = num_beams
        self.disable_constraint_mask = disable_constraint_mask
        self.max_beam_length = max_beam_length
        # Pads/truncates edge-order paths to the model's maximum path depth.
        self.edge_path_transform = EdgePathTransform(model.max_path_depth)
        self.max_num_predicted_results = max_num_predicted_results
        self.batch_size = batch_size if batch_size is not None else num_beams
        self.mode = mode
        self.treat_empty_results_as_invalid = treat_empty_results_as_invalid
        self.keep_invalid_results = keep_invalid_results
        # Optional progress reporting knobs (see log_report).
        self.report_every_n_tokens = report_every_n_tokens
        self.report_k_best_beams = report_k_best_beams
    def perform_beam_search(self, nl_seq, char_seq=None, return_edge_order_seqs=False):
        """Run grammar-constrained beam search for one natural-language input.

        Decodes AST token sequences step by step, keeping only tokens the
        grammar allows, and collects finished beams (ending in eos) whose
        generated code is valid. Scores are negative log-probabilities, so
        lower is better. Returns a list of tuples
        (ast_seq_list, score, code_snippet[, edge_order_seq]).
        """
        device = nl_seq.device
        nl_seq_transposed = nl_seq.transpose(0, 1).tolist()
        nl_seq_str = [self.decode_nl_seq(n) for n in nl_seq_transposed]
        log.info("Input device = %s, shape = %s", device, nl_seq.size())
        log.info("Input sequence(s): %s" % nl_seq_transposed)
        log.info("Input text(s): %s" % nl_seq_str)
        self.edge_path_transform.device = device
        nl_len = nl_seq.size(0)
        nl_count = nl_seq.size(1)
        assert nl_count == 1, "Only one NL input can be processed at a time"
        if char_seq is not None:
            assert char_seq.size(0) == nl_len
            assert char_seq.size(1) == nl_count
            max_charseq_len = char_seq.size(2)
        else:
            max_charseq_len = None
        # Encode the NL input once; it is expanded per live beam below.
        src_encoding = self.model.encode_src(nl_seq, char_seq)
        d_model = src_encoding.size(2)
        # Every beam starts with the sentence-start token and score 0.
        ast_seq_tensor = torch.tensor([[self.sos_id]], dtype=torch.long, device=device)
        scores = torch.tensor([0.0], device=device)
        paths = [[]]
        edge_order_seqs = [[]]
        edge_order_paths = [[]]
        result_beams = []
        # Pruning threshold: worst score among the results we would return.
        worst_score_to_be_returned = float("inf")
        ast_seq_len = ast_seq_tensor.size(0)
        num_live_beams = ast_seq_tensor.size(1)
        num_active_beams = self.compute_num_active_beams(result_beams)
        while ast_seq_len <= self.max_beam_length and num_live_beams > 0 and num_active_beams > 0:
            # Broadcast the (single) NL input across all live beams.
            nl_seq_expanded = nl_seq.expand((nl_len, num_live_beams))
            char_seq_expanded = expand_or_none(char_seq, (nl_len, num_live_beams, max_charseq_len))
            src_encoding_expanded = src_encoding.expand((nl_len, num_live_beams, d_model))
            # Advance every beam through the grammar graph to learn which
            # tokens are legal next.
            allowed_next_token_lists, edge_order_paths, edge_order_seqs, paths = self.apply_visit_grammargraph(
                ast_seq_tensor, edge_order_paths, edge_order_seqs, num_live_beams, paths)
            edge_order_seqs_transformed = [
                self.edge_path_transform(filter_ast_nodes(seq)).to(device)
                for seq in edge_order_seqs
            ]
            edge_order_seq_tensor = torch.stack(edge_order_seqs_transformed, dim=1)
            log_probs = self.model(nl_seq_expanded, char_seq_expanded, ast_seq_tensor, edge_order_seq_tensor,
                                   src_encoding_expanded)
            # Dim: batch_size x vocab_size
            if not self.disable_constraint_mask:
                # Mask out grammar-illegal tokens before taking the top-k.
                next_tokens_log_probs = compute_next_token_allowed_log_probs(log_probs, allowed_next_token_lists)
            else:
                next_tokens_log_probs = log_probs[-1, :, :]
            top_k_values, top_k_vocab_ids = torch.topk(next_tokens_log_probs, num_active_beams, dim=-1, sorted=False)
            top_k_shape = top_k_values.shape
            top_k_prev_scores = scores.unsqueeze(-1).expand(top_k_shape)
            # Accumulated score = previous score minus log-prob (lower = better).
            top_k_scores = top_k_prev_scores - top_k_values
            # Drop candidates already worse than the worst returnable result.
            top_k_is_retained = (top_k_scores <= worst_score_to_be_returned).logical_and(top_k_scores != float('inf'))
            retained_beam_ids = torch.arange(num_live_beams).unsqueeze(-1).expand(top_k_shape)[top_k_is_retained]
            retained_vocab_ids: torch.Tensor = top_k_vocab_ids[top_k_is_retained]
            retained_scores = top_k_scores[top_k_is_retained]
            retained_is_finished = retained_vocab_ids == self.eos_id
            retained_is_unfinished = retained_is_finished.logical_not()
            finished_beam_ids = retained_beam_ids[retained_is_finished]
            # Harvest beams that just emitted eos.
            for finished_beam_id in finished_beam_ids.tolist():
                finished_ast_seq_tensor_prefix = ast_seq_tensor[:, finished_beam_id]
                finished_vocab_id = torch.tensor([self.eos_id], dtype=torch.long, device=device)
                finished_ast_seq = torch.cat((finished_ast_seq_tensor_prefix, finished_vocab_id), dim=0)
                finished_score = scores[finished_beam_id]
                finished_ast_seq_list = finished_ast_seq.tolist()
                code_snippet = self.codegenerator.generate_code(finished_ast_seq_list)
                if self.treat_empty_results_as_invalid:
                    code_snippet_is_valid = bool(code_snippet)
                else:
                    code_snippet_is_valid = code_snippet is not None
                if code_snippet_is_valid or self.keep_invalid_results:
                    result_beam = finished_ast_seq_list, finished_score, code_snippet
                    if return_edge_order_seqs:
                        # Take one final grammar step (to eos) so the returned
                        # edge-order sequence covers the full beam.
                        finished_path = paths[finished_beam_id]
                        finished_edge_order_seq = edge_order_seqs[finished_beam_id]
                        finished_edge_order_path = edge_order_paths[finished_beam_id]
                        _, _, edge_order_seq, _ = self.visit_grammargraph(
                            torch.tensor(self.eos_id, device=device),
                            finished_path,
                            finished_edge_order_seq,
                            finished_edge_order_path
                        )
                        result_beam = result_beam + (edge_order_seq,)
                    log.debug('Finished beam: %s', result_beam)
                    result_beams.append(result_beam)
                    result_beams.sort(key=lambda tup: tup[1])
                    # result_score_stats = pandas.Series([tup[1] for tup in result_beams], dtype=float)
                    # log.debug("Result score stats: %s", result_score_stats.describe())
                    if len(result_beams) >= self.max_num_predicted_results:
                        # Tighten the pruning threshold to the worst kept score.
                        result_beams = result_beams[:self.max_num_predicted_results]
                        worst_beam_to_be_returned = result_beams[-1]
                        worst_score_to_be_returned = worst_beam_to_be_returned[1]
                        log.debug("New maximum score to be returned: %.3f", worst_score_to_be_returned)
                else:
                    log.debug("Illegal AST seq: %s", self.decode_ast_seq(finished_ast_seq_list))
            # Keep the best unfinished candidates as the next live beams.
            unfinished_scores = retained_scores[retained_is_unfinished]
            unfinished_beam_ids = retained_beam_ids[retained_is_unfinished]
            unfinished_vocab_ids = retained_vocab_ids[retained_is_unfinished]
            num_live_beams = min(num_active_beams, unfinished_scores.size(0))
            live_scores, unfinished_live_indexes = unfinished_scores.topk(num_live_beams, largest=False, sorted=True)
            live_beam_ids = unfinished_beam_ids[unfinished_live_indexes]
            live_ast_seq_tensor = ast_seq_tensor[:, live_beam_ids]
            live_vocab_ids = unfinished_vocab_ids[unfinished_live_indexes]
            if self.report_every_n_tokens and (ast_seq_len % self.report_every_n_tokens == 0):
                self.log_report(ast_seq_tensor, log_probs, scores)
            live_beam_ids_list = live_beam_ids.tolist()
            # Append the chosen tokens and realign per-beam bookkeeping lists.
            ast_seq_tensor = torch.cat((live_ast_seq_tensor, live_vocab_ids.unsqueeze(0)), dim=0)
            paths = [paths[i] for i in live_beam_ids_list]
            edge_order_seqs = [edge_order_seqs[i] for i in live_beam_ids_list]
            edge_order_paths = [edge_order_paths[i] for i in live_beam_ids_list]
            scores = live_scores
            ast_seq_len = ast_seq_tensor.size(0)
            num_live_beams = ast_seq_tensor.size(1)
            num_active_beams = self.compute_num_active_beams(result_beams)
        log.debug('BEAMS: %s', result_beams)
        return result_beams
def log_report(self, ast_seq_tensor, log_probs, scores):
ast_seq_len = ast_seq_tensor.size(0)
num_live_beams = ast_seq_tensor.size(1)
log.info("Current AST sequence length: %s" % ast_seq_len)
num_reported_beams = self.report_k_best_beams or num_live_beams
reported_ast_seqs = ast_seq_tensor.transpose(0, 1).tolist()
reported_scores = scores.tolist()
for i, reported_ast_seq in islice(enumerate(reported_ast_seqs), num_reported_beams):
token_scores = score_ast_seq_tensor(ast_seq_tensor, log_probs)
reported_ast_seq_dec = self.decode_ast_seq(reported_ast_seq)
reported_token_scores = [0] + token_scores[:, i].tolist()
reported_token_score_strs = ["%4.1e" % token_score for token_score in reported_token_scores]
reported_beam_tokens_with_scores = list(zip(reported_ast_seq_dec, reported_token_score_strs))
log.info("Score %9.6f: %s" % (reported_scores[i], reported_beam_tokens_with_scores))
def decode_nl_seq(self, nl_seq):
return decode_nl_seq(nl_seq, self.grammargraph, self.model.vocabtgt_size - self.model.vocabsrc_size)
def decode_ast_seq(self, ast_seq):
return decode_ast_seq(ast_seq, self.grammargraph)
def compute_num_active_beams(self, result_beams):
if self.mode == 'full':
return self.k_beams
elif self.mode == 'reduced':
return max(0, self.k_beams - len(result_beams))
elif self.mode == 'scaled':
return math.ceil(self.k_beams * (1 - len(result_beams) / self.max_num_predicted_results))
else:
raise ValueError("Unknown beam search mode '%s'" % self.mode)
def apply_visit_grammargraph(self, ast_seq_tensor, edge_order_paths, edge_order_seqs, num_live_beams, paths):
graphvisitor_results = (
self.visit_grammargraph(
ast_seq_tensor[-1, i],
paths[i],
edge_order_seqs[i],
edge_order_paths[i]
)
for i in range(num_live_beams)
)
allowed_next_token_lists, paths, edge_order_seqs, edge_order_paths = zip(*graphvisitor_results)
return allowed_next_token_lists, edge_order_paths, edge_order_seqs, paths
def visit_grammargraph(self, node, path, edge_order_seq, edge_order_path):
return self.grammargraphvisitor.visit_graph_edge_order_path_beam_search(
node.item(),
path.copy(),
edge_order_seq.copy(),
edge_order_path.copy()
)
def compute_next_token_allowed_log_probs(log_probs, allowed_next_token_lists):
    """Mask the last-step log probs so only grammar-allowed tokens survive.

    Returns a (num_beams, vocab) tensor that is -inf everywhere except at
    each beam's allowed token ids, where the original log prob is kept.
    """
    allowed_log_probs = torch.full(log_probs.shape[1:], fill_value=float("-inf"), device=log_probs.device)
    last_step_log_probs = log_probs[-1]
    for beam, allowed_next_token_list in enumerate(allowed_next_token_lists):
        if not allowed_next_token_list:
            # A beam without any legal continuation stays all -inf.
            log.warning("No allowed successor tokens in beam %s at step %s", beam, log_probs.size(0))
            continue
        allowed_next_tokens = torch.tensor(allowed_next_token_list, dtype=torch.long,
                                           device=log_probs.device)
        allowed_log_probs[beam, allowed_next_tokens] = last_step_log_probs[beam, allowed_next_tokens]
    return allowed_log_probs
def expand_or_none(t: torch.Tensor, shape):
    """Expand *t* to *shape*, passing None through unchanged."""
    return None if t is None else t.expand(shape)
def decode_nl_seq(nl_seq, grammargraph, vocabsrc_offset):
    """Turn NL token ids into text.

    Strips a fixed-length suffix from each node label, drops the first and
    last (BOS/EOS) tokens, and maps the sentencepiece '▁' marker to spaces.
    """
    suffix_length = 11
    labels = []
    for token in nl_seq:
        label = grammargraph.nodes[vocabsrc_offset + token]['label']
        labels.append(label[:-suffix_length])
    return ''.join(labels[1:-1]).replace('▁', ' ')
def decode_ast_seq(ast_seq, grammargraph):
    """Map each AST token id to its grammar-graph node label."""
    node_table = grammargraph.nodes
    return [node_table[token]['label'] for token in ast_seq]
def filter_ast_nodes(edge_order_seq):
    """Keep the edge-order paths of entries whose type is an AST node kind."""
    ast_node_types = ('node', 'strliteral', 'literal', 'special')
    kept_paths = []
    for eo_path, node_type in edge_order_seq:
        if node_type in ast_node_types:
            kept_paths.append(eo_path)
    return kept_paths
def score_ast_seq_tensor(ast_seq_tensor, log_probs):
    """Per-token negative log probs for each beam, skipping the first (BOS) token."""
    target_ids = ast_seq_tensor[1:, :].unsqueeze(-1)
    gathered = log_probs.gather(-1, target_ids)
    return -gathered.squeeze(-1)
| SmartDataAnalytics/codeCAI | nl2codemodel/src/nltocode/beamsearch.py | beamsearch.py | py | 13,758 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "nltocode.grammar.grammargraphvisitor.GrammarGraphVisitor",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "nltocode.dataset.EdgePathTransform",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 62,
"usage_type... |
13650180009 |
'''
openweathermap.org
api_key = f54b13c3c6ecf6d1d51c6be402b095dc
https://api.openweathermap.org/data/2.5/weather?lat={lat}&lon={lon}&appid={API key}
'''
import requests
APP_ID = "f54b13c3c6ecf6d1d51c6be402b095dc"
# response = requests.get('https://google.com')
# print(response)
def getWeatherData(lat, lon):
    """Fetch current weather for (lat, lon) from OpenWeatherMap and return the parsed JSON."""
    global APP_ID
    print(APP_ID)
    # Bug fix: the URL must be an f-string; previously the literal placeholders
    # "{lat}", "{lon}" and "{APP_ID}" were sent to the API, so every request failed.
    response = requests.get(f'https://api.openweathermap.org/data/2.5/weather?lat={lat}&lon={lon}&appid={APP_ID}')
    data = response.json()  # parse once instead of twice
    print(data)
    return data
# for test...
# getWeatherData(37.22349686935068, 127.18721018723127) | moduchobo/mj_metrooo | MyApp/realtimeWeather 2.py | realtimeWeather 2.py | py | 592 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 20,
"usage_type": "call"
}
] |
33981754058 | # # https://www.youtube.com/watch?v=bD05uGo_sVI&list=PL-osiE80TeTt2d9bfVyTiXJA-UTHn6WwU&index=36
# '''
# Generators - How to use them and the benefits you receive
# Generator will not hold any value while printing hence it is good for performance - it is just holding current value from the loop
# '''
import memory_profiler
import random
import time
# Candidate first names and majors used to build random person records below.
name = ['John','Corey','Adam','Stev','Rick','Thomas']
majors = ['Math','Science','History','Computer','Geography','Business']
# Baseline memory footprint before any records are created.
print ('Memory (Before holding records) : {}MB'.format(memory_profiler.memory_usage()))
# this will consume more memory when we use list for holding 1000000 records
def people_list(num_people):
    """Return a list of num_people random person dicts (all held in memory at once)."""
    result = []
    for i in range(num_people):
        person = {
            'id': i,  # bug fix: was the literal string 'i' for every record
            'name': random.choice(name),
            'major': random.choice(majors)
        }
        result.append(person)
    return result
def people_generator(num_people):
    """Lazily yield num_people random person dicts one at a time (O(1) memory)."""
    for i in range(num_people):
        person = {
            'id': i,  # bug fix: was the literal string 'i' for every record
            'name': random.choice(name),
            'major': random.choice(majors)
        }
        yield person
# List-based timing kept for comparison; uncomment to measure the eager version.
# t1 = time.process_time()
# people = people_list(1000000)
# t2 = time.process_time()
#
# print('Memory using List (After holding records) : {} MB'.format(memory_profiler.memory_usage()))
# print('Took {} Seconds'.format(t2-t1))
# print('\n')
# Creating the generator is O(1): no records are materialized yet, which is
# why memory usage barely changes and the elapsed time is near zero.
T1 = time.process_time()
peopleGen = people_generator(1000000)
T2 = time.process_time()
print('Memory using Generator (After holding records) : {} MB'.format(memory_profiler.memory_usage()))
print('Took {} Seconds'.format(T2-T1))
| techtolearn/Learning | Cory_Basic/Prg36_generatroPerfomance.py | Prg36_generatroPerfomance.py | py | 1,634 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "memory_profiler.memory_usage",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "random.choic... |
44008043108 | import pygame
# import twitter
from pygame.locals import *
from io import BytesIO
import urllib.request as ur
pygame.init()
# Template image ("alot" with a transparent hole) loaded from a fixed path.
alotfiles = "D:\\Users\\Charles Turvey\\Pictures\\Art\\Alots\\"
alotpath = alotfiles + "Blank.png"
alot = pygame.image.load_extended(alotpath)
w = alot.get_width()
h = alot.get_height()
# Bounding box of the fully-transparent region, initialised to "empty"
# extremes so min/max below converge on the real frame.
lft = w
rgt = 0
top = h
btm = 0
alotalpha = pygame.surfarray.array_alpha(alot)
# NOTE(review): O(w*h) Python loop; surfarray returns a numpy array, so this
# could be vectorized (np.argwhere(alotalpha == 0)) if startup is slow.
for i in range(w):
    for j in range(h):
        if alotalpha[i][j] == 0:
            lft = min(lft, i)
            rgt = max(rgt, i)
            top = min(top, j)
            btm = max(btm, j)
framew = rgt - lft
frameh = btm - top
screen = pygame.display.set_mode((w, h))
alot.convert()
# Distilled from https://github.com/hardikvasa/google-images-download/blob/master/google-images-download.py
# Build a Google Images search URL from the user's terms and scrape the first
# ten result image URLs out of the raw response HTML.
search = "%20".join(input(">>> ").split())
url = 'https://www.google.com/search?q=' + search + \
    '&espv=2&biw=1366&bih=667&site=webhp&source=lnms&tbm=isch&sa=X&ei=XosDVaCXD8TasATItgE&ved=0CAcQ_AUoAg'
headers = {'User-Agent':
           "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36"}
req = ur.Request(url, headers=headers)
resp = ur.urlopen(req)
respData = str(resp.read())
print(respData)
urls = []
# NOTE(review): fragile screen-scraping - depends on Google's current markup
# ("rg_meta" metadata blocks with an "ou" original-url field).
for i in range(10):
    start_line = respData.find('"class="rg_meta"')
    start_content = respData.find('"ou"', start_line + 1)
    end_content = respData.find(',"ow"', start_content + 1)
    url = str(respData[start_content + 6: end_content - 1])
    print(url)
    urls.append(url)
    respData = respData[end_content:]
def preparealot(imageno):
    """Load result image #imageno, scale it to cover the frame, and composite
    it behind the alot template on screen.

    Falls back to the blank template when no usable URL remains.  Mutates the
    module-level `urls` list by dropping URLs that fail to download.
    """
    otherfile = alotpath
    # NOTE(review): `> 1` means the final remaining URL is never attempted;
    # presumably this guards against an endless retry loop - confirm intent.
    while len(urls) > 1:
        try:
            otherfile = BytesIO(ur.urlopen(urls[imageno]).read())
            break
        except Exception as E:
            # Drop the failing URL and wrap the index back into range.
            failedurl = urls.pop(imageno)
            imageno %= len(urls)
            print("ERROR WITH {}: {}".format(failedurl, str(E)))
    other = pygame.image.load_extended(otherfile)
    other.convert()
    imagew = other.get_width()
    imageh = other.get_height()
    # Scale so the image fully covers the transparent frame (may overflow one axis).
    scalew = framew / imagew
    scaleh = frameh / imageh
    scale = max(scalew, scaleh)
    other = pygame.transform.scale(other, (int(imagew * scale), int(imageh * scale)))
    # Centre the scaled image inside the frame, then draw the template on top.
    screen.blit(other, (lft + int((framew - other.get_width()) / 2), top + int((frameh - other.get_height()) / 2)))
    screen.blit(alot, (0, 0))
    pygame.display.flip()
pointer = 0
preparealot(0)
# Main event loop: left/right arrows cycle through candidate images,
# Enter saves the current composite, Esc or window-close exits.
while True:
    for e in pygame.event.get():
        if e.type == QUIT:
            quit()
        elif e.type == KEYDOWN:
            if e.key == K_ESCAPE:
                exit()
            elif e.key == K_RIGHT:
                pointer = (pointer + 1) % len(urls)
                preparealot(pointer)
            elif e.key == K_LEFT:
                pointer = (pointer - 1) % len(urls)
                preparealot(pointer)
            elif e.key == K_RETURN:
                # Save under a filename derived from the search terms.
                pygame.image.save(screen,
                                  alotfiles + "Autogenerated\\alot_of_{}.png".format("_".join(search.split("%20"))))
| ninjafrostpn/PythonProjects | Alotter.py | Alotter.py | py | 3,066 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pygame.init",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "pygame.image.load_extended",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "pygame.surfar... |
19876279061 | import sys
import os
from setuptools import setup, find_packages, Extension
# Import setupext ONLY if you want custom triggers
# If you only use prep_cmd, you only need to include setupext in the package
# import setupext
os.chdir(os.path.dirname(sys.argv[0]) or ".")
'''
==============================================================================
PACKAGE DATA
==============================================================================
'''
# You _SHOULD_ set these
name = 'setupext'
version = '0.24.8' # oldver: '0.24.7'
description = 'Utility classes and methods for using setuptools'
install_requires = [
]
packages = find_packages()
license = (
'License :: OSI Approved :: '
'GNU Lesser General Public License v3 or later (LGPLv3+)'
)
# The following are optional
url = 'https://github.com/sundarnagarajan/setupext'
download_url = 'https://github.com/sundarnagarajan/setupext.git'
author = 'Sundar Nagarajan'
author_email = 'sun.nagarajan@gmail.com'
maintainer = author
# maintainer_email = author_email
classifiers = [
'Development Status :: 4 - Beta',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: Implementation :: PyPy',
('License :: OSI Approved :: '
'GNU Lesser General Public License v3 or later (LGPLv3+)'),
]
zip_safe = False
'''
==============================================================================
ADDITIONAL DATA FILES
==============================================================================
'''
data_dirs = [
'doc',
]
'''
==============================================================================
CUSTOM STEPS
==============================================================================
'''
'''
==============================================================================
ADDITIONAL keyword args to setup()
==============================================================================
'''
ADDL_KWARGS = dict(
)
'''
==============================================================================
DO NOT CHANGE ANYTHING BELOW THIS
==============================================================================
'''
def prepare_c_source(cmd):
    '''
    cmd-->str: command with arguments

    Registers *cmd* as a pre-build_ext hook in setupext's config and returns
    the resulting setuptools cmdclass mapping.
    '''
    # Imported lazily so plain metadata operations don't require setupext.
    import setupext
    setupext.config['build_ext']['pre']['cmdlist'] = [cmd]
    return setupext.get_cmdclass()
def get_longdesc(default=''):
    '''
    Return the contents of the first readable README file in the current
    directory, or *default* when none can be read.

    Returns-->str
    '''
    files = ['README.rst', 'README.md', 'README.txt', 'README']
    for f in files:
        try:
            # Context manager closes the handle; the original leaked it.
            with open(f, 'r') as fobj:
                return fobj.read()
        except (OSError, UnicodeError):
            # Narrowed from a bare except: only missing/unreadable files
            # should fall through to the next candidate.
            continue
    return default
def get_dirtree(topdir, dirlist=[]):
    '''
    topdir-->str: must be name of a dir under current working dir
    dirlist-->list of str: must all be names of dirs under topdir

    Returns a list of file paths (relative to topdir) found under each
    directory in dirlist; returns whatever was collected so far on error.
    (The mutable default is safe: dirlist is never mutated here.)
    '''
    ret = []
    curdir = os.getcwd()
    if not os.path.isdir(topdir):
        return ret
    os.chdir(topdir)
    try:
        for dirname in dirlist:
            if not os.path.isdir(dirname):
                continue
            for (d, ds, fs) in os.walk(dirname):
                for f in fs:
                    ret.append(os.path.join(d, f))
        return ret
    except OSError:
        # Narrowed from a bare except; filesystem errors yield a partial list.
        return ret
    finally:
        # Always restore the caller's working directory.
        os.chdir(curdir)
# Make some keywords MANDATORY
for k in [
    'name', 'version', 'description', 'license',
]:
    if k not in locals():
        raise Exception('Missing mandatory keyword: ' + k)
# keywords that are computed from variables
# NOTE: this file deliberately uses locals() introspection so the package
# author only has to define the variables they actually need above.
dirlist = locals().get('data_dirs', None)
if isinstance(dirlist, list):
    package_dir = {name: name}
    package_data = {name: get_dirtree(topdir=name, dirlist=dirlist)}
long_description = get_longdesc(description)
known_keywords = [
    'name', 'version', 'packages', 'description', 'license',
    'install_requires', 'requires', 'setup_requires',
    'package_dir', 'package_data',
    'zip_safe', 'classifiers', 'keywords',
    'long_description', 'url', 'download_url',
    'author', 'author_email', 'maintainer', 'maintainer_email',
]
# Collect only the keywords that were actually defined above.
kwdict = {}
for k in known_keywords:
    if k in locals():
        kwdict[k] = locals()[k]
if 'prep_cmd' in locals():
    kwdict['cmdclass'] = prepare_c_source(locals()['prep_cmd'])
# Do not compile ext_modules during build phase - wasteful
if len(sys.argv) > 1 and sys.argv[1] != 'build':
    if 'ext_modules' in locals():
        kwdict['ext_modules'] = [Extension(**x) for x in
                                 locals()['ext_modules']]
# Additional keywords specified by user - shouldn't be required, normally
kwdict.update(ADDL_KWARGS)
setup(**kwdict)
| sundarnagarajan/setupext | setup.py | setup.py | py | 4,594 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "os.chdir",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 9,
... |
24338300355 | """ distributions
Provides likelihood functions for distributions and a method to assign the
most likely distribution to observed numeric data.
discreteData used to determine whether data is discrete or not.
"""
import numpy as np
from scipy import special as sp
from scipy import stats
import sys
def discreteData(data : np.array, discreteProportion = .5) -> tuple[bool,str,float]:
    """
    Decide whether *data* looks discrete and, if so, which discrete family
    fits best.

    Data is considered potentially discrete when it is integral, non-negative,
    and has at most discreteProportion * len(data) distinct values.

    Returns (is_discrete, family_name, loss); family_name and loss are None
    when the data is not discrete.
    """
    # Integer and Non-negative.  Generalized: accept any integer dtype
    # (int8/16/32/64), not only the exact string "int32".
    if not np.issubdtype(data.dtype, np.integer) or np.min(data) < 0:
        return (False, None, None)
    buckets = np.unique(data, return_counts=True)
    # Proportion test.  Bug fix: compare the number of distinct values
    # (len(buckets[0])) - len(buckets) is always 2 (values, counts).
    if len(buckets[0]) > discreteProportion * len(data):
        return (False, None, None)
    # Test discrete families
    families = ["geometric", "poisson"]
    familyLoss = [geometricLoss(data, buckets), poissonLoss(data, buckets)]
    minIndex = np.argmin(familyLoss)
    return (True, families[minIndex], familyLoss[minIndex])
def ctsData(data : np.array) -> tuple[str,float]:
    """
    Fit the continuous candidate families and return the best one.

    Returns (family_name, loss) for the family with the lowest loss; the
    exponential family is only scored when all values are non-negative.
    """
    ordered = np.sort(data)
    families = ["exponential", "gaussian"]
    # Exponential cannot model negative data: give it a prohibitive loss.
    if ordered[0] >= 0:
        exp_loss = exponentialLoss(ordered)
    else:
        exp_loss = sys.float_info.max
    familyLoss = [exp_loss, gaussianLoss(ordered)]
    minIndex = np.argmin(familyLoss)
    return (families[minIndex], familyLoss[minIndex])
def distributionOptimizer(data : np.array, discreteProportion = .5) -> tuple[str,float]:
    """
    Pick the best-fitting distribution family for *data* using the
    standardized losses.

    Note: the discrete branch returns the 3-tuple (True, family, loss) from
    discreteData, while the continuous branch returns (family, loss).
    """
    verdict = discreteData(data, discreteProportion)
    is_discrete = verdict[0]
    return verdict if is_discrete else ctsData(data)
def geometricLoss(data : np.array, buckets : tuple[np.array,np.array]) -> float:
    """
    Geometric Loss Function
    Accepts an np.array of data, and its buckets: (unique_values,counts)
    Assumes buckets are non-negative integers

    Lower is better; used by discreteData to rank discrete families.
    """
    n = len(data)
    # Begin at zero
    # NOTE(review): buckets[0] is the *array* of unique values, so
    # `buckets[0] == 0` yields a boolean array and this `if` raises for data
    # with more than one distinct value - presumably `buckets[0][0] == 0`
    # was intended.  Confirm and fix upstream.
    if buckets[0] == 0:
        p_hat = 1/np.mean(data)
        expectations = n*p_hat*(1-p_hat)**(buckets[0]-1)
    # Begin above zero
    else:
        p_hat = 1/(1+np.mean(data))
        expectations = n*p_hat*(1-p_hat)**buckets[0]
    sd_hat = ((1-p_hat)/p_hat**2)**0.5
    # NOTE(review): verify the estimator branches - for zero-based support
    # the mean is (1-p)/p, i.e. p_hat = 1/(1+mean), which is the opposite of
    # the assignment in the zero branch above.
    relativeLoss = np.sum((expectations-buckets[1])**2)/(sd_hat/n**0.5)
    return relativeLoss
def poissonLoss(data : np.array, buckets : np.array) -> float:
    """
    Poisson Loss Function
    Assumes buckets are non-negative integers

    Lower is better; used by discreteData to rank discrete families.
    """
    n = len(data)
    lambda_hat = np.mean(data)
    # NOTE(review): buckets[1] (the counts) is used both as the pmf support
    # and as the observed frequencies below; the pmf should presumably be
    # evaluated at buckets[0] (the unique values).  Confirm before relying
    # on this loss.
    expectations = n*(lambda_hat**buckets[1]*np.exp(-lambda_hat))/sp.factorial(buckets[1])
    sd_hat = lambda_hat**0.5
    relativeLoss = np.sum((expectations-buckets[1])**2)/(sd_hat/n**0.5)
    return relativeLoss
def gaussianLoss(data : np.array) -> float:
    """
    Gaussian Loss Function
    Assumes data sorted.

    Lower is better; compares the sorted sample against the fitted normal.
    """
    n = len(data)
    mu_hat = np.mean(data);
    # NOTE(review): this is the (biased) variance, not a standard deviation,
    # despite the name - confirm which is intended for the scaling below and
    # for the stats.norm() parameter (which expects a std dev).
    sd_hat = 1/n*np.sum((data-mu_hat)**2)
    # Goodness-of-fit test :
    # Squared difference between data and quantile at theoretical
    percentiles = np.append(np.linspace(1/n,1-1/n,n-1),1-1/n**2)
    norm_dist = stats.norm(mu_hat,sd_hat)
    # NOTE(review): pdf(percentiles) evaluates the *density* at probability
    # values; a quantile comparison would use ppf(percentiles).  Verify.
    relativeLoss = np.sum((data-norm_dist.pdf(percentiles))**2)/(sd_hat/n**0.5)
    return relativeLoss
def exponentialLoss(data : np.array) -> float:
    """
    Exponential Loss Function
    Assumes data sorted.

    Lower is better; compares the sorted sample against the fitted exponential.
    """
    n = len(data)
    lambda_hat = 1/np.mean(data)
    sd_hat = 1/lambda_hat
    # Goodness-of-fit test :
    percentiles = np.append(np.linspace(1/n,1-1/n,n-1),1-1/n**2)
    # NOTE(review): scipy.stats.expon takes (loc, scale); passing lambda_hat
    # as loc shifts the distribution instead of setting its rate - likely
    # expon(scale=1/lambda_hat) was intended.  Also pdf vs ppf as in
    # gaussianLoss.  Confirm.
    exp_dist = stats.expon(lambda_hat,sd_hat)
    relativeLoss = np.sum((data-exp_dist.pdf(percentiles))**2)/(sd_hat/n**0.5)
    return relativeLoss
| stevec12/Stat-Tools | StatsTools/distributions.py | distributions.py | py | 4,438 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.array",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "numpy.min",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "numpy.unique",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "numpy.argmin",
"line_numb... |
34398023581 | import shutil
import torch
from dataset import *
from utils import *
from settings_benchmark import *
from dataset import writer
from torch.utils.tensorboard import SummaryWriter
all_dataset = prepareDatasets()
print(f"Models: {[name for name in models]}")
print(f"Datasets: {[name for name in all_dataset]}")
# Self-check: instantiate every model once so loading errors surface early.
print("Trying to load each model...")
for name_model in models:
    model:nn.Module = models[name_model]()
root_result = "result"
if not os.path.exists(root_result):
    os.mkdir(root_result)
id_card = 0
# Manually choose a CUDA device when more than one is available.
count_card = torch.cuda.device_count()
if count_card > 1:
    while True:
        s = input(f"Please choose a video card number (0-{count_card-1}): ")
        if s.isdigit():
            id_card = int(s)
            if id_card >= 0 and id_card < count_card:
                break
        print("Invalid input!")
        continue
device_cuda = torch.device(f'cuda:{id_card}' if torch.cuda.is_available() else 'cpu')
print(f"\n\nVideo Card {id_card} will be used.")
# For every (model, dataset) pair: train with early stopping on validation
# dice, evaluate on the test set whenever validation improves, and persist the
# best checkpoint plus a JSON log.  A "finished.flag" file makes the whole
# pair resumable/skippable across runs.
for name_model in models:
    root_result_model = os.path.join(root_result, name_model)
    if not os.path.exists(root_result_model):
        os.mkdir(root_result_model)
    # foo = models[name_model]()
    # total = sum([param.nelement() for param in foo.parameters()])
    # print("Model:{}, Number of parameter: {:.3f}M".format(name_model, total/1e6))
    # continue
    # Train on each training dataset.
    for name_dataset in all_dataset:
        dataset = all_dataset[name_dataset]
        trainLoader = DataLoader(dataset=dataset['train'],batch_size=2, shuffle=True, drop_last=False, num_workers=0)
        valLoader = DataLoader(dataset=dataset['val'])
        testLoader = DataLoader(dataset=dataset['test'])
        model:nn.Module = models[name_model]().to(device_cuda)
        root_result_model_dataset = os.path.join(root_result_model, name_dataset)
        path_flag = os.path.join(root_result_model_dataset, f"finished.flag")
        # Skip pairs that already completed; otherwise start from scratch.
        if os.path.exists(path_flag):
            continue
        if os.path.exists(root_result_model_dataset):
            shutil.rmtree(root_result_model_dataset)
        os.mkdir(root_result_model_dataset)
        print(f"\n\n\nCurrent Model:{name_model}, Current training dataset: {name_dataset}")
        log_section = f"{name_model}_{name_dataset}"
        # Datasets may override the loss and the binarization threshold.
        funcLoss = DiceLoss() if 'loss' not in dataset else dataset['loss']
        thresh_value = None if 'thresh' not in dataset else dataset['thresh']
        # optimizer = optim.Adam([param for param in model.parameters() if param.requires_grad ], lr=1e-3, weight_decay=1e-4)
        optimizer = torch.optim.Adam([param for param in model.parameters() if param.requires_grad ],
                                     lr=1e-4, weight_decay=0.001)
        NUM_MAX_EPOCH = 300
        bestResult = {"epoch":-1, "dice":-1}
        ls_best_result = []
        for epoch in range(NUM_MAX_EPOCH):
            torch.cuda.empty_cache()
            log_section_parent = f"{log_section}"
            # Detailed (image) logging only every 5th epoch to limit TB size.
            result_train = traverseDataset(model=model, loader=trainLoader,
                                           thresh_value=thresh_value,
                                           log_section=f"{log_section_parent}_{epoch}_train",
                                           log_writer=writer if epoch%5==0 else None,
                                           description=f"Train Epoch {epoch}", device=device_cuda,
                                           funcLoss=funcLoss, optimizer=optimizer)
            for key in result_train:
                writer.add_scalar(tag=f"{log_section}/{key}_train",
                                  scalar_value=result_train[key],
                                  global_step=epoch
                                  )
            # val
            result = traverseDataset(model=model, loader=valLoader,
                                     thresh_value=thresh_value,
                                     log_section=f"{log_section_parent}_{epoch}_val",
                                     log_writer=writer if epoch%5==0 else None,
                                     description=f"Val Epoch {epoch}", device=device_cuda,
                                     funcLoss=funcLoss, optimizer=None)
            for key in result:
                writer.add_scalar(tag=f"{log_section}/{key}_val",
                                  scalar_value=result[key],
                                  global_step=epoch
                                  )
            dice = result['dice']
            print(f"val dice:{dice}. ({name_model} on {name_dataset})")
            if dice > bestResult['dice']:
                # New best validation dice: evaluate on the test set and
                # persist both the metrics and the model weights.
                bestResult['dice'] = dice
                bestResult['epoch'] = epoch
                ls_best_result.append("epoch={}, val_dice={:.3f}".format(epoch, dice))
                print("best dice found. evaluating on testset...")
                result = traverseDataset(model=model, loader=testLoader,
                                         thresh_value=thresh_value,
                                         log_section=None,
                                         log_writer=None,
                                         description=f"Test Epoch {epoch}", device=device_cuda,
                                         funcLoss=funcLoss, optimizer=None)
                ls_best_result.append(result)
                path_json = os.path.join(root_result_model_dataset, "best_result.json")
                with open(path_json, "w") as f:
                    json.dump(ls_best_result,f, indent=2)
                path_model = os.path.join(root_result_model_dataset, 'model_best.pth')
                torch.save(model.state_dict(), path_model)
            else:
                # Early stopping after 100 epochs without improvement.
                threshold = 100
                if epoch - bestResult['epoch'] >= threshold:
                    print(f"Precision didn't improve in recent {threshold} epoches, stop training.")
                    break
        with open(path_flag, "w") as f:
            f.write("training and testing finished.")
| nhjydywd/OCTA-FRNet | run_benchmark.py | run_benchmark.py | py | 6,001 | python | en | code | 5 | github-code | 36 | [
{
"api_name": "torch.cuda.device_count",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "torch.device",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_av... |
41019512289 | import re
from django.views.generic.base import TemplateView
from natsort import humansorted
from ..conf import settings
from ..utils import get_branches
from ..utils import get_tags
regex_hotfix = re.compile(settings.RELEASE_DASHBOARD_FILTER_MRXXX)
regex_mr = re.compile(r"^mr.+$")
def _projects_versions(
    projects, regex=None, tags=True, branches=True, master=False
):
    """Collect version info per project: the name plus naturally-sorted
    (newest-first) tags and/or branches; optionally append 'master'."""
    res = []
    for project in projects:
        info = {
            "name": project,
        }
        if tags:
            info["tags"] = humansorted(get_tags(project, regex), reverse=True)
        if branches:
            info["branches"] = humansorted(
                get_branches(project, regex), reverse=True
            )
            if master:
                # 'master' is not matched by the filter regexes, so it is
                # added explicitly when requested.
                info["branches"].append("master")
        res.append(info)
    return res
def _common_versions(context, tags=True, branches=True):
    """Compute the union of every project's tags/branches and store it,
    naturally sorted newest-first, under context["common_versions"]."""
    tag_union = set()
    branch_union = set()
    for project in context["projects"]:
        if tags:
            tag_union.update(project["tags"])
        if branches:
            branch_union.update(project["branches"])
    context["common_versions"] = {
        "tags": humansorted(tag_union, reverse=True),
        "branches": humansorted(branch_union, reverse=True),
    }
def _hash_versions(data, projects):
result = {}
for i in projects:
try:
result[i] = data["version_{0}".format(i)]
except (KeyError, AttributeError):
pass
return result
class Index(TemplateView):
    """Landing page of the release dashboard; renders a static template."""
    template_name = "release_dashboard/index.html"
| sipwise/repoapi | release_dashboard/views/__init__.py | __init__.py | py | 1,627 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "re.compile",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "conf.settings.RELEASE_DASHBOARD_FILTER_MRXXX",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "conf.settings",
"line_number": 10,
"usage_type": "name"
},
{
"api_na... |
30478020917 | from collections import OrderedDict, namedtuple
from decimal import Decimal
from string import ascii_uppercase
def tabular(table, widths):
    """Render *table* (iterable of rows) as an ASCII grid.

    Each column is left-justified to its entry in *widths*; a horizontal rule
    appears before, between, and after the rows.  Returns the whole grid as a
    single newline-terminated string.
    """
    def _wrap(delim, pieces):
        return delim + delim.join(pieces) + delim

    def _render_row(row):
        padded = (' ' + str(col).ljust(w - 2) for col, w in zip(row, widths))
        return _wrap('|', padded) + '\n'

    rule = _wrap('+', ('-' * (w - 1) for w in widths)) + '\n'
    return _wrap(rule, (_render_row(r) for r in table))
# In Python 3.7, this should be a @dataclass instead:
class Item(namedtuple('Item', 'name price')):
    """A named menu entry whose price is stored as an exact Decimal."""
    def __new__(cls, name, price):
        normalized_price = Decimal(price)
        return super().__new__(cls, name, normalized_price)
def main():
    """Print the menu table, then interactively total the user's selections."""
    # Letters A.. map onto the menu items in order.
    menu_items = OrderedDict(zip(ascii_uppercase, [
        Item('The "Big Boy" Burger', '16.99'),
        Item('French Fries', '5.99'),
        Item('Currie sauce', '19.99'),
        Item('Napkins with Chokolates', '10.50'),
        Item('Juice Box', '89.01'),
        Item('Takeout', '18.99'),
    ]))
    # One-cell title table stacked above the two-column (label, price) menu.
    print(
        tabular([['The Restaurant at the End of the Universe']], [36 + 9]) +
        tabular(
            (('{0} {1.name}'.format(*stuff), '${1.price}'.format(*stuff))
             for stuff in menu_items.items()),
            [36, 9]
        )
    )
    total = Decimal('0.00')
    # Accumulate menu prices until the user types 'done'.
    while True:
        print('Total: ${0}'.format(total))
        selection = input("Select a letter or 'done': ")
        if selection == 'done':
            break
        total += menu_items[selection].price
    print('Final total: ${0}'.format(total))
if __name__ == '__main__':
main() | hzhamed/Fast-Food-Menu-Calculator | Menu.py | Menu.py | py | 1,687 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "collections.namedtuple",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "decimal.Decimal",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "collections.OrderedDict",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "string... |
39003004891 | # Q.4 Write a python code to find the data distributions using box plot for Income.csv file
# Code
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
# Load the income dataset and report whether any values are missing.
dataframe = pd.read_csv("csv_data/income.csv")
print(dataframe.head())
print(dataframe.isnull().values.any())
age = dataframe["age"]
JobType = dataframe["JobType"]
SalStat = dataframe["SalStat"]
dataframe.head()
# Univariate distribution of age.
sns.boxplot(x=age)
plt.show()
# Age distribution split by salary status.
age = dataframe["age"]
sns.boxplot(x=age, y=SalStat)
plt.show()
dataframe = pd.DataFrame(data=dataframe, columns=["age", "JobType", "SalStat"])
# NOTE(review): pd.melt() yields 'variable'/'value' columns, so x="age" and
# y="SalStat" are unlikely to exist in the melted frame - confirm this call.
boxplot = sns.boxplot(x="age", y="SalStat",data=pd.melt(dataframe))
boxplot.axes.set_title("Distribution of Income", fontsize=16)
boxplot.set_xlabel("Conditions", fontsize=14)
boxplot.set_ylabel("Values", fontsize=14)
plt.show()
| ShubhamNagure/Assignments | AI_ML_KR/Assignment#2/question4.py | question4.py | py | 795 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.read_csv",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "seaborn.boxplot",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "matplotlib.pypl... |
40612286632 | from picamera import PiCamera
from picamera.array import PiRGBArray
import cv2
import time
camera=PiCamera()
camera.resolution=(800,600)
camera.framerate=32
rawCaptures = PiRGBArray(camera,size=(800,600))
# Give the camera sensor time to warm up before the first capture.
time.sleep(0.2)
# Stream BGR frames continuously and display each one until Esc is pressed.
for rawCapture in camera.capture_continuous(rawCaptures,format="bgr",use_video_port=True):
    image = rawCapture.array
    cv2.imshow("Image",image)
    # The buffer must be cleared before the next frame can be written into it.
    rawCaptures.truncate(0)
    k= cv2.waitKey(20) & 0xff
    if k==27:
        break
cv2.destroyAllWindows()
| sacrrie/people_finding | video-recording/record_by_frame_rgb.py | record_by_frame_rgb.py | py | 486 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "picamera.PiCamera",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "picamera.array.PiRGBArray",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
... |
23111160181 | from rest_framework.exceptions import APIException
from django.db.models import Q
from stock_maintain.models import PriceList, QuarterlyFinancial, DividendInformation
from stock_setup_info.models import Stock, StockManagement
def get_stock_by_code(query_params):
    """ Get stock by the stock code provided"""
    try:
        stock_code = query_params.get("stock_code")
    except Exception:
        raise APIException(detail="Provide proper stock code")
    # NOTE(review): [:1][0] raises IndexError when no stock matches - confirm
    # callers expect that rather than a 404-style APIException.
    return Stock.objects.filter(stock_code=stock_code)[:1][0]
def stock_search_like_name(query_params):
    """ Get all stock with like search of name provided"""
    try:
        stock_code = query_params.get("stock_code")
    except Exception:
        raise APIException(detail="Provide proper search variable")
    # Substring match against either the stock code or the display name.
    return Stock.objects.filter(
        Q(stock_code__contains=stock_code) | Q(name__contains=stock_code)
    )
def stock_statistics(query_params):
    """
    Return a stock's statistics
    :param query_params: query parameters on the url
    :return: dict with probability, valuation, company_statistics and kpi keys
    """
    try:
        stock_code = query_params.get("stock_code")
    except Exception:
        raise APIException(detail="Provide stock code for search")
    # Validated outside the try so the APIException is not swallowed and
    # re-raised by the generic handler above.
    if stock_code is None or stock_code == "":
        raise APIException(detail="Provide stock code for search")
    # NOTE(review): [:1][0] raises IndexError when no row exists - confirm
    # upstream guarantees the stock and at least one price row are present.
    stock = Stock.objects.filter(stock_code=stock_code)[:1][0]
    stock_in_market = PriceList.objects.filter(sec_code=stock_code).order_by(
        "-price_date"
    )[:1][0]
    stock_analytics_info = QuarterlyFinancial.objects.filter(sec_code=stock_code)[:1][0]
    stock_dividend_info = DividendInformation.objects.filter(
        sec_code=stock_code
    ).order_by("-year", "-period_number")[:1]
    # Default (zeroed) payload returned when financial/dividend data is missing.
    valuation = {
        "probability": {
            "pat_margin": 0.0,
            "roe": 0.0,
            "dps": 0.0,
            "period": 0,
        },
        "valuation": {
            "pe_ratio": 0.0,
            "net_asset_per_share": 0.0,
            "eps": 0.0,
            "dividend_yield": 0.0,
        },
        "company_statistics": {
            "registrars": "",
            "listing_date": "",
            "year_end": "",
            "share_outstanding": stock.outstanding_shares,
        },
        "kpi": {
            "turnover_growth": 0.0,
            "pat_growth": 0.0,
            "net_assets_growth": 0.0,
            "assets_growth": 0.0,
        },
    }
    if stock_analytics_info and stock_dividend_info:
        # Bug fix: stock_dividend_info is a (length <= 1) queryset, so the
        # dividend row must be indexed before attribute access; previously
        # `.dividend_value` was read from the queryset itself.
        latest_dividend = stock_dividend_info[0]
        pat_margin = round(
            (stock_analytics_info.profit_after_tax / stock_analytics_info.turnover)
            * 100,
            2,
        )
        roe = round(
            (stock_analytics_info.profit_after_tax / stock_analytics_info.net_assets)
            * 100,
            2,
        )
        switcher = {
            1: "1st Quarter",
            2: "2nd Quarter",
            3: "3rd Quarter",
            4: "4th Quarter",
        }
        period = switcher.get(stock_analytics_info.period_number, "Invalid Quarter")
        valuation["probability"] = {
            "pat_margin": pat_margin,
            "roe": roe,
            "dps": latest_dividend.dividend_value,
            "period": period,
        }
        eps = round(stock_analytics_info.profit_after_tax / stock.outstanding_shares, 2)
        naps = round(stock_analytics_info.net_assets / stock.outstanding_shares, 2)
        pe_ratio = round(stock_in_market.price / eps, 2)
        # NOTE(review): "dividend_yield" is set to the quarter label, not a
        # yield; presumably dividend_value / price was intended. Confirm.
        valuation["valuation"] = {
            "pe_ratio": pe_ratio,
            "net_asset_per_share": naps,
            "eps": eps,
            "dividend_yield": period,
        }
        valuation["company_statistics"] = {
            "registrars": stock.registrar,
            "listing_date": stock.list_date,
            "year_end": stock.year_end,
            "share_outstanding": stock.outstanding_shares,
        }
        # Period-over-period growth rates, in percent.
        valuation["kpi"] = {
            "turnover_growth": _growth_rate(
                stock_analytics_info.turnover,
                stock_analytics_info.previous_turnover,
            ),
            "pat_growth": _growth_rate(
                stock_analytics_info.profit_after_tax,
                stock_analytics_info.previous_profit_after_tax,
            ),
            "net_assets_growth": _growth_rate(
                stock_analytics_info.net_assets,
                stock_analytics_info.previous_net_assets,
            ),
            "assets_growth": _growth_rate(
                stock_analytics_info.total_assets,
                stock_analytics_info.previous_total_assets,
            ),
        }
    return valuation


def _growth_rate(current, previous):
    """Percent growth from *previous* to *current*, rounded to 2 decimals.

    NOTE(review): previous == 0 raises ZeroDivisionError, matching the
    original inline formulas - confirm data never contains zero baselines.
    """
    return round(((current - previous) / previous) * 100, 2)
def stock_competitors(query_params):
    """
    Return list of all competitors to current stock
    :param query_params: query parameters on the url; must contain ``stock_code``
    :return: list of dicts with code, latest price, change string and volume
    :raises APIException: when ``stock_code`` is absent or empty
    """
    try:
        stock_code = query_params.get("stock_code")
        if stock_code is None or stock_code == "":
            raise APIException(detail="Provide stock code for search")
    except Exception:
        # Also covers query_params objects without a .get() method.
        raise APIException(detail="Provide stock code for search")
    # NOTE(review): [:1][0] raises IndexError when no stock matches -- confirm
    # callers expect that rather than an APIException.
    stock = Stock.objects.filter(stock_code=stock_code)[:1][0]
    # All other stocks in the same industry are considered competitors.
    stocks = Stock.objects.filter(
        ~Q(stock_code=stock_code) & Q(industry=stock.industry)
    )
    competitors = []
    for stock_record in stocks:
        # NOTE(review): one PriceList query per competitor (N+1); the latest
        # price is taken by ordering on price_date descending.
        price_data = PriceList.objects.filter(
            sec_code=stock_record.stock_code
        ).order_by("-price_date")[:1][0]
        stock_details = {
            "stock_code": stock_record.stock_code,
            "price": price_data.price,
            # e.g. "+1.23%" -- sign comes from offer_bid_sign.
            "change_data": str(price_data.offer_bid_sign)
            + str(f"{price_data.x_change:.2f}")
            + "%",
            "market_data": price_data.volume,
        }
        competitors.append(stock_details)
    return competitors
def stock_mgt_search_by_type(query_params):
    """
    Return list of all stockmanagement to current stock
    :param query_params: query parameters on the url; must contain
        ``stock_code`` and ``query_type``
    :return: queryset of StockManagement rows matching both filters
    :raises APIException: when either required parameter is missing or empty
    """
    try:
        stock_code = query_params.get("stock_code")
        query_type = query_params.get("query_type")
    except Exception:
        # query_params object without a .get() method.
        raise APIException(detail="Provide stock code for search")
    # Validate outside the try-block so each parameter reports its own error;
    # previously a missing query_type was re-reported as a missing stock code
    # because the blanket except clause swallowed the original exception.
    if stock_code is None or stock_code == "":
        raise APIException(detail="Provide stock code for search")
    if query_type is None or query_type == "":
        raise APIException(detail="Provide query type for search")
    stock_mgt = StockManagement.objects.filter(
        stock__stock_code__contains=stock_code, management_type=query_type
    )
    return stock_mgt
| Maxcutex/stockman_project | stock_setup_info/services.py | services.py | py | 7,473 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "rest_framework.exceptions.APIException",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "stock_setup_info.models.Stock.objects.filter",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "stock_setup_info.models.Stock.objects",
"line_number": 15... |
10019831327 | import csv
from django.http import HttpResponse
from django.shortcuts import render
from .models import account
from .models import deposite
from .models import withdraw
# Create your views here.
def showindex(request):
    """Render the site landing page."""
    return render(request, "Home.html")
def head(request):
    """Render the page header fragment."""
    return render(request, "Head.html")
def body(request):
    """Render the page body fragment."""
    return render(request, "Body.html")
def accountdetails(request):
    """Show every account record in the account-details view."""
    accounts = account.objects.all()
    return render(request, "Account_View_Details.html", {"msg": accounts})
def open(request):
    """Render the account-opening form.

    NOTE(review): this view shadows the builtin ``open()`` at module level;
    renaming would require touching the URL configuration, so it is kept.
    """
    return render(request, "AccountOpen.html")
def accountopen(request):
    """Create a new bank account from the posted form fields.

    The model is now constructed with keyword arguments: with positional
    arguments Django maps the first value onto the auto primary key field,
    silently mis-assigning every column.
    """
    name = request.POST.get("t1")
    cno = request.POST.get("t2")
    email = request.POST.get("t4")
    address = request.POST.get("t5")
    # Field names assumed to match the local variable names -- TODO confirm
    # against the `account` model definition.
    a1 = account(name=name, cno=cno, email=email, address=address)
    a1.save()
    return render(request, "AccountOpen.html", {"msg": "Details Saved"})
def accountcsvfile(request):
    """Stream all account records as a downloadable CSV attachment."""
    response = HttpResponse(content_type="text/csv")
    # The filename must be part of the header value. The original code wrote
    # 'attachment';filename="..." -- the part after the semicolon was a
    # separate no-op Python statement, so no filename ever reached the client.
    response['Content-Disposition'] = 'attachment; filename="accountdetails.csv"'
    wr = csv.writer(response)
    for x in account.objects.all():
        wr.writerow([x.name, x.cno, x.email, x.address])
    return response
def manager(request):
    """Render the manager login form."""
    return render(request, "ManagerLogin.html")
def managerdetails(request):
    """Authenticate the manager and show the admin page on success.

    SECURITY(review): credentials are hard-coded in source and compared in
    plain text; this should use Django's auth framework instead.
    """
    m_uname=request.POST.get("m1")
    m_upass=request.POST.get("m2")
    if m_uname=="manager" and m_upass=="bank":
        return render(request,"ManagerAll_Details.html")
    else:
        # Failed login: return to the login form (no error message is shown).
        return render(request,"ManagerLogin.html")
def deposite1(request):
    """Render the deposit form."""
    return render(request, "Deposite.html")
def depositedetails(request):
    """Persist a deposit submitted from the Deposite form."""
    form = request.POST
    record = deposite(
        D_no=form.get("a1"),
        D_name=form.get("a2"),
        D_money=form.get("a3"),
        A_date=form.get("a5"),
        A_type=form.get("a4"),
    )
    record.save()
    return render(request, "Deposite.html", {"message": "Amount Deposited"})
def depo(request):
    """Show every deposit record in the deposit-details view."""
    deposits = deposite.objects.all()
    return render(request, "Deposite_View_Details.html", {"msg1": deposits})
def depositeCSVfile(request):
    """Stream every deposit record as a downloadable CSV attachment."""
    http = HttpResponse(content_type="text/csv")
    # Filename must live inside the header value; the original appended it as
    # a separate no-op statement after a semicolon.
    http['Content-Disposition'] = 'attachment; filename="deposite.csv"'
    w = csv.writer(http)
    for x in deposite.objects.all():
        # D_money matches the field name used when records are created in
        # depositedetails(); the original read x.A_money, which is never set
        # there -- TODO confirm against the deposite model definition.
        w.writerow([x.D_no, x.D_name, x.D_money, x.A_date, x.A_type])
    return http
def withdraw(request):
    """Render the withdrawal details view.

    NOTE(review): this view shadows the ``withdraw`` model imported at the
    top of the file; renaming would change the URL configuration, so the
    name is kept.
    """
    return render(request, "Withdraw_View_Details.html")
def withdrawlogin(request):
    """Render the withdrawal login form."""
    return render(request, "Withdraw.html")
def withdrawlogindetails(request):
    """Validate an account-number/name pair and show withdrawal details.

    NOTE(review): on a successful match the page is fed
    ``deposite.objects.values_list()`` -- that is ALL deposit rows, not just
    the matched user's; looks unintended, confirm with the template.
    """
    a_no = request.POST.get("a1")
    a_name = request.POST.get("a2")
    dep2 = deposite.objects.filter(D_no=a_no, D_name=a_name)
    if dep2:
        dep3=deposite.objects.values_list()
        return render(request,"Withdraw_View_Details.html",{"ans1":dep3})
    else:
        # No matching deposit: send the user back to the login form.
        return render(request,"Withdraw.html")
#dep1=deposite.objects.filter(D_no=a_no,D_name=a_name)
#if not dep1:
   # return render(request,"Withdraw.html")
#else:
   # return render(request, "Withdraw_View_Details.html")
def withdrawdetails(request):
w_money=request.POST.get("y1")
print(w_money)
#dep=deposite.objects.filter(D_money=w_money)
#print(dep)
w_date=request.POST.get("y2")
w_type=request.POST.get("y3")
from firebase import firebase as fab
fa=fab.FirebaseApplication("https://djangoweb1-ec1db.firebaseio.com/",None)
fa.put("https://djangoweb1-ec1db.firebaseio.com/","withdraw/",None)
return render(request,"Withdraw_View_Details.html") | prasadnaidu1/django | bankinfo/prasad/views.py | views.py | py | 3,584 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.shortcuts.render",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 17,
"usage_type": "call"
},
{
"api_name"... |
29515253943 | import base64
from plugins.googleapi.Google import Create_Service
from email.mime.text import MIMEText
from sql_data.schemas import Mail
def create_mail_service(user_id):
    """Build an authorized Gmail API service object for the given user."""
    return Create_Service(
        'plugins/googleapi/Credentials/keys.json',          # client secret file
        'gmail',                                            # API service name
        'v1',                                               # API version
        ['https://www.googleapis.com/auth/gmail.send'],     # requested scopes
        user_id=user_id,
    )
def create_message(sender, to, subject, message_text, to_method='bcc'):
    """Build a Gmail API message payload.

    The plain-text body is wrapped in a MIME message, serialized, and
    base64url-encoded as required by the Gmail ``users.messages.send`` API.
    ``to_method`` selects the recipient header ('to', 'cc' or 'bcc').
    """
    mime = MIMEText(message_text)
    mime[to_method] = to
    mime['from'] = sender
    mime['subject'] = subject
    encoded = base64.urlsafe_b64encode(mime.as_string().encode("utf-8"))
    return {'raw': encoded.decode("utf-8")}
def send_message(user_id,subject,to, message):
    """Send an email message through the Gmail API.

    Args:
        user_id: Sender's email address (also used to authorize the service).
        subject: Subject line.
        to: Iterable of recipient addresses; joined into one comma-separated
            header value.
        message: Plain-text body.

    Returns:
        dict with the sent message's ``id``, ``subject`` and ``labelIds``,
        or None when sending fails (the error is only printed -- callers
        must handle a None result).
    """
    to = ','.join(to)
    raw_message = create_message(sender=user_id,to=to,subject=subject,message_text=message)
    Mailer = create_mail_service(user_id=user_id)
    try:
        message = (Mailer.users().messages().send(userId=user_id, body=raw_message)
                  .execute())
        return {
            "id":message['id'],
            "subject":subject,
            "labelIds":message['labelIds']
        }
    except Exception as e:
        # NOTE(review): failures are swallowed and merely printed; the
        # function then implicitly returns None.
        print ('An error occurred: %s' % e)
def send_mapped_message(user_id: str,subject: str, message: str,map_data: dict ,mail_col:int):
    """Send a personalized (mail-merged) message to each row of map_data.

    Args:
        user_id: Sender's email address, used to authorize the service.
        subject: Subject template; ``<<Header>>`` placeholders are replaced
            per recipient.
        message: Body template with the same placeholder syntax.
        map_data: Mapping with 'Header' (list of column names) and 'Data'
            (list of rows, one per recipient).
        mail_col: Index of the column holding the recipient's address.

    Returns:
        list of per-recipient result dicts on success; on failure the caught
        exception OBJECT is returned (see review note below).
    """
    Mailer = create_mail_service(user_id=user_id)
    Mails = []
    try:
        #send mail to each person in the sender's list
        for user in map_data['Data']:
            mapped_message = message
            mapped_subject = subject
            # Substitute every <<Header>> placeholder with this row's value.
            for index,header in enumerate(map_data['Header']):
                mapped_message = mapped_message.replace(f'<<{header}>>',user[index])
                mapped_subject = mapped_subject.replace(f'<<{header}>>',user[index])
            # print(f'head {header},\nmap :{user[index]},\nmessage :{mapped_message},\nsubject :{mapped_subject}')
            raw_message = create_message(sender=user_id,to=user[mail_col],subject=mapped_subject,message_text=mapped_message ,to_method='cc')
            mail = (Mailer.users().messages()
                    .send(userId=user_id, body=raw_message)
                    .execute())
            Mails.append({
                "id":mail['id'],
                "reciverId":user[mail_col],
                "subject":subject,
                "labelIds":mail['labelIds']
            })
        return Mails
    except Exception as e:
        # NOTE(review): one failed send aborts the whole batch, and the
        # exception object itself is returned -- callers must type-check the
        # result to tell success from failure.
        print ('An error occurred: %s' % e)
        return e
| r0king/EventOn-api | plugins/gmail/mail.py | mail.py | py | 2,985 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "plugins.googleapi.Google.Create_Service",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "email.mime.text.MIMEText",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "base64.urlsafe_b64encode",
"line_number": 21,
"usage_type": "call"
}
] |
1107559956 | import os
import subprocess
from docutils import nodes
from docutils.parsers.rst import Directive
from tutorials import tutorials
import sphinx_material
class Tutorials(Directive):
    """Sphinx directive that renders the tutorial cards as raw HTML."""
    def run(self):
        """Build the raw-HTML nodes: an intro paragraph plus one card per tutorial."""
        output = list()
        # General text
        intro = f"""
<p>
    <b>viskex</b> is accompanied by a few tutorials, that can be run on JupyterLab through a local installation of the library, or on cloud computing platforms such as Google Colab and Kaggle.
</p>
"""
        output.append(nodes.raw(text=intro, format="html"))
        # Tutorials: one card per entry; each step becomes a plain button
        # (single file) or a dropdown (multiple library variants).
        cards = list()
        for num in tutorials.keys():
            data = tutorials[num]
            steps = data["steps"]
            buttons = ""
            for step_description in steps:
                step_files = steps[step_description]
                if len(step_files) == 1:
                    buttons += self._button(step_description, step_files)
                else:
                    buttons += self._dropdown(step_description, step_files)
            card_num = self._card(
                num=num,
                title=data["title"],
                description=data["description"],
                buttons=buttons
            )
            cards.append(card_num)
        output.append(nodes.raw(text=self._card_container(cards), format="html"))
        return output
    @staticmethod
    def _card_container(cards):
        """Wrap the rendered cards in the container/row/column grid markup."""
        card_container = """
<div class="tutorial-container">
    <div class="tutorial-row">
"""
        for card in cards:
            card_container += """
        <div class="tutorial-column">
""" + card + """
        </div>
"""
        card_container += """
    </div>
</div>
"""
        return card_container
    @staticmethod
    def _card(num, title, description, buttons):
        """Render one tutorial card (number, title, description, buttons)."""
        return f"""
<div class="tutorial-card">
    <div class="tutorial-number">
        {num}
    </div>
    <div class="tutorial-content">
        <h3 class="tutorial-title">
            {title}
        </h3>
        <div class="tutorial-description">
            <p>{description}</p>
        </div>
        <div class="tutorial-buttons">
            {buttons}
        </div>
    </div>
</div>
"""
    @classmethod
    def _dropdown(cls, step_description, libraries_urls):
        """Render a jq-dropdown button listing one link per library variant."""
        dropdown = f"""
<div id="tutorial-dropdown-{cls._dropdown_id}" class="jq-dropdown jq-dropdown-tip">
    <ul class="jq-dropdown-menu">
"""
        for (library, url) in libraries_urls.items():
            dropdown += f"""
        <li><a href="{url}" target="_blank">{cls._library_image(library)} {library}</a></li>
"""
        dropdown += f"""
    </ul>
</div>
<div class="tutorial-button" data-jq-dropdown="#tutorial-dropdown-{cls._dropdown_id}">{step_description}</div>
"""
        # Class-level counter keeps dropdown element ids unique across cards.
        cls._dropdown_id += 1
        return dropdown
    # Next unique id used by _dropdown (shared across all directive instances).
    _dropdown_id = 1
    @classmethod
    def _button(cls, step_description, libraries_urls):
        """Render a single-link button; requires exactly one library/url pair."""
        assert len(libraries_urls) == 1
        library = list(libraries_urls.keys())[0]
        url = libraries_urls[library]
        return f"""
<a href="{url}" target="_blank"><div class="tutorial-button">{cls._library_image(library)} {step_description}</div></a>
"""
    @staticmethod
    def _library_image(library):
        """Return the <img> tag for the given backend library's logo."""
        if library == "dolfinx":
            logo = "_static/images/dolfinx-logo.png"
        elif library in ("firedrake", "firedrake + netgen"):
            logo = "_static/images/firedrake-logo.png"
        else:
            raise RuntimeError("Invalid type")
        return f'<img src="{logo}" style="vertical-align: middle; width: 25px">'
def on_build_finished(app, exc):
    """Post-process the generated HTML after a successful Sphinx build.

    Runs find+sed rewrites over the output tree, injects SEO tags into
    index.html, and restores pre-rendered tutorial HTML files from the
    gh-pages branch when they are missing.
    """
    if exc is None and app.builder.format == "html":
        # Unescape at symbol (%40 -> @) in every generated file.
        subprocess.run(
            "find " + str(app.outdir) + " -type f -not -path '*/\.git/*' -exec sed -i 's/%40/@/g' {} +",
            shell=True)
        # Mark current page as active in the top tab bar.
        subprocess.run(
            "find " + str(app.outdir) + " -type f -not -path '*/\.git/*' -exec sed -i 's/"
            + '<li class="md-tabs__item"><a href="#" class="md-tabs__link">'
            + "/"
            + '<li class="md-tabs__item md-tabs__item_current"><a href="#" class="md-tabs__link">'
            + "/g' {} +",
            shell=True)
        # Disable going to submenus on mobile by renaming the toc anchor id.
        subprocess.run(
            "find " + str(app.outdir) + " -type f -not -path '*/\.git/*' -exec sed -i 's/"
            + 'id="__toc"'
            + "/"
            + 'id="__toc_disabled"'
            + "/g' {} +",
            shell=True)
        # Add further SEO tags (JSON-LD schema + Open Graph) to index.html.
        seo_head = """
<script type="application/ld+json">
{
  "@context": "http://schema.org",
  "@type": "SoftwareApplication",
  "name": "viskex - interactive visualization for firedrake and FEniCSx",
  "description": "viskex is a library for the interactive visualization of finite element simulations within jupyter notebooks in JupyterLab, Google Colab or Kaggle. viskex is currently developed at Università Cattolica del Sacro Cuore by Dr. Francesco Ballarin.",
  "keywords": "viskex, firedrake, FEniCS, finite element, jupyter, visualization",
  "softwareHelp": "https://viskex.github.io/",
  "operatingSystem": "Linux",
  "applicationCategory": "Simulation",
  "inLanguage": "en",
  "license": "https://opensource.org/licenses/MIT",
  "url": "https://github.com/viskex/viskex"
}
</script>
<meta property="og:title" content="viskex - interactive visualization for firedrake and FEniCSx" />
<meta property="og:description" content="viskex is a library for the interactive visualization of finite element simulations within jupyter notebooks in JupyterLab, Google Colab or Kaggle. viskex is currently developed at Università Cattolica del Sacro Cuore by Dr. Francesco Ballarin." />
<meta property="og:type" content="website" />
<meta property="og:site_name" content="viskex" />
<meta property="og:url" content="https://viskex.github.io/" />
<meta property="og:image" content="https://viskex.github.io/_images/viskex-logo.png" />
"""
        index = os.path.join(app.outdir, "index.html")
        with open(index, "r") as f:
            index_content = f.read()
        index_content = index_content.replace("<head>", "<head>\n" + seo_head)
        with open(index, "w") as f:
            f.write(index_content)
        # Get tutorial nbconvert html files from git, if not already available.
        for num in tutorials.keys():
            for step_files in tutorials[num]["steps"].values():
                for url in step_files.values():
                    if not os.path.exists(os.path.join(app.outdir, url)):
                        html_generated = subprocess.run(
                            "mkdir -p " + os.path.dirname(os.path.join(app.outdir, url)) + " && " +
                            "git show origin/gh-pages:" + url + "> " + os.path.join(app.outdir, url),
                            shell=True, capture_output=True)
                        # Fail the build loudly if the pre-rendered file is
                        # missing from the gh-pages branch as well.
                        if html_generated.returncode != 0:
                            raise RuntimeError(
                                "HTML generation of " + url + " not found\n"
                                + "stdout contains " + html_generated.stdout.decode() + "\n"
                                + "stderr contains " + html_generated.stderr.decode() + "\n")
# Monkey-patch sphinx_material's sitemap generator: run the original, then
# post-process sitemap.xml with sed.
create_sitemap_bak = sphinx_material.create_sitemap
def create_sitemap(app, exc):
    """Wrap sphinx_material.create_sitemap, then fix up the generated XML."""
    create_sitemap_bak(app, exc)
    if exc is None and app.builder.format == "html":
        # Add version and encoding to the top of sitemap.xml
        subprocess.run(
            "sed -i '1s/^/<?xml version=\"1.0\" encoding=\"UTF-8\"?>/' " + os.path.join(app.outdir, "sitemap.xml"),
            shell=True)
        # Remove trailing index.html from sitemap.xml
        subprocess.run(
            "sed -i 's|/index.html||g' " + os.path.join(app.outdir, "sitemap.xml"),
            shell=True)
sphinx_material.create_sitemap = create_sitemap
def setup(app):
    """Register the directive and build hook with Sphinx; declare capabilities."""
    app.add_directive("tutorials", Tutorials)
    app.connect("build-finished", on_build_finished)
    metadata = {
        "version": "0.1",
        "parallel_read_safe": True,
        "parallel_write_safe": False,
    }
    return metadata
| viskex/viskex.github.io | _ext/ext.py | ext.py | py | 8,064 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "docutils.parsers.rst.Directive",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "docutils.nodes.raw",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "docutils.nodes",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "tutor... |
718879097 | from crossref.restful import Works
import requests
import re
class CrossRef:
def __init__(self):
self.works = Works()
self.metadata = None
def get_doi_metadata(self, doi: str) -> dict:
try:
self.metadata = self.works.doi(doi=doi)
return self.metadata
except Exception:
return None
def get_info_from_orcid(self, orcid: str):
if not orcid:
return None, None
orcid_api_url = f"https://pub.orcid.org/v3.0/{orcid}/person"
headers = {"Accept": "application/json"}
response = requests.get(orcid_api_url, headers=headers)
if response.status_code == 200:
data = response.json()
emails = data.get("emails", None)
email = emails.get("email", None) if emails else None
email = email[0].get("email", None) if email and len(email) > 0 else None
final_email = email if email else None
urls = data.get("researcher-urls", None)
url = urls.get("researcher-url", None) if urls else None
url = url[0].get("url", None) if url and len(url) > 0 else None
if url and "value" in url:
final_url: str = url["value"]
if final_url.startswith("https://"):
final_url = re.sub(r"https://", "http://", final_url)
elif not final_url.startswith("http://"):
final_url = "http://" + final_url.strip()
else:
final_url = None
return final_email, final_url
return None, None
def get_contributors(self):
if self.metadata:
contributors = self.metadata.get("author", None)
return contributors
def get_author_affiliations(self, contributor: dict):
affiliations = contributor.get("affiliation", None)
return affiliations
def get_author_full_name(self, contributor: dict) -> (str, str):
family_name = contributor.get("family", None)
given_name = contributor.get("given", None)
return family_name, given_name
def get_author_orcid(self, contributor: dict) -> str:
orcid = contributor.get("ORCID", None)
return orcid
def get_subjects(self) -> list[str]:
if self.metadata:
subjects = self.metadata.get("subject", None)
return subjects
def get_title(self) -> str:
if self.metadata:
title = self.metadata.get("title", None)
return title
def get_abstract(self) -> str:
if self.metadata:
abstract = self.metadata.get("abstract", None)
return abstract | jamino30/dandiset-metadata-filler | clients/crossref.py | crossref.py | py | 2,780 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "crossref.restful.Works",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 44,
"usage_type": "call"
}
] |
# Image processing: SURF feature detection
# References:
# https://python-debut.blogspot.com/2020/02/blog-post_24.html
# https://stackoverflow.com/questions/52305578/sift-cv2-xfeatures2d-sift-create-not-working-even-though-have-contrib-instal
# (OpenCV was downgraded so that cv2.xfeatures2d is available)
import numpy as np
import cv2
img = cv2.imread('dog.jpg')
# Create the SURF feature extractor.
surf = cv2.xfeatures2d.SURF_create() #SURF
# Detect keypoints in the loaded image.
kp_surf = surf.detect(img)
# Draw the detected keypoints (with size/orientation) onto the image.
img_surf = cv2.drawKeypoints(img, kp_surf, None, flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
cv2.imwrite('img_surf.png', img_surf)
# Because OpenCV was downgraded, the following functions were not usable.
#cv2.imshow('img_surf.png', img_surf)
#cv2.waitKey(0)
#cv2.destroyAllWindows() | mitanihayato/shinjinkadai | 4_surf.py | 4_surf.py | py | 814 | python | ja | code | 0 | github-code | 36 | [
{
"api_name": "cv2.imread",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "cv2.xfeatures2d.SURF_create",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "cv2.xfeatures2d",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "cv2.drawK... |
45138590856 | import sys
import argparse
class BaseOptions():
    """Shared command-line options for training/testing.

    Subclasses are expected to set ``self.isTrain`` before ``parse()`` is
    called (it is read but never assigned here).
    """
    def __init__(self):
        # Whether initialize() has populated the parser yet.
        self.initialized = False
    def initialize(self, parser):
        """Register all common arguments on *parser* and return it."""
        # original arguments
        parser.add_argument("--epoch", type=int, default=0,
                            help="epoch to start training from")
        parser.add_argument("--n_epochs", type=int, default=400,
                            help="number of epochs of training")
        parser.add_argument("--data_path", type=str, default="../../data")
        parser.add_argument("--output_path", type=str, default="./results/flickr")
        parser.add_argument("--batch_size", type=int, default=32,
                            help="size of the batches")
        parser.add_argument("--n_cpu", type=int, default=2,
                            help="number of cpu threads to use during batch generation")
        # experiment specifics
        parser.add_argument('--name', type=str, default='label2coco', help='name of the experiment. It decides where to store samples and models')
        parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU')
        parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here')
        parser.add_argument('--norm_G', type=str, default='spectralinstance', help='instance normalization or batch normalization')
        parser.add_argument('--norm_D', type=str, default='spectralinstance', help='instance normalization or batch normalization')
        parser.add_argument('--norm_E', type=str, default='spectralinstance', help='instance normalization or batch normalization')
        parser.add_argument('--phase', type=str, default='train', help='train, val, test, etc')
        # input/output sizes
        parser.add_argument('--preprocess_mode', type=str, default='scale_width_and_crop', help='scaling and cropping of images at load time.', choices=("resize_and_crop", "crop", "scale_width", "scale_width_and_crop", "scale_shortside", "scale_shortside_and_crop", "fixed", "none"))
        parser.add_argument('--crop_size', type=int, default=512, help='Crop to the width of crop_size (after initially scaling the images to load_size.)')
        parser.add_argument('--aspect_ratio', type=float, default=1.0, help='The ratio width/height. The final height of the load image will be crop_size/aspect_ratio')
        parser.add_argument('--label_nc', type=int, default=182, help='# of input label classes without unknown class. If you have unknown class as class label, specify --contain_dopntcare_label.')
        parser.add_argument('--contain_dontcare_label', action='store_true', help='if the label map contains dontcare label (dontcare=255)')
        parser.add_argument('--output_nc', type=int, default=3, help='# of output image channels')
        # for displays
        parser.add_argument('--display_winsize', type=int, default=400, help='display window size')
        # for discriminator
        parser.add_argument('--ndf', type=int, default=64, help='# of discrim filters in first conv layer')
        parser.add_argument('--lambda_feat', type=float, default=10.0, help='weight for feature matching loss')
        parser.add_argument('--lambda_vgg', type=float, default=10.0, help='weight for vgg loss')
        parser.add_argument('--no_ganFeat_loss', action='store_true', help='if specified, do *not* use discriminator feature matching loss')
        parser.add_argument('--no_vgg_loss', action='store_true', help='if specified, do *not* use VGG feature matching loss')
        parser.add_argument('--gan_mode', type=str, default='hinge', help='(ls|original|hinge)')
        # for generator
        parser.add_argument('--ngf', type=int, default=64, help='# of gen filters in first conv layer')
        # for instance-wise features
        parser.add_argument('--no_instance', action='store_true', help='if specified, do *not* add instance map as input')
        parser.add_argument('--use_vae', action='store_true', help='enable training with an image encoder.')
        self.initialized = True
        return parser
    def parse(self):
        """Parse sys.argv into an options namespace and cache it on self.opt."""
        parser = argparse.ArgumentParser()
        opt = self.initialize(parser).parse_args()
        # train or test -- isTrain is expected to be set by a subclass;
        # TODO confirm, it is never assigned in this class.
        opt.isTrain = self.isTrain
        # Set semantic_nc based on the option.
        # This will be convenient in many places
        opt.semantic_nc = opt.label_nc + \
            (1 if opt.contain_dontcare_label else 0) + \
            (0 if opt.no_instance else 1)
        self.opt = opt
        return self.opt
| HBX-hbx/CGAN_jittor_landscape | options/base_options.py | base_options.py | py | 4,573 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 61,
"usage_type": "call"
}
] |
2689293408 | #========================================================================================#========================================================================================
# LIBRARIES
#========================================================================================
import BH_Physics as BHP
from fitool import fit
import numpy as np
from scipy.interpolate import interp1d
from loadmodules import *
from parse_particledata import parse_particledata
import os
from scipy.optimize import curve_fit
from collections import Counter
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import matplotlib
from matplotlib import rc
matplotlib.rcParams['mathtext.fontset'] = 'stix'
matplotlib.rcParams['font.family'] = 'STIXGeneral'
plt.close('all')
#========================================================================================
# FUNCTIONS
#========================================================================================
def sersic_prof1(x, ba, b, n):
    """Sersic-like profile ba * exp(-(x/b)**n); b is a scale length, NOT Reff."""
    shape = (x / b) ** n
    return ba * np.exp(-shape)
def exp_prof(x, da, h):
    """Exponential disk profile: da * exp(-x / h)."""
    decay = np.exp(-x / h)
    return da * decay
def total_profile(x, da, h, ba, b, n):
    """Composite profile: exponential disk plus Sersic component."""
    return exp_prof(x, da, h) + sersic_prof1(x, ba, b, n)
def Exponential_Disk(r, sigma0, Rd):
    """Exponential disk surface density: sigma0 * exp(-r / Rd)."""
    profile = np.exp(-r / Rd)
    return sigma0 * profile
def Sersic_Profile(r, sigma_eff, Reff, n):
    """Sersic profile using the standard analytic approximation for b(n)."""
    bn = (2*n - 1/3. + 4/(405.*n) + 46/(25515.*n**2) + 131/(1148175*n**3)
          - 2194697/(30690717750*n**4))
    return sigma_eff * np.exp(-bn * ((r / Reff)**(1./n) - 1))
def Total_Profile(r, a, b, c, d, e):
    """Sum of an exponential disk (a, b) and a Sersic component (c, d, e)."""
    disk = Exponential_Disk(r, a, b)
    bulge = Sersic_Profile(r, c, d, e)
    return disk + bulge
#========================================================================================
# PARAMETERS
#========================================================================================
Simulation0 = 'cosmobh00'
Simulation1 = 'cosmobh01'
#========================================================================================
# PLOTS
#========================================================================================
# Load per-subhalo quantities for both simulations.
StellarMass0 = np.loadtxt( './data/stellarmass_%s.txt'%Simulation0 )
StellarMass1 = np.loadtxt( './data/stellarmass_%s.txt'%Simulation1 )
#GasMass0 = np.loadtxt( './data/gasmass_%s.txt'%Simulation0 )
#GasMass1 = np.loadtxt( './data/gasmass_%s.txt'%Simulation1 )
#DMMass0 = np.loadtxt( './data/dmmass_%s.txt'%Simulation0 )
#DMMass1 = np.loadtxt( './data/dmmass_%s.txt'%Simulation1 )
DT0 = abs(np.loadtxt( './data/morphologies_%s.txt'%Simulation0 ))
DT1 = abs(np.loadtxt( './data/morphologies_%s.txt'%Simulation1 ))
Color0 = np.loadtxt( './data/color_%s.txt'%Simulation0 )
Color1 = np.loadtxt( './data/color_%s.txt'%Simulation1 )
Spos0 = np.loadtxt( './data/spos_%s.txt'%Simulation0 )
Spos1 = np.loadtxt( './data/spos_%s.txt'%Simulation1 )
# Matching: for every subhalo in run 0, find the nearest subhalo in run 1.
index = np.zeros( len(StellarMass0) )
residual = np.zeros( len(StellarMass0) )
ID1 = np.arange( len(StellarMass1) )
for i, r in enumerate(Spos0):
    # Compute the distance vector once per subhalo (was computed twice).
    dists = np.linalg.norm( Spos1 - r, axis=1 )
    index[i] = int(ID1[np.argsort( dists )[0]])
    residual[i] = np.sort( dists )[0]
index = index.astype(int)
plt.figure( figsize=(15,10) )
# Discarding not-matching haloes: close positional match, massive, and red.
mask_nm = (residual<=1e2)*(np.log10(StellarMass1[index])+10>9.5)*(Color1[index]>0.6)
plt.scatter( np.log10(StellarMass0[mask_nm]) + 10 , Color0[mask_nm], alpha = 0.5, marker = 's', s=40, vmin=0, vmax=1, zorder = 10, color='blue')#, c=DT0[mask_nm], cmap = 'jet_r' )
plt.scatter( np.log10(StellarMass1[index[mask_nm]]) + 10 , Color1[index[mask_nm]], alpha = 0.5, marker = 'o', s=40, vmin=0, vmax=1, zorder = 10, color='red')#, c=DT1[index[mask_nm]], cmap = 'jet_r' )
#cb1 = plt.colorbar()
#cb1.set_label( 'D / T', fontsize = 18 )
# Hoist the per-point coordinates out of the arrow loop.
x0 = np.log10(StellarMass0[mask_nm]) + 10
y0 = Color0[mask_nm]
x1 = np.log10(StellarMass1[index[mask_nm]]) + 10
y1 = Color1[index[mask_nm]]
# range() replaces the Python-2-only xrange() so the script runs on Python 3.
for i in range( sum(mask_nm) ):
    plt.arrow( x0[i], y0[i], x1[i]-x0[i], y1[i]-y0[i], ls='--', color='gray', length_includes_head=True, width=0.0005 )
plt.xlim( [9.0,12] )
plt.ylim( [0.1,0.8] )
plt.xlabel( '$\log M_{\star} [10^{10}\ M_{\odot}]$', fontsize = 18 )
plt.ylabel( 'g - r', fontsize = 18 )
plt.show()
| sbustamante/Spinstractor | Subhalo_Color_Mass_Matcher.py | Subhalo_Color_Mass_Matcher.py | py | 4,486 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "matplotlib.rcParams",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.rcParams",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.close",
"line_number": 21,
"usage_type": "call"
},
{
"api_nam... |
71793784103 | # -*- coding: utf-8 -*-
"""
API
/api_part/formula_search - search API
/api_part/contexts/<formula_id> - contexts for one formula (with span)
"""
from flask import Flask
from flask_restful import Api
from flask_cors import CORS
from api_part.models import db
from api_part.api import FormulaSearch, FormulaContexts
app = Flask(__name__)
app.config['CORS_HEADERS'] = 'Content-Type'
# Fix: the Flask-CORS keyword is spelled ``resources``; the original passed
# ``resorces``, which was silently swallowed by **kwargs, so the per-route
# CORS configuration never took effect.
cors = CORS(app, resources={r'/api/formula_search/*': {"origins": '*'}})
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///ice_site.db'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
# Keep non-ASCII characters intact in JSON responses.
app.config['JSON_AS_ASCII'] = False
db.app = app
db.init_app(app)
db.create_all()
api = Api(app)
api.add_resource(FormulaSearch, '/api/formula_search')
api.add_resource(FormulaContexts, '/api/contexts/<formula_id>')
if __name__ == '__main__':
    app.run(debug=True, port=5010)
{
"api_name": "flask.Flask",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "flask_cors.CORS",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "api_part.models.db.app",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "api_part.mode... |
41907938518 | import os
# (removed a bare `os.environ` expression statement here -- it was a no-op)
from PIL import Image
from torchvision import transforms, models
import torch
import torch.nn as nn
# ResNet-50 backbone with an 8-class head for age-group classification.
model = models.resnet50(pretrained=True)
num_features = model.fc.in_features
model.fc = nn.Linear(num_features, 8)
model_path = './age_prediction_model/age_best_model.pth'
model.load_state_dict(torch.load(model_path, map_location=torch.device('cpu')))
test_transform = transforms.Compose([
    transforms.Resize(128),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
])
model.eval()
image = Image.open('face.jpg').convert('RGB')
image = test_transform(image).unsqueeze(0)
# Inference only -- no_grad avoids building the autograd graph.
with torch.no_grad():
    out = model(image)
_, preds = torch.max(out, 1)
d_class = preds[0].item()
# Classes above 4 are treated as elderly -- TODO confirm against the label
# mapping used during training.
if d_class > 4:
    print("노인 입니다")
    os.system("F5.bat")
else:
    print("노인이 아닙니다")
    os.system("F4.bat")
os.remove('face.jpg')
os.system('python ./hand-gesture-recognition-mediapipe/app.py')
| CSID-DGU/2022-2-SCS4031-EZ_SW | age_prediction_model/age_pred.py | age_pred.py | py | 967 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.environ",
"line_number": 2,
"usage_type": "attribute"
},
{
"api_name": "torchvision.models.resnet50",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "torchvision.models",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "torch.n... |
14899572028 | import pandas as pd
import altair as alt
import joblib
# Side length (in pixels) of one heat-map cell.
RECT_SIZE = 13

# ``snakemake`` is injected into the namespace by the Snakemake workflow
# engine at runtime; input[0] is the source JSON, output[0] the chart dump.
source = pd.read_json(snakemake.input[0])

# Artificial separator rows: clone one encoding's rows, rename to 'zzz' so
# they sort last on the x-axis; the axis labelExpr below hides the label.
df_sb = source.loc[source.Encoding == "zscale", :].copy(deep=True)
df_sb.Encoding = "zzz"
df_sb.F1 = "separator"

dfs, names_imbalanced = [], []
for bf in sorted(source["bio_field"].unique()):
    df_tmp = source.loc[source.bio_field == bf, :]
    # Per-bio-field separator rows ('<field>_zzz') for the y-axis, cloned
    # from one dataset's rows so the frame shape matches.
    df_ds = source.loc[source.Dataset == "cpp_sanders", :].copy(deep=True)
    df_ds.bio_field, df_ds.Dataset, df_ds.F1, df_ds.is_imbalanced = \
        bf, f"{bf}_zzz", "separator", 1.0
    df_tmp = pd.concat([df_tmp, df_ds]).sort_values("is_imbalanced")
    names_imbalanced += list(df_tmp.Dataset.unique())
    dfs += [df_tmp]
source = pd.concat([source, df_sb] + dfs)
source.reset_index(inplace=True)

# Drop the trailing separator block so the chart does not end with a gap.
last_ds_name = list(source.Dataset.unique())[-1]
source = source.drop(source[source.Dataset == last_ds_name].index)

# Axis ordering: sequence-based encodings first, then structure-based;
# datasets ordered by imbalance (separator names interleaved above).
names_seq = list(source.loc[source.type == "sequence based", "Encoding"].sort_values().unique())
names_str = list(source.loc[source.type == "structure based", "Encoding"].sort_values().unique())

# Hide the artificial 'zzz' separator labels on both axes.
x_axis = alt.Axis(
    tickCount=len(source.Encoding),
    labelExpr="datum.label == 'zzz' ? null : datum.label",
    labelAngle=-45
)
y_axis = alt.Axis(
    tickCount=len(source.Encoding),
    labelExpr="substring(datum.label, 4) == 'zzz' ? null : datum.label"
)
sort_x_axis = alt.Sort(alt.SortArray(names_seq + names_str))
sort_y_axis = alt.Sort(alt.SortArray(names_imbalanced))
tooltip = ["Encoding:N", "Dataset:N", "F1:Q", "is_imbalanced:Q"]

# Persist the prepared frame; the Altair charts reference it by URL rather
# than embedding the data inline.
url = snakemake.input[0].replace("source", "bio")
source.to_json(url, orient="records")

# Layer 1: F1-score heat map (blue scale).
chart1 = alt.Chart(url).mark_rect(opacity=0.9).encode(
    x=alt.X('Encoding:N', axis=x_axis, sort=sort_x_axis),
    y=alt.Y('Dataset:N', axis=y_axis, sort=sort_y_axis),
    color=alt.Color('F1:Q', scale=alt.Scale(
        domain=[0.0, 1.0], range=["#a6bddb", "#023858"]
    )),
    tooltip=tooltip
)
# Layer 2: missing F1 values rendered as brown 'NA' cells.  The calculate
# expression always yields 'NA' because the filter keeps only null-F1 rows.
chart2 = alt.Chart(url).mark_rect(size=RECT_SIZE).encode(
    x=alt.X('Encoding:N', axis=x_axis, sort=sort_x_axis),
    y=alt.Y('Dataset:N', axis=y_axis, sort=sort_y_axis),
    color=alt.Color(
        'F1_new:N',
        title="Value",
        scale=alt.Scale(domain=["NA"], range=["#a6611a"])
    ),
).transform_calculate(
    F1_new="datum.F1 == null ? 'NA' : 'NA'"
).transform_filter(
    alt.datum.F1 == None
).properties(
    height={"step": RECT_SIZE},
    width={"step": RECT_SIZE}
)
chart = (chart1 + chart2)
# Dump the layered chart object for a downstream Snakemake rule to render.
joblib.dump(chart, snakemake.output[0])
| spaenigs/peptidereactor | nodes/vis/mds_1_Overview/scripts/hm_bio.py | hm_bio.py | py | 2,481 | python | en | code | 7 | github-code | 36 | [
{
"api_name": "pandas.read_json",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "altair.Axis",
"line_n... |
42034957629 | import sys
import logging
import gi
gi.require_version('Gst', '1.0')
from gi.repository import GObject, Gst, GLib
Gst.init(None)
class Pipeline:
    """Minimal GStreamer demo pipeline.

    Chain: videotestsrc (pattern 18) -> capsfilter (800x600 raw video)
    -> timeoverlay -> autovideosink.  Bus messages optionally trigger
    GraphViz .dot snapshots of the pipeline graph, and EOS/error quit the
    supplied main loop.
    """

    def __init__(self, export_dot, mainloop):
        """Build the element chain and attach bus signal handlers.

        export_dot -- if truthy, dump a timestamped .dot file on interesting
                      bus messages.  NOTE(review): GStreamer only writes the
                      files when GST_DEBUG_DUMP_DOT_DIR is set -- confirm in
                      deployment.
        mainloop   -- GLib main loop, quit on EOS or error.
        """
        self.mainloop = mainloop
        self.logger = logging.getLogger(__name__)
        self.pipeline = Gst.Pipeline("pipeline")
        self.bus = self.pipeline.get_bus()
        self.bus.add_signal_watch()
        if export_dot:
            self.pipelinename = self.pipeline.get_name()
            # Snapshot the graph on every message kind export_dot handles.
            self.bus.connect("message::stream-status", self.export_dot)
            self.bus.connect("message::state-changed", self.export_dot)
            self.bus.connect("message::warning", self.export_dot)
            self.bus.connect("message::info", self.export_dot)
            self.bus.connect("message::eos", self.export_dot)
            self.bus.connect("message::error", self.export_dot)
        # Lifecycle handlers are attached unconditionally.
        self.bus.connect("message::eos", self.on_eos)
        self.bus.connect("message::error", self.on_error)
        # Create pipeline
        self.videosrc = Gst.ElementFactory.make("videotestsrc", None)
        # Numeric test-pattern id; NOTE(review): presumably the 'ball'
        # pattern -- confirm against the videotestsrc enum.
        self.videosrc.set_property("pattern", 18)
        self.pipeline.add(self.videosrc)
        self.videocaps = Gst.ElementFactory.make("capsfilter",None)
        self.videocaps.set_property("caps", Gst.Caps.from_string("video/x-raw,width=800,height=600"))
        self.pipeline.add(self.videocaps)
        self.videosrc.link(self.videocaps)
        self.timeoverlay = Gst.ElementFactory.make("timeoverlay", None)
        self.pipeline.add(self.timeoverlay)
        self.videocaps.link(self.timeoverlay)
        self.videosink = Gst.ElementFactory.make("autovideosink", None)
        self.pipeline.add(self.videosink)
        self.timeoverlay.link(self.videosink)

    def export_dot(self, bus, message):
        """Dump a timestamped .dot snapshot named after the bus message kind.

        Messages not originating from the pipeline element itself are
        ignored.
        """
        if message.src.name != self.pipelinename:
            return
        elif message.type == Gst.MessageType.ERROR:
            Gst.debug_bin_to_dot_file_with_ts(self.pipeline, Gst.DebugGraphDetails.ALL, "error")
        elif message.type == Gst.MessageType.WARNING:
            Gst.debug_bin_to_dot_file_with_ts(self.pipeline, Gst.DebugGraphDetails.ALL, "warning")
        elif message.type == Gst.MessageType.INFO:
            Gst.debug_bin_to_dot_file_with_ts(self.pipeline, Gst.DebugGraphDetails.ALL, "info")
        elif message.type == Gst.MessageType.STREAM_STATUS:
            status, _ = message.parse_stream_status()
            Gst.debug_bin_to_dot_file_with_ts(self.pipeline, Gst.DebugGraphDetails.ALL, status.value_nick)
        elif message.type == Gst.MessageType.STATE_CHANGED:
            old, new, pending = message.parse_state_changed()
            Gst.debug_bin_to_dot_file_with_ts(self.pipeline, Gst.DebugGraphDetails.ALL, old.value_nick + "_" + new.value_nick)
        self.logger.info("New dot file exported.")

    def on_eos(self, bus, msg):
        """Tear the pipeline down and quit the main loop on end-of-stream."""
        self.logger.info("EOS, exit application.")
        self.bus.remove_signal_watch()
        self.pipeline.set_state(Gst.State.NULL)
        self.pipeline = None
        self.mainloop.quit()

    def on_error(self, bus, message):
        """Log the bus error and quit the main loop (pipeline left as-is)."""
        self.logger.error("error detected, quit application.")
        err, _=message.parse_error()
        Gst.error("error detected, quit application: {}".format(err.message))
        self.mainloop.quit()

    def run(self):
        """Start playback (READY, then PLAYING)."""
        self.pipeline.set_state(Gst.State.READY)
        self.pipeline.set_state(Gst.State.PLAYING)
| gscigala/packet-generation | fpm/test_sample/pipeline.py | pipeline.py | py | 3,441 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "gi.require_version",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "gi.repository.Gst.init",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "gi.repository.Gst",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "logging.getL... |
1597831218 | import numpy as np
import astropy.units as u
from astropy.time import Time
from astropy.table import QTable
import sys
if '/home/serafinnadeau/Python/packages/scintillometry/' not in sys.path:
sys.path.append('/home/serafinnadeau/Python/packages/scintillometry/')
from numpy.lib.format import open_memmap
import os
import time
from utils import DispersionMeasure, imshift
def master(stream2D, w=31250, dm=56.7, s_per_sample=2.56e-4, verbose=True, prometheus=False):
    '''
    Takes a memmap 2D stream and converts it to a 1D dedispersed intensity
    stream.
    It does this chunk by chunk, with the time width of each chunk defined by
    the input parameter w and dedisperses to the input dm parameter.

    Parameters
    ----------
    stream2D : 2D array-like (e.g. memmap)
        Dynamic spectrum; NOTE(review): appears to be indexed (time, freq)
        with 1024 channels (read_chunk transposes it) -- confirm upstream.
    w : int
        Time samples read per chunk, including the tail later discarded
        because of the dedispersion shift.
    dm : float
        Dispersion measure; wrapped in DispersionMeasure below.
    s_per_sample : float
        Seconds per time sample of stream2D.
    verbose : bool
        Print per-chunk progress via verbose_print.
    prometheus : bool or tuple
        Forwarded to verbose_print for optional metric export.

    Returns
    -------
    numpy.ndarray
        1D band-summed, dedispersed intensity stream (length N * w_eff).
    '''
    # Obtain data for w samples at a time, accounting for lost samples due to
    # dedispersion.
    dm = DispersionMeasure(dm)
    # Total dispersive delay across the 800-400 MHz band: samples within
    # this delay of a chunk's end are invalid after shifting.
    dt = dm.time_delay(800*u.MHz, 400*u.MHz)
    time_waste = int(abs(dt.value / s_per_sample) + 1)
    print(f'{time_waste} samples lost at the end of array due to dedispersion')
    w_eff = w - time_waste # the effective width of each read chunk after dedispersion
    N = int(len(stream2D) / w_eff) # the number chunks to be read in
    stream1D = np.zeros(N * w_eff)
    mask1D = np.zeros(N * w_eff)  # NOTE(review): never used below
    if verbose:
        t0 = time.time()
        chunk_n = -1
        verbose_print(chunk_n, N, t0, extra='', prometheus=prometheus)
    for chunk_n in range(N):
        sample_min = select_chunk(chunk_n, w_eff) # Calculate starting time bin of chunk
        if verbose:
            verbose_print(chunk_n, N, t0, extra='Reading')
        chunk = read_chunk(stream2D, w, sample_min) # Read in chunk
        if chunk_n == 0:
            # The RFI channel mask is derived from the first chunk only and
            # reused for every subsequent chunk.
            rfi_mask = rfi_bands(chunk.T - np.mean(chunk.T, axis=0, keepdims=True))
            print('')
            print(np.sum(rfi_mask))
            print('')
        #if verbose:
        #    verbose_print(chunk_n, N, t0, extra='Masking')
        #chunk = mask_chunk(chunk)
        if verbose:
            verbose_print(chunk_n, N, t0, extra='Dedispersing')
        chunk = dedisperse_chunk(chunk, dm, s_per_sample)
        if verbose:
            verbose_print(chunk_n, N, t0, extra='Adding', prometheus=prometheus)
        # Zero RFI channels, band-average, and write the chunk's first
        # w_eff (valid) samples into the output stream.
        stream1D = fuse_chunk(stream1D, chunk, sample_min, w_eff, rfi_mask)
    if verbose:
        verbose_print(chunk_n, N, t0, extra='Complete', rewrite=False, prometheus=prometheus)
    print('')
    return stream1D
def verbose_print(current, total, t0, extra='', prometheus=False, rewrite=True):
    """Print a one-line progress report and optionally export Prometheus gauges.

    current    -- zero-based index of the chunk just processed
    total      -- total number of chunks
    t0         -- wall-clock start time (seconds, as returned by time.time())
    extra      -- short status label appended to the line
    prometheus -- False, or a (stage, promdir, registry, completion, timing) tuple
    rewrite    -- overwrite the line in place (\\r) instead of ending it (\\n)
    """
    now = time.time()
    elapsed = int(now - t0)
    hh, remainder = divmod(elapsed, 3600)
    mm, ss = divmod(remainder, 60)
    terminator = '          \r' if rewrite else '          \n'
    print(f'{current+1:0{len(str(total))}}/{total}: '
          f'{(current+1)*100/total:05.2f}% complete'
          f' -- {hh:02d}:{mm:02d}:{ss:02d} elapsed -- {extra: <20}        ',
          end=terminator)
    if prometheus:
        # Lazy import: prometheus_client is only needed when metrics are on.
        import prometheus_client as pc
        stage, promdir, registry, completion, timing = prometheus
        completion.labels(stage=stage).set((current+1)*100/total)
        timing.labels(stage=stage).set((now - t0)/3600)
        pc.write_to_textfile(promdir + f'crab_monitoring_{stage}.prom', registry)
    return
def rollingmean(x, w, edge=False):
    """Rolling mean of ``x`` over a window of ``w`` samples (same length).

    If ``edge`` is a positive int, the first/last ``edge`` samples --
    where the convolution window hangs off the array -- are flattened to
    a nearby interior value.
    """
    kernel = np.full(w, 1.0 / w)
    smoothed = np.convolve(x, kernel, mode='same')
    if edge:
        smoothed[:edge] = smoothed[edge + 1]
        smoothed[-edge:] = smoothed[-edge - 1]
    return smoothed
def select_chunk(chunk_n, w_eff):
    """Return the first time-sample index of chunk number ``chunk_n``.

    Chunks tile the stream in steps of ``w_eff`` effective samples.
    """
    return chunk_n * w_eff
def read_chunk(stream2D, w, sample_min):
    """Copy ``w`` consecutive time samples starting at ``sample_min``.

    The slice is copied (``* 1``) so the backing memmap is never mutated,
    and the result is returned in (frequency, time) orientation: a
    (time, 1024) slice is transposed so frequency runs along axis 0.
    """
    window = stream2D[sample_min:sample_min + w] * 1
    # A second axis of length 1024 means (time, freq) layout: transpose it.
    if np.shape(window)[1] == 1024:
        window = window.transpose(1, 0)
    return window
def mask_chunk(chunk):
    """Fill zero/NaN-masked samples with their channel's mean, in place.

    One row per frequency channel: masked samples are excluded from the
    mean; if an entire channel is masked, its gaps are filled with zero.
    Returns the same (mutated) array.
    """
    for row in chunk:                     # row is a view into chunk
        bad = (row == 0) + np.isnan(row)
        if np.sum(bad) == 0:
            continue
        scratch = row * 1
        scratch[bad] = np.nan             # exclude masked samples from the mean
        fill = np.nanmean(scratch)
        row[bad] = 0 if np.isnan(fill) else fill
    return chunk
def dedisperse_chunk(chunk, dm, s_per_sample):
    """Shift each frequency channel to undo the dispersive delay.

    Channels run 800 -> 400 MHz along axis 0; the per-channel delay
    relative to 800 MHz is converted to samples and applied with imshift.
    """
    band = np.linspace(800, 400, 1024, endpoint=False) * u.MHz
    delay = dm.time_delay(800 * u.MHz, band)
    return imshift(chunk * 1, shiftc=delay.value / s_per_sample)
def fuse_chunk(stream1D, chunk, sample_min, w_eff, rfi=False):
    '''
    Band-average a dedispersed chunk into the 1D intensity stream.

    Parameters
    ----------
    stream1D : np.ndarray
        Output stream, written in place (also returned).
    chunk : np.ndarray
        (freq, time) chunk; mutated in place when RFI channels are zeroed.
    sample_min : int
        Starting index of this chunk in stream1D.
    w_eff : int
        Number of valid (non-dedispersion-wasted) samples to keep.
    rfi : np.ndarray or False
        Optional boolean mask of RFI channels to zero before averaging.
    '''
    # Use isinstance rather than `type(...) ==` for the array check (idiom fix).
    if isinstance(rfi, np.ndarray):
        chunk[rfi, :] = 0
    # Subtract each channel's mean (computed in float32) before summing.
    chunk = chunk - np.nanmean(chunk.astype(np.float32), axis=1, keepdims=True)
    stream = np.sum(chunk, axis=0)
    stream1D[sample_min:sample_min + w_eff] = stream[:w_eff]
    return stream1D
def correct_stream(stream1D, savedir, N=1000):
    '''
    Flatten the background of the intensity stream and save it as a memmap.

    Iteratively (three times) z-score normalizes the stream and subtracts
    its rolling mean over an N-sample window, then normalizes once more.
    The original copy-pasted the normalize/detrend step three times; it is
    now a loop with identical behavior.

    Parameters
    ----------
    stream1D : np.ndarray
        Dedispersed 1D intensity stream (not modified in place).
    savedir : str
        Directory where 'istream_corr.npy' is written.
    N : int
        Rolling-mean window (and edge-flattening width) in samples.

    Returns
    -------
    numpy.memmap
        float32 memmap backed by savedir + 'istream_corr.npy'.
    '''
    snr = stream1D
    for _ in range(3):
        snr = (snr - np.nanmean(snr)) / np.nanstd(snr)   # z-score normalize
        snr = snr - rollingmean(snr, N, edge=N)          # remove slow baseline
    # Final normalization so the result is in sigma units.
    snr = (snr - np.nanmean(snr)) / np.nanstd(snr)
    out = open_memmap(savedir + 'istream_corr.npy', dtype=np.float32, mode='w+',
                      shape=np.shape(snr))
    out[:] = snr
    return out
def streamsearch(stream1D, splittab, cutoff, banddir, savedir, datestr, timestr,
                 Nmax=False, Nmin=1024, dm=DispersionMeasure(56.7), output=False):
    '''
    Searches the corrected 1D stream for signals stronger than 'cutoff' sigma
    and writes the candidates to ``savedir + 'search_tab.hdf5'``.

    Parameters
    ----------
    stream1D : np.ndarray
        Baseline-corrected, dedispersed intensity stream to search.
    splittab : astropy table
        Its .meta carries the observation bookkeeping (start time,
        file/frame counts, sample cadence, binning).
    cutoff : float
        Detection threshold in sigma.
    banddir, datestr, timestr : str
        Used only to record data provenance in the output table history.
    savedir : str
        Directory the result table is written to.
    Nmax : int or False
        If set, stop recording new candidates after Nmax detections.
    Nmin : int
        Keep searching until at least this many candidates are recorded,
        even below the sigma cutoff.
    dm : DispersionMeasure
        Recorded in the output metadata (the stream is assumed already
        dedispersed).  NOTE(review): the default is evaluated once at
        import time; harmless only if DispersionMeasure is immutable.
    output : bool
        If True, also return the candidate table.
    '''
    POS = []
    SNR = []
    snr_search = stream1D * 1  # working copy; found peaks get blanked in it
    # Reconstruct the absolute timeline from the split-table metadata.
    start_time = Time(splittab.meta['start_time'], format='isot', precision=9)
    n_files = splittab.meta['vdif_files']
    startskip = splittab.meta['start_skipped']
    endskip = splittab.meta['end_skipped']
    filecount = n_files - (startskip + endskip)
    n_frames = splittab.meta['frames_per_file'] * filecount
    samples_per_frame = splittab.meta['samples_per_frame']
    binning = splittab.meta['binning']
    s_per_sample = splittab.meta['s_per_sample'] * binning
    nsamples = n_frames * samples_per_frame
    # Seed with the global maximum; local S/N uses +-150 samples with the
    # +-30 samples around the peak blanked to NaN.
    pos = np.nanargmax(snr_search)
    signal = snr_search[pos]
    snr_search[pos-30:pos+30]= np.nan
    snr = (signal - np.nanmean(snr_search[pos-150:pos+150])) / np.nanstd(snr_search[pos-150:pos+150])
    i = 0
    t0 = time.time()
    # Blank the leading ~1.11 s (2.56 us raw cadence binned by 100).
    # NOTE(review): presumably masks a known startup artefact -- confirm.
    snr_search[:int(1.11/2.56e-6/100)] = 0
    # Greedy peak removal: pull out the strongest remaining peak until it
    # drops below the cutoff (but always record at least Nmin candidates).
    while (snr > cutoff) or (len(POS) < Nmin):
        if (len(POS) < Nmax) or (not Nmax):
            POS += [pos]
            SNR += [snr]
        snr_search[pos-30:pos+30] = 0
        pos = np.nanargmax(snr_search)
        signal = snr_search[pos]
        snr_search[pos-30:pos+30] = np.nan
        snr = (signal - np.nanmean(snr_search[pos-150:pos+150])) / np.nanstd(snr_search[pos-150:pos+150])
        i += 1
        t = time.time() - t0
        m, s = divmod(t, 60)
        h, m = divmod(m, 60)
        #print(f'Intensity stream searched for pulses: {len(POS)} pulses found -- '
        #      f'S/N: {snr:.3f} -- POS: {pos*100*2.56e-6:.3f} -- Time elapsed: '
        #      f'{int(h):02d}:{int(m):02d}:{int(s):02d}', end='                  \r')
    print(f'Intenisty stream searched for pulses: {len(POS)} pulses found                  ')
    POS = np.array(POS)
    TIME_S = POS * s_per_sample  # candidate offsets from start, in seconds
    SNR = np.array(SNR)
    MJD = start_time + TIME_S * u.s  # NOTE(review): computed but never used
    # Create Table of GPs to be saved
    tab = QTable()
    tab.meta = splittab.meta
    tab['time'] = (TIME_S * u.s + start_time).isot
    tab['off_s'] = TIME_S * u.s
    tab['pos'] = POS
    tab['snr'] = SNR
    tab.sort('pos')
    tab.meta['DM'] = dm.value
    tab.meta['binning'] = 100
    tab.meta['sigma'] = cutoff
    tab.meta['start'] = start_time.isot
    tab.meta['nsamples'] = nsamples
    tab.meta['history'] = ['Intensity stream i_stream.npy saved from ChannelSplit'
                           f' on vdif files {banddir}*/{datestr}T{timestr}'
                           'Z_chime_psr_vdif/*',
                           'i_stream.npy dedispersed and searched for giant pulses']
    tab.write(savedir+f'search_tab.hdf5', path='search_info', overwrite=True)
    if output:
        return tab
    return
def rfi_bands(data, Nsigma=4, plot=False):
    '''
    Flag RFI-contaminated frequency channels of a (time, freq) spectrum.

    A channel is flagged when its time-std is exactly zero (dead channel),
    or when its bandpass-subtracted median deviates by more than Nsigma
    standard deviations; the sigma clip iterates until the flagged set
    stops growing.  NOTE(review): if no channel has zero std, the loop
    body never runs (counts start equal), so no sigma clipping happens --
    preserved as-is since callers may rely on it.

    Parameters
    ----------
    data : np.ndarray
        2D dynamic spectrum, time along axis 0, frequency along axis 1.
    Nsigma : float
        Clipping threshold in standard deviations.
    plot : bool
        If True, plot the surviving channel medians and the clip bounds.

    Returns
    -------
    np.ndarray
        Boolean mask over channels; True marks an RFI channel.
    '''
    normdata = data.astype(np.float32) - np.mean(data.astype(np.float32), axis=0, keepdims=True)
    std = np.std(normdata.astype(np.float32), axis=0)
    x = np.arange(0, std.shape[0])
    normmedian = np.median(normdata.astype(np.float32), axis=0)
    rfi_mask = std == 0  # dead channels: no time variation at all
    rfi_count = np.sum(rfi_mask)
    rfi_count_old = 0
    norm_temp = normmedian*1
    # Iterative sigma clipping: re-estimate the spread from surviving
    # channels until no new channel gets flagged.
    while rfi_count != rfi_count_old:
        norm_temp -= np.nanmedian(norm_temp)
        cut = np.nanstd(norm_temp)
        rfi_mask += (norm_temp > Nsigma * cut) + (norm_temp < -Nsigma * cut)
        norm_temp[rfi_mask] = np.nan
        rfi_count_old = rfi_count * 1
        rfi_count = np.sum(rfi_mask)
    if plot:
        # BUG FIX: matplotlib was used here without ever being imported, so
        # plot=True raised NameError.  Import lazily to keep the module
        # import-light when plotting is not requested.
        import matplotlib.pyplot as plt
        plt.figure(figsize=[18, 6])
        plt.plot(x[~rfi_mask], norm_temp[~rfi_mask])
        plt.hlines([-cut * Nsigma, cut * Nsigma], 0, 1024)
        plt.pause(0.1)
    return rfi_mask
| Seraf-N/SinglePulseTasks | single_pulse_task/StreamSearch_utils.py | StreamSearch_utils.py | py | 10,765 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "sys.path",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "sys.path.append",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "utils.DispersionMeasure",
... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.