id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
6653663 | <filename>update.py
# -*- coding: utf-8 -*-
#!/usr/bin/python3
#@author xuan
#@created 2019/10/19
#@desciption Complete tasks at regular intervals
#system lib
import time
import os
import configparser
import logging
import MySQLdb
from apscheduler.schedulers.background import BackgroundScheduler
from DBUtils.PooledDB import PooledDB
#my lib
from downloader import downloader
import conf
import pool
# Configure the database connection pool (5 cached connections; note the
# variable name carries a historical typo "connectoin" that callers rely on).
db_connectoin_pool = PooledDB(MySQLdb, 5, host=conf.mysql_host, user = conf.mysql_user, passwd = conf.mysql_password, db = conf.mysql_db, port = 3306)
# Module-wide logger for the update/scheduler flow.
logger = logging.getLogger("update_logger")
logger.setLevel(logging.INFO)
# Separate logger handed to each downloader instance.
download_logger = logging.getLogger("download_logger")
download_logger.setLevel(logging.INFO)
# Shared proxy mapping; rebound daily by the __main__ loop below.
proxies = {}
def get_handler(log_file_name):
    """Build an INFO-level, UTF-8 file handler for a logger.

    Every log file in this project shares the same
    "<time> - <logger name> - <level> - <message>" record layout.
    """
    file_handler = logging.FileHandler(log_file_name, encoding='UTF-8')
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(
        logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    )
    return file_handler
#get a database connection
def get_db():
    # Borrow a connection from the shared pool; the wrapper's close()
    # returns it to the pool rather than closing the socket.
    db = db_connectoin_pool.connection()
    #db = pymysql.connect(conf.mysql_host, conf.mysql_user, conf.mysql_password, conf.mysql_db)
    cursor = db.cursor()
    # Returns (connection, cursor); callers must pass both to close_db().
    return db,cursor
def close_db(db, cursor):
    """Release a cursor, then hand its connection back to the pool."""
    cursor.close()
    db.close()
# Download or update each fiction in the given list.
def update_fictions(fiction_list):
    """Download/refresh every fiction in *fiction_list*.

    Each item is a DB row tuple: (id, url, num, updating).
    """
    logger.info('proxies:'+str(proxies))
    for fiction in fiction_list:
        # A fresh per-fiction log handler must be attached every iteration
        # (and removed at the end) so each fiction logs to its own file.
        download_handler = get_handler(conf.log_dir + str(fiction[0]) + '.log')
        download_logger.addHandler(download_handler)
        path = conf.fiction_dir + str(fiction[0])
        try:
            # Folder that stores this fiction; create it if missing.
            os.mkdir(path)
        except FileExistsError:
            pass
        try:
            db, cursor = get_db()
            # Mark the fiction as "updating" so other tasks skip it.
            sql = 'update fiction set updating = 1 where id = %s' % fiction[0]
            cursor.execute(sql)
            db.commit()
        except Exception as e:
            # NOTE(review): if get_db() itself raised, `db` is unbound here
            # and this rollback would raise NameError — confirm intent.
            logger.error("database error, and rollback", exc_info = True)
            db.rollback()
        close_db(db, cursor)
        logger.info("downloading fiction id:"+str(fiction[0])+",url:"+fiction[1])
        dl = downloader(url = fiction[1], num = fiction[2], path = path, fiction_id = fiction[0],logger = download_logger, proxies = proxies)
        num = dl.update()
        try:
            # Persist the new chapter count and clear the "updating" flag.
            sql = 'update fiction set num = %s,updating=0 where id = %s'
            db, cursor = get_db()
            cursor.execute(sql, (num, fiction[0]))
            db.commit()
        except Exception as e:
            logger.error("database error, and rollback", exc_info = True)
            db.rollback()
        close_db(db, cursor)
        download_logger.removeHandler(download_handler)
# Update fictions newly added since the last run.
def update_new_fictions():
    """Download fictions whose id is greater than the checkpoint stored in config.conf."""
    # Read the configuration file holding the last-processed fiction id.
    cf = configparser.ConfigParser()
    cf.read("config.conf")
    options = cf.options('fiction')
    # id of the last fiction handled by the previous run
    last_fiction = cf.getint('fiction', options[0])
    fiction_list = []
    db, cursor = get_db()
    sql = "select id,url,num,updating from fiction where id > %s order by id" % last_fiction
    cursor.execute(sql)
    results = cursor.fetchall()
    last_fiction_id = last_fiction
    for row in results:
        if row[3] == 0:
            # updating == 0: not currently being handled by another task
            fiction_list.append(row)
        # Advance the checkpoint past every seen row, including ones another
        # task is already downloading (assumed intentional — TODO confirm).
        last_fiction_id = row[0]
    # Persist the highest seen id so the next run starts after it.
    cf.set('fiction', options[0], str(last_fiction_id))
    with open('config.conf','w') as f:
        cf.write(f)
    logger.info('update new fictions')
    # Release the database connection before the (slow) download phase.
    close_db(db, cursor)
    # Download the queued fictions.
    update_fictions(fiction_list)
def update_all_fictions():
    """Re-download every fiction in the database that is not already
    being handled by another task (rows with updating == 0)."""
    db, cursor = get_db()
    cursor.execute("select id,url,num,updating from fiction")
    pending = [row for row in cursor.fetchall() if row[3] == 0]
    logger.info("update all fictions")
    # Release the database connection before the (slow) download phase.
    close_db(db, cursor)
    update_fictions(pending)
#create a scheduled task, which runs once a minute
def start_minute_task():
    # Background job: poll for newly added fictions every 60 seconds.
    scheduler = BackgroundScheduler()
    scheduler.add_job(update_new_fictions, 'interval', seconds = 60)
    scheduler.start()
#create a scheduled task, which runs once a day
def start_day_task():
    # Cron-style job: refresh every fiction at midnight, every day of the week.
    scheduler = BackgroundScheduler()
    scheduler.add_job(update_all_fictions, 'cron', day_of_week = '0-6', hour=0, minute = 0, second = 0)
    scheduler.start()
#set up two scheduled tasks
def start():
    """Start both background schedules (per-minute poll and daily refresh)."""
    start_minute_task()
    start_day_task()
if __name__ == '__main__':
    # Route the module logger to the configured update log file.
    handler = get_handler(conf.update_log_file)
    logger.addHandler(handler)
    try:
        start()
        # Main thread stays alive for the background schedulers and
        # refreshes the shared proxy pool once per day.
        while True:
            proxyPool = pool.Pool()
            proxies = proxyPool.pool()
            logger.info("system is normal")
            time.sleep(86400)
    except Exception as e:
        logger.error("system error", exc_info = True)
| StarcoderdataPython |
11291693 | from django.db import models, migrations
class Migration(migrations.Migration):
    """Drop the unused ``start_workflow`` field from ``UserProfile``."""

    dependencies = [
        ('users', '0003_auto_20151219_1141'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='userprofile',
            name='start_workflow',
        ),
    ]
| StarcoderdataPython |
1824747 | <reponame>58563528/omega-miya
import imaplib
import email
import hashlib
from email.header import Header
from typing import List
class Email(object):
    """A parsed mail message plus an MD5 fingerprint for de-duplication."""

    def __init__(self, date: str, header: str, sender: str, to: str, body: str = '', html: str = ''):
        self.date = date
        self.header = header
        self.sender = sender
        self.to = to
        self.body = body
        self.html = html
        # Fingerprint only the envelope fields (date/subject/from/to) so the
        # same message fetched twice hashes identically regardless of body.
        digest = hashlib.md5(str([date, header, sender, to]).encode('utf-8'))
        self.hash = digest.hexdigest()

    def __repr__(self):
        return f'<Email(header={self.header}, _from={self.sender}, to={self.to}' \
               f"\n\nbody={self.body}\n\nhtml={self.html})>"
class EmailImap(object):
    """Thin IMAP-over-SSL client that fetches and decodes messages into Email objects."""

    def __init__(self, host: str, address: str, password: str, port: int = 993):
        self.__mail = imaplib.IMAP4_SSL(host=host, port=port)
        self.__address = address
        self.__password = password

    def __enter__(self):
        """Context-manager entry: log in and expose the raw imaplib handle."""
        self.__mail.login(self.__address, self.__password)
        return self.__mail

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Context-manager exit: close the mailbox, log out and swallow exceptions.

        NOTE(review): returning True suppresses any exception raised in the
        ``with`` body — confirm that is intended.
        """
        self.__mail.select()
        if self.__mail.state == 'SELECTED':
            self.__mail.close()
        self.__mail.logout()
        return True

    def get_mail_info(self, charset, *criteria) -> List[Email]:
        """Search the INBOX with IMAP *criteria* and return decoded Email objects."""
        self.__mail.login(self.__address, self.__password)
        if self.__address.endswith('@163.com'):
            # 163.com requires an IMAP ID handshake before SELECT is allowed.
            imaplib.Commands['ID'] = ('AUTH',)
            args = ("name", "omega", "contact", "<EMAIL>", "version", "1.0.2", "vendor", "pyimaplibclient")
            typ, dat = self.__mail._simple_command('ID', '("' + '" "'.join(args) + '")')
            self.__mail._untagged_response(typ, dat, 'ID')
        self.__mail.select()
        typ, msg_nums = self.__mail.search(charset, *criteria)
        msg_nums = str(msg_nums[0], encoding='utf8')
        result_list = []
        # Iterate over every matched message number.
        for num in msg_nums.split(' '):
            if num == '':
                continue
            stat_code, data = self.__mail.fetch(num, 'RFC822')
            msg = email.message_from_bytes(data[0][1])
            # --- parse the message ---
            # Date
            date = email.header.decode_header(msg.get('Date'))[0][0]
            date = str(date)
            # Subject (decoded with its declared charset)
            header, charset = email.header.decode_header(msg.get('subject'))[0]
            header = str(header, encoding=charset)
            # Sender: may arrive in several encoded chunks; decode and join.
            sender_info = email.header.decode_header(msg.get('from'))
            sender = ''
            for sender_text, charset in sender_info:
                if charset and type(sender_text) == bytes:
                    sender_text = str(sender_text, encoding=charset)
                    sender += sender_text
                elif type(sender_text) == bytes:
                    sender_text = str(sender_text, encoding='utf8')
                    sender += sender_text
                else:
                    sender += sender_text
            # Recipient: same chunked decoding as the sender.
            receiver_info = email.header.decode_header(msg.get('to'))
            receiver = ''
            for receiver_text, charset in receiver_info:
                if charset and type(receiver_text) == bytes:
                    receiver_text = str(receiver_text, encoding=charset)
                    receiver += receiver_text
                elif type(receiver_text) == bytes:
                    receiver_text = str(receiver_text, encoding='utf8')
                    receiver += receiver_text
                else:
                    receiver += receiver_text
            body = None
            html = None
            # Walk MIME parts, keeping the last text/plain and text/html parts.
            for part in msg.walk():
                if part.get_content_type() == "text/plain":
                    charset = part.get_content_charset()
                    body = part.get_payload(decode=True)
                    if not body:
                        continue
                    if charset and type(body) == bytes:
                        body = str(body, encoding=charset)
                    elif type(body) == bytes:
                        body = str(body, encoding='utf8')
                    else:
                        body = str(body)
                    body = body.replace(r' ', '\n')
                elif part.get_content_type() == "text/html":
                    charset = part.get_content_charset()
                    html = part.get_payload(decode=True)
                    if not html:
                        continue
                    if charset and type(html) == bytes:
                        html = str(html, encoding=charset)
                    elif type(html) == bytes:
                        html = str(html, encoding='utf8')
                    else:
                        html = str(html)
                    html = html.replace(' ', '')
                else:
                    pass
            result_list.append(Email(date=date, header=header, sender=sender, to=receiver, body=body, html=html))
        return result_list
| StarcoderdataPython |
6507069 | <reponame>codelieche/kanban
"""
页面相关的序列化
"""
from rest_framework import serializers
from account.models import User
from account.tasks.message import send_message
from docs.models.article import Article
class ArticleModelSerializer(serializers.ModelSerializer):
    """
    Article Model Serializer (create/update payloads).
    """
    user = serializers.SlugRelatedField(slug_field="username",
                                        queryset=User.objects.all(), required=False)

    def validate(self, attrs):
        # On creation, force the author to be the requesting user.
        if self.context["request"].method == "POST":
            user = self.context["request"].user
            attrs["user"] = user
        # If a parent article was given, inherit its group/category.
        if "parent" in attrs:
            parent = attrs["parent"]
            if parent:
                attrs["group"] = parent.group
        return attrs

    def create(self, validated_data):
        # Delegate to the parent class for the actual insert.
        instance = super().create(validated_data=validated_data)
        # On success, notify the author via the internal message system.
        link = "/docs/article/{}".format(instance.id)
        send_message(
            user=validated_data["user"], title="创建文章成功", content="创建文章成功",
            link=link, sender="system", scope="docs", website=True, dingding=False
        )
        return instance

    def update(self, instance, validated_data):
        instance = super().update(instance=instance, validated_data=validated_data)
        return instance

    def get_fields(self):
        fields = super().get_fields()
        # Placeholder hook for child-page handling; currently a no-op.
        return fields

    class Meta:
        model = Article
        fields = (
            "id", "title", "group", "icon", "description", "cover",
            "user", "parent", "infovalues", "time_added", "time_updated",
            "content", "order", "level", "is_active"
        )
class ArticleParentInfoSerializer(serializers.ModelSerializer):
    """
    Parent (ancestor) info of an article, nested recursively.
    """

    def get_fields(self):
        fields = super().get_fields()
        # Recursive nesting: each parent serializes its own parent in turn.
        fields["parent"] = ArticleParentInfoSerializer(read_only=True, required=False)
        return fields

    class Meta:
        model = Article
        fields = ("id", "title", "parent")
class ArticleListModelSerializer(serializers.ModelSerializer):
    """
    Article List Model Serializer (read-only listing; omits heavy content field).
    """
    user = serializers.SlugRelatedField(slug_field="username", read_only=True, required=False)
    parent = ArticleParentInfoSerializer(read_only=True, required=False)

    def get_fields(self):
        fields = super().get_fields()
        # Placeholder hook for child-page handling; currently a no-op.
        return fields

    class Meta:
        model = Article
        fields = (
            "id", "title", "group", "icon", "description", "cover",
            "user", "parent", "infovalues", "time_added",
            "order", "level"
        )
class ArticleDetailSerializer(serializers.ModelSerializer):
    """
    Article Detail Model Serializer (full content plus nested children).
    """
    parent = ArticleParentInfoSerializer(read_only=True, required=False)
    user = serializers.SlugRelatedField(read_only=True, required=False, slug_field="username")

    def validate(self, attrs):
        # On creation, force the author to be the requesting user.
        if self.context["request"].method == "POST":
            user = self.context["request"].user
            attrs["user"] = user
        # If a parent article was given, inherit its group/category.
        if "parent" in attrs:
            parent = attrs["parent"]
            if parent:
                attrs["group"] = parent.group
        return attrs

    def get_fields(self):
        fields = super().get_fields()
        # Serialize child articles recursively with this same serializer.
        fields["children"] = ArticleDetailSerializer(many=True, required=False, read_only=True)
        return fields

    class Meta:
        model = Article
        fields = (
            "id", "title", "group", "icon", "description", "cover",
            "user", "parent", "infovalues",
            "content", "order", "level", "is_active", "time_added", "time_updated"
        )
class ArticleAllSerializer(serializers.ModelSerializer):
    """
    Full article tree for the left-hand navigation sidebar.
    """
    user = serializers.SlugRelatedField(slug_field="username", read_only=True)

    def get_fields(self):
        fields = super().get_fields()
        # Recursively include children so the whole tree serializes at once.
        fields["children"] = ArticleAllSerializer(many=True, read_only=True)
        return fields

    class Meta:
        model = Article
        fields = (
            "id", "title", "icon", "group", "user", "order", "level", "children"
        )
class ArticleWithInfovaluesListSerializer(serializers.ModelSerializer):
    """
    Compact article listing: only id, title and attribute values.
    """

    class Meta:
        model = Article
        fields = ("id", "title", "infovalues")
| StarcoderdataPython |
5097488 | <reponame>ericflo/django-couch-lifestream
# Not much to see here. We're using CouchDB :) | StarcoderdataPython |
9653217 | <reponame>Into-Y0u/Github-Baby
class Solution:
    def removeDuplicates(self, s: str, k: int) -> str:
        """Repeatedly delete every run of k equal adjacent characters from s.

        Keeps a stack of [character, run length] pairs; a run is discarded
        the moment its length reaches k, which lets the preceding run merge
        with following equal characters. O(n) time, O(n) space.
        """
        runs = []  # stack of [char, current adjacent count]
        for ch in s:
            if runs and runs[-1][0] == ch:
                runs[-1][1] += 1
                if runs[-1][1] == k:
                    runs.pop()
            else:
                runs.append([ch, 1])
        return "".join(ch * count for ch, count in runs)
| StarcoderdataPython |
3300547 | import os
import logging
import nemo
import nemo.collections.asr as nemo_asr
from label_studio_ml.model import LabelStudioMLBase
from label_studio_ml.utils import DATA_UNDEFINED_NAME
logger = logging.getLogger(__name__)
class NemoASR(LabelStudioMLBase):
    """Label Studio ML backend that pre-annotates audio with NeMo ASR transcripts."""

    def __init__(self, model_name='QuartzNet15x5Base-En', **kwargs):
        super(NemoASR, self).__init__(**kwargs)
        # Find TextArea control tag and bind ASR model to it
        self.from_name, self.to_name, self.value = self._bind_to_textarea()
        # This line will download pre-trained QuartzNet15x5 model from NVIDIA's NGC cloud and instantiate it for you
        self.model = nemo_asr.models.EncDecCTCModel.from_pretrained(model_name=model_name)

    def predict(self, tasks, **kwargs):
        """Transcribe each task's audio and return Label Studio prediction dicts."""
        output = []
        audio_paths = []
        # Resolve (and cache locally) every task's audio file first.
        for task in tasks:
            audio_url = task['data'].get(self.value) or task['data'].get(DATA_UNDEFINED_NAME)
            audio_path = self.get_local_path(audio_url)
            audio_paths.append(audio_path)
        # run ASR on the whole batch at once
        transcriptions = self.model.transcribe(paths2audio_files=audio_paths)
        # Results come back in the same order as the input tasks.
        for transcription in transcriptions:
            output.append({
                'result': [{
                    'from_name': self.from_name,
                    'to_name': self.to_name,
                    'type': 'textarea',
                    'value': {
                        'text': [transcription]
                    }
                }],
                'score': 1.0
            })
        return output

    def _bind_to_textarea(self):
        """Locate the TextArea control tag (and its Audio input) in the label config."""
        from_name, to_name, value = None, None, None
        for tag_name, tag_info in self.parsed_label_config.items():
            if tag_info['type'] == 'TextArea':
                from_name = tag_name
                if len(tag_info['inputs']) > 1:
                    logger.warning(
                        'ASR model works with single Audio or AudioPlus input, '
                        'but {0} found: {1}. We\'ll use only the first one'.format(
                            len(tag_info['inputs']), ', '.join(tag_info['to_name'])))
                if tag_info['inputs'][0]['type'] not in ('Audio', 'AudioPlus'):
                    raise ValueError('{0} tag expected to be of type Audio or AudioPlus, but type {1} found'.format(
                        tag_info['to_name'][0], tag_info['inputs'][0]['type']))
                to_name = tag_info['to_name'][0]
                value = tag_info['inputs'][0]['value']
        if from_name is None:
            raise ValueError('ASR model expects <TextArea> tag to be presented in a label config.')
        return from_name, to_name, value

    def fit(self, completions, workdir=None, **kwargs):
        """No actual training: just validate the project path and echo it back."""
        project_path = kwargs.get('project_full_path')
        if os.path.exists(project_path):
            logger.info('Found project in local path ' + project_path)
        else:
            logger.error('Project not found in local path ' + project_path + '. Serving uploaded data will fail.')
        return {'project_path': project_path}
| StarcoderdataPython |
38161 | <filename>Python3-world2/ex068.py
import random
# Console game: "even or odd" against the computer. The loop repeats while
# the player keeps winning and breaks on the first loss.
title = 'par ou ímpar'.upper()
print('~~' * 10)
print(f'\033[7;30m{title:^20}\033[m')
print('~~' * 10)
# poi = "par ou impar" (even-or-odd label); vop = "vitoria ou perda" (win/lose)
poi = vop = ''
cont = 0  # number of rounds played
while True:
    cont += 1
    escolha_numero = int(input('Digite um número: '))
    escolha_parinpar = str(input('Par ou Impar[P/I]: ')).strip().upper()[0]
    en = escolha_numero
    epi = escolha_parinpar
    # npc = the computer's number
    npc = random.randint(1, 100)
    soma = npc + en
    if soma % 2 == 0:
        poi = 'par'
    else:
        poi = 'ímpar'
    if soma % 2 == 0 and epi in 'Pp' or soma % 2 != 0 and epi in 'Ii':
        vop = '\033[1;32mganhou\033[m'
    else:
        vop = '\033[1;31mperdeu\033[m'
        break
    # Player won this round: show the result and play again.
    print(f'O numero escolhido pelo pc foi : {npc}')
    print(f'A soma foi: \033[1;33m{soma}\033[m')
    print(f'Você {vop}, deu \033[1;33m{poi}\033[m')
print('---' * 10)
print(f'O numero escolhido pelo pc foi : {npc}')
print(f'A soma foi: \033[1;33m{soma}\033[m')
if cont > 1:
    print(f'Depois de {cont - 1} vezes seguidas, ', end='')
print(f'Você {vop}, deu \033[1;33m{poi}')
| StarcoderdataPython |
257031 | # Improtant note: This data file would ordinarily be used to connect with a proper database server
# more likely PostgreSQL, but thats me. I do plan on rewritting this in the future for such implementations.
# With that said, this file will be be very slow to run and only to demonstrate data processing using
# functions and pandas along with providing a central file for data references
#
# Import Pandas
import pandas as pd
# Import CSV data
# Import team historical statistics
# Some historical team names are correlated with their more modern counter part
# Custome CSV files where created from the original by combining data to allow
# for easier display of historical team data
teams = pd.read_csv("data/update_team.csv")
# Import Players batting data
batters = pd.read_csv("data/update_batting.csv")
# Import custom Fielding data
fielding = pd.read_csv("data/update_fielding.csv")
# Import custom pitching data
pitching = pd.read_csv("data/update_pitching.csv")
# Import Player profile data
players = pd.read_csv("data/update_player.csv")
# Import custom player and team id dataframe
team_players = pd.read_csv("data/player_team.csv")
# Hardcoded list of era names as key value pairs (Dash dropdown options)
era_list = [
    {"label": "Dead Ball ('03-'19)", "value": "Dead Ball"},
    {"label": "Live Ball ('20-'41)", "value": "Live Ball"},
    {"label": "Integration ('42-'60)", "value": "Integration"},
    {"label": "Expantion ('61-'76)", "value": "Expantion"},
    {"label": "Free Agency ('77-'93)", "value": "Free Agency"},
    {"label": "Steroid ('94-'05)", "value": "Steroid"},
    {"label": "Post-Steroid ('06-'15)", "value": "Post-Steroid"},
    {"label": "Statcast ('16-'20)", "value": "Statcast"},
]
# Era boundary markers (Dash slider marks keyed by year)
era_marks = {
    1903: {"label": "1903"},
    1919: {"label": "1919"},
    1941: {"label": "1941"},
    1960: {"label": "1960"},
    1976: {"label": "1976"},
    1993: {"label": "1993"},
    2005: {"label": "2005"},
    2015: {"label": "2015"},
    2020: {"label": "2020"},
}
# Creates a dynamic list of team names based on era
def dynamicteams(x):
    """Return dropdown options ({"label": name, "value": team_id}) for era index x."""
    # Hardcoded list of era time spans, wouldnt do it this way if the set where larger
    era_time = [
        (1903, 1919),
        (1920, 1941),
        (1942, 1960),
        (1961, 1976),
        (1977, 1993),
        (1994, 2005),
        (2006, 2015),
        (2016, 2020),
    ]
    # create a filter list of just years and team names
    filter_team_yr = teams[["year", "name", "team_id"]]
    # filter the above list by year span
    filter_year = filter_team_yr[
        (filter_team_yr.year >= era_time[x][0])
        & (filter_team_yr.year <= era_time[x][1])
    ]
    # Create a filter list of Team names based on years filtered
    filter_teams = filter_year["name"].unique()
    filter_team_ids = filter_year["team_id"].unique()
    # NOTE(review): zipping two independently-uniqued arrays assumes name and
    # team_id stay aligned one-to-one — confirm against the data.
    return [{"label": k, "value": v} for k, v in zip(filter_teams, filter_team_ids)]
def dynamicrange(x):
    """Return [start_year, end_year] for the baseball era at index x (0-7)."""
    era_spans = (
        (1903, 1919), (1920, 1941), (1942, 1960), (1961, 1976),
        (1977, 1993), (1994, 2005), (2006, 2015), (2016, 2020),
    )
    start, end = era_spans[x]
    return [start, end]
# Calculate On-Base Percentage
def calculate_obp(df):
    """On-base percentage: (H + BB + HBP) / (AB + BB + HBP + SF).

    Expects columns ab, h, bb, hbp, sf; returns a Series of the same length.
    """
    times_on_base = df.h + df.bb + df.hbp
    plate_appearances = df.ab + df.bb + df.hbp + df.sf
    return times_on_base / plate_appearances
# Calculate Slugging Average
def calculate_slg(df):
    """Slugging average: total bases per at-bat.

    Singles are derived as H minus extra-base hits. Expects columns
    ab, h, double, triple, hr; returns a Series.
    """
    singles = df.h - df.double - df.triple - df.hr
    total_bases = singles + 2 * df.double + 3 * df.triple + 4 * df.hr
    return total_bases / df.ab
# Calculate WOBA
def calculate_woba(df):
    """Weighted on-base average (2013 FanGraphs coefficients).

    https://library.fangraphs.com/offense/woba/
    Expects columns ab, h, double, triple, hr, bb, ibb, sf, hbp.
    """
    singles = df.h - df.double - df.triple - df.hr
    weighted_pa = df.ab + df.bb - df.ibb + df.sf + df.hbp
    weighted_hits = (
        0.690 * df.bb
        + 0.722 * df.hbp
        + 0.888 * singles
        + 1.271 * df.double
        + 1.616 * df.triple
        + 2.101 * df.hr
    )
    return weighted_hits / weighted_pa
| StarcoderdataPython |
3482933 | <gh_stars>1-10
# -*- coding: UTF-8 -*-
import re
import sys
import traceback
from collections import OrderedDict
from datetime import datetime, timedelta
import requests
from lxml import etree
class Parser:
    """Parses weibo.cn mobile HTML pages into structured weibo records."""

    def __init__(self, config):
        # config keys used below: 'cookie' (login cookies for requests)
        # and 'filter' (truthy = keep original posts only, skip retweets).
        self.config = config

    def deal_html(self, url, cookie):
        """Fetch *url* with the login cookie and return an lxml selector."""
        print("url:", url)
        html = requests.get(url, cookies=cookie).content
        selector = etree.HTML(html)
        return selector

    def deal_garbled(self, info):
        """Flatten an lxml node to text, dropping zero-width spaces and any
        characters the current console encoding cannot represent."""
        info = (info.xpath('string(.)').replace(u'\u200b', '').encode(
            sys.stdout.encoding, 'ignore').decode(sys.stdout.encoding))
        return info

    def extract_picture_urls(self, info, weibo_id):
        """Return full-size picture URLs of a weibo, comma-joined; u'无' if none."""
        try:
            a_list = info.xpath('div/a/@href')
            first_pic = 'https://weibo.cn/mblog/pic/' + weibo_id + '?rl=0'
            all_pic = 'https://weibo.cn/mblog/picAll/' + weibo_id + '?rl=1'
            if first_pic in a_list:
                if all_pic in a_list:
                    # Multi-picture post: scrape the "all pictures" page and
                    # upgrade thumbnails to the /large/ variants.
                    selector = self.deal_html(all_pic, self.config['cookie'])
                    preview_picture_list = selector.xpath('//img/@src')
                    picture_list = [
                        p.replace('/thumb180/', '/large/')
                        for p in preview_picture_list
                    ]
                    picture_urls = ','.join(picture_list)
                else:
                    if info.xpath('.//img/@src'):
                        preview_picture = info.xpath('.//img/@src')[-1]
                        picture_urls = preview_picture.replace(
                            '/wap180/', '/large/')
                    else:
                        # Account is configured to hide images; abort with advice.
                        sys.exit(
                            u"爬虫微博可能被设置成了'不显示图片',请前往"
                            u"'https://weibo.cn/account/customize/pic',修改为'显示'"
                        )
            else:
                picture_urls = u'无'
            return picture_urls
        except Exception:
            return u'无'

    def get_picture_urls(self, info, is_original):
        """Collect original and (optionally) retweeted picture URLs for one weibo."""
        try:
            weibo_id = info.xpath('@id')[0][2:]
            picture_urls = {}
            if is_original:
                original_pictures = self.extract_picture_urls(info, weibo_id)
                picture_urls['original_pictures'] = original_pictures
                if not self.config['filter']:
                    picture_urls['retweet_pictures'] = u'无'
            else:
                # Retweet: pictures belong to the retweeted weibo's id.
                retweet_url = info.xpath("div/a[@class='cc']/@href")[0]
                retweet_id = retweet_url.split('/')[-1].split('?')[0]
                retweet_pictures = self.extract_picture_urls(info, retweet_id)
                picture_urls['retweet_pictures'] = retweet_pictures
                a_list = info.xpath('div[last()]/a/@href')
                original_picture = u'无'
                for a in a_list:
                    if a.endswith(('.gif', '.jpeg', '.jpg', '.png')):
                        original_picture = a
                        break
                picture_urls['original_pictures'] = original_picture
            return picture_urls
        except Exception as e:
            print('Error: ', e)
            traceback.print_exc()

    def get_video_url(self, info, is_original):
        """Return the weibo's video URL (HD preferred); u'无' if absent or live."""
        try:
            if is_original:
                div_first = info.xpath('div')[0]
                a_list = div_first.xpath('.//a')
                video_link = u'无'
                for a in a_list:
                    if 'm.weibo.cn/s/video/show?object_id=' in a.xpath(
                            '@href')[0]:
                        video_link = a.xpath('@href')[0]
                        break
                if video_link != u'无':
                    video_link = video_link.replace(
                        'm.weibo.cn/s/video/show', 'm.weibo.cn/s/video/object')
                    wb_info = requests.get(
                        video_link, cookies=self.config['cookie']).json()
                    video_url = wb_info['data']['object']['stream'].get(
                        'hd_url')
                    if not video_url:
                        video_url = wb_info['data']['object']['stream']['url']
                        if not video_url:  # no stream url at all: a live broadcast
                            video_url = u'无'
            else:
                video_url = u'无'
            return video_url
        except Exception:
            return u'无'

    def get_page_num(self, selector):
        """Return the total number of result pages (1 when no pager exists)."""
        if selector.xpath("//input[@name='mp']") == []:
            page_num = 1
        else:
            page_num = (int)(
                selector.xpath("//input[@name='mp']")[0].attrib['value'])
        return page_num

    def get_long_weibo(self, weibo_link):
        """Fetch the dedicated page of a truncated original weibo and return its full text."""
        selector = self.deal_html(weibo_link, self.config['cookie'])
        info = selector.xpath("//div[@class='c']")[1]
        wb_content = self.deal_garbled(info)
        wb_time = info.xpath("//span[@class='ct']/text()")[0]
        # Text sits between the first colon and the trailing timestamp.
        weibo_content = wb_content[wb_content.find(':') +
                                   1:wb_content.rfind(wb_time)]
        return weibo_content

    def get_original_weibo(self, info, weibo_id):
        """Return the text of an original weibo, following the '全文' link if truncated."""
        weibo_content = self.deal_garbled(info)
        weibo_content = weibo_content[:weibo_content.rfind(u'赞')]
        a_text = info.xpath('div//a/text()')
        if u'全文' in a_text:
            weibo_link = 'https://weibo.cn/comment/' + weibo_id
            wb_content = self.get_long_weibo(weibo_link)
            if wb_content:
                weibo_content = wb_content
        return weibo_content

    def get_long_retweet(self, weibo_link):
        """Return the full text of a truncated retweeted weibo."""
        wb_content = self.get_long_weibo(weibo_link)
        weibo_content = wb_content[:wb_content.rfind(u'原文转发')]
        return weibo_content

    def get_retweet(self, info, weibo_id):
        """Return a retweet formatted as reason + original author + original text."""
        wb_content = self.deal_garbled(info)
        wb_content = wb_content[wb_content.find(':') +
                                1:wb_content.rfind(u'赞')]
        wb_content = wb_content[:wb_content.rfind(u'赞')]
        a_text = info.xpath('div//a/text()')
        if u'全文' in a_text:
            weibo_link = 'https://weibo.cn/comment/' + weibo_id
            weibo_content = self.get_long_retweet(weibo_link)
            if weibo_content:
                wb_content = weibo_content
        retweet_reason = self.deal_garbled(info.xpath('div')[-1])
        retweet_reason = retweet_reason[:retweet_reason.rindex(u'赞')]
        original_user = info.xpath("div/span[@class='cmt']/a/text()")
        if original_user:
            original_user = original_user[0]
            wb_content = (retweet_reason + '\n' + u'原始用户: ' + original_user +
                          '\n' + u'转发内容: ' + wb_content)
        else:
            wb_content = retweet_reason + '\n' + u'转发内容: ' + wb_content
        return wb_content

    def is_original(self, info):
        """Heuristic: more than three 'cmt' spans means the weibo is a retweet."""
        is_original = info.xpath("div/span[@class='cmt']")
        if len(is_original) > 3:
            return False
        else:
            return True

    def get_weibo_content(self, info, is_original):
        """Dispatch to the original- or retweet-specific content extractor."""
        weibo_id = info.xpath('@id')[0][2:]
        if is_original:
            weibo_content = self.get_original_weibo(info, weibo_id)
        else:
            weibo_content = self.get_retweet(info, weibo_id)
        return weibo_content

    def get_publish_place(self, info):
        """Return the weibo's publish location; u'无' when no map link is present."""
        div_first = info.xpath('div')[0]
        a_list = div_first.xpath('a')
        publish_place = u'无'
        for a in a_list:
            if ('place.weibo.com' in a.xpath('@href')[0]
                    and a.xpath('text()')[0] == u'显示地图'):
                weibo_a = div_first.xpath("span[@class='ctt']/a")
                if len(weibo_a) >= 1:
                    publish_place = weibo_a[-1]
                    # When the last link is a video, the place is one link earlier.
                    if (u'视频' == div_first.xpath("span[@class='ctt']/a/text()")
                            [-1][-2:]):
                        if len(weibo_a) >= 2:
                            publish_place = weibo_a[-2]
                        else:
                            publish_place = u'无'
                publish_place = self.deal_garbled(publish_place)
                break
        return publish_place

    def get_publish_time(self, info):
        """Normalize weibo.cn's relative timestamps to 'YYYY-MM-DD HH:MM'."""
        try:
            str_time = info.xpath("div/span[@class='ct']")
            str_time = self.deal_garbled(str_time[0])
            publish_time = str_time.split(u'来自')[0]
            if u'刚刚' in publish_time:  # "just now"
                publish_time = datetime.now().strftime('%Y-%m-%d %H:%M')
            elif u'分钟' in publish_time:  # "N minutes ago"
                minute = publish_time[:publish_time.find(u'分钟')]
                minute = timedelta(minutes=int(minute))
                publish_time = (datetime.now() -
                                minute).strftime('%Y-%m-%d %H:%M')
            elif u'今天' in publish_time:  # "today HH:MM"
                today = datetime.now().strftime('%Y-%m-%d')
                time = publish_time[3:]
                publish_time = today + ' ' + time
                if len(publish_time) > 16:
                    publish_time = publish_time[:16]
            elif u'月' in publish_time:  # "MM月DD日 HH:MM" (current year implied)
                year = datetime.now().strftime('%Y')
                month = publish_time[0:2]
                day = publish_time[3:5]
                time = publish_time[7:12]
                publish_time = year + '-' + month + '-' + day + ' ' + time
            else:
                publish_time = publish_time[:16]
            return publish_time
        except Exception as e:
            print('Error: ', e)
            traceback.print_exc()

    def get_publish_tool(self, info):
        """Return the client/tool the weibo was posted from (text after u'来自')."""
        try:
            str_time = info.xpath("div/span[@class='ct']")
            str_time = self.deal_garbled(str_time[0])
            if len(str_time.split(u'来自')) > 1:
                publish_tool = str_time.split(u'来自')[1]
            else:
                publish_tool = u'无'
            return publish_tool
        except Exception as e:
            print('Error: ', e)
            traceback.print_exc()

    def get_weibo_footer(self, info):
        """Return like/retweet/comment counts parsed from the weibo's footer text."""
        try:
            footer = {}
            pattern = r'\d+'
            str_footer = info.xpath('div')[-1]
            str_footer = self.deal_garbled(str_footer)
            str_footer = str_footer[str_footer.rfind(u'赞'):]
            weibo_footer = re.findall(pattern, str_footer, re.M)
            up_num = int(weibo_footer[0])
            footer['up_num'] = up_num
            retweet_num = int(weibo_footer[1])
            footer['retweet_num'] = retweet_num
            comment_num = int(weibo_footer[2])
            footer['comment_num'] = comment_num
            return footer
        except Exception as e:
            print('Error: ', e)
            traceback.print_exc()

    def get_one_weibo(self, info):
        """Assemble every extracted field of a single weibo into an OrderedDict.

        Returns None when config['filter'] is set and the weibo is a retweet.
        """
        try:
            weibo = OrderedDict()
            is_original = self.is_original(info)
            if (not self.config['filter']) or is_original:
                weibo['id'] = info.xpath('@id')[0][2:]
                weibo['content'] = self.get_weibo_content(info,
                                                          is_original)  # text content
                weibo['publish_place'] = self.get_publish_place(info)  # location
                weibo['publish_time'] = self.get_publish_time(info)  # timestamp
                weibo['publish_tool'] = self.get_publish_tool(info)  # client used
                footer = self.get_weibo_footer(info)
                weibo['up_num'] = footer['up_num']  # likes
                weibo['retweet_num'] = footer['retweet_num']  # retweets
                weibo['comment_num'] = footer['comment_num']  # comments
                picture_urls = self.get_picture_urls(info, is_original)
                weibo['original_pictures'] = picture_urls[
                    'original_pictures']  # original picture urls
                if not self.config['filter']:
                    weibo['retweet_pictures'] = picture_urls[
                        'retweet_pictures']  # retweeted picture urls
                    weibo['original'] = is_original  # whether this is an original post
                weibo['video_url'] = self.get_video_url(info,
                                                        is_original)  # video url
            else:
                weibo = None
            return weibo
        except Exception as e:
            print('Error: ', e)
            traceback.print_exc()

    def is_pinned_weibo(self, info):
        """Return True when the weibo carries the pinned (u'置顶') marker."""
        kt = info.xpath(".//span[@class='kt']/text()")
        if kt and kt[0] == u'置顶':
            return True
        else:
            return False
5077267 | from django.db import models
# Create your models here.
class Shift(models.Model):
    """A work shift (Turno A/B/C) with optional start/end times."""

    SHIFT_NAME_CHOICES = (
        ('A', 'Turno A'),
        ('B', 'Turno B'),
        ('C', 'Turno C'),
    )
    shift_name = models.CharField(
        max_length=8,
        choices=SHIFT_NAME_CHOICES,
    )
    start_time = models.TimeField(blank=True, null=True)
    end_time = models.TimeField(blank=True, null=True)
    created_at = models.DateTimeField(auto_now_add=True)
    # NOTE: field name keeps the historical spelling "update_at"; renaming
    # would require a schema migration.
    update_at = models.DateTimeField(auto_now=True)

    def __str__(self):
        return self.shift_name
4874353 | <reponame>echaussidon/LSS
# Predict the DECam z and GAIA G magnitudes using Tycho-2 and 2MASS photometry
from __future__ import division, print_function
import sys, os, glob, time, warnings, gc
import numpy as np
# import matplotlib
# matplotlib.use("Agg")
# import matplotlib.pyplot as plt
from astropy.table import Table, vstack, hstack
import fitsio
# from astropy.io import fits
sys.path.append(os.path.expanduser('~/git/Python/user_modules/'))
import match_coord
tycho2_path = '/global/project/projectdirs/cosmo/staging/tycho2/tycho2.kd.fits'
twomass_path = '/global/cfs/cdirs/desi/users/rongpu/useful/2mass_psc/2mass_psc_j_12.fits'
output_path = '/global/cfs/cdirs/desi/users/rongpu/useful/tycho2-reference.fits'
tycho2 = Table(fitsio.read(tycho2_path))
twomass = Table(fitsio.read(twomass_path))
# Rows with MAG_VT == 0 have no valid VT magnitude.
mask_bad = tycho2['MAG_VT']==0
# Propagate proper motion back to epoch J2000 where epochs are recorded.
mask = (tycho2['EPOCH_RA']!=0) & (tycho2['EPOCH_DEC']!=0)
tycho_ra_j2000 = tycho2['RA'].copy()
tycho_dec_j2000 = tycho2['DEC'].copy()
tycho_ra_j2000[mask] = (tycho2['RA'] - (tycho2['EPOCH_RA']-2000.) * tycho2['PM_RA'] * 1/3600 / np.cos(np.radians(tycho2['DEC'])))[mask]
tycho_dec_j2000[mask] = (tycho2['DEC'] - (tycho2['EPOCH_DEC']-2000.) * tycho2['PM_DEC'] * 1/3600)[mask]
# Give invalid mags lowest priority
mag_vt = tycho2['MAG_VT'].copy()
mag_vt[mask_bad] = 99.
mask_hp = mask_bad & (tycho2['MAG_HP']!=0) # use MAG_HP (Hipparcos mag) when VT is unavailable
mag_vt[mask_hp] = tycho2['MAG_HP'][mask_hp]
# Cross-match 2MASS sources to Tycho-2, preferring brighter (lower mag) stars.
idx1, idx2, d2d, d_ra, d_dec = match_coord.match_coord(twomass['RAJ2000'], twomass['DEJ2000'], tycho_ra_j2000, tycho_dec_j2000, priority2=-mag_vt, search_radius=5., plot_q=False)
print(len(idx1)/len(twomass))
print(len(idx1)/len(tycho2))
# Initialize the new columns; unmatched rows stay NaN.
tycho2['Jmag'] = np.nan
tycho2['Hmag'] = np.nan
tycho2['Kmag'] = np.nan
tycho2['zguess'] = np.nan
tycho2['ggguess'] = np.nan
twomass = twomass[idx1]
tycho2['Jmag'][idx2] = twomass['Jmag']
tycho2['Hmag'][idx2] = twomass['Hmag']
tycho2['Kmag'][idx2] = twomass['Kmag']
# Polynomial fits in (VT - J) color predicting DECam z and GAIA G offsets.
coeffs_z = [-0.01835938, -0.68084937, 0.49222576]
coeffs_gg = [0.00445346, -0.07819228, -0.07145574, 0.00278177]
xmin, xmax = -1, 8
x = tycho2['MAG_VT'][idx2]-twomass['Jmag']
pz = np.poly1d(coeffs_z)
tycho2['zguess'][idx2] = pz(np.clip(x, xmin, xmax)) + tycho2['MAG_VT'][idx2]
tycho2['zguess'][mask_bad] = np.nan
pgg = np.poly1d(coeffs_gg)
tycho2['ggguess'][idx2] = pgg(np.clip(x, xmin, xmax)) + tycho2['MAG_VT'][idx2]
tycho2['ggguess'][mask_bad] = np.nan
tycho2.write(output_path, overwrite=True)
| StarcoderdataPython |
5142472 | <reponame>mgalves/tweets
from os import listdir
from os.path import isfile, join
def load_dataset(dataset):
    """
    Load the keyword mapping files for *dataset*.

    Each regular file under ``/opt/datasets/<dataset>`` is expected to
    contain a single line of the form ``<name>=<kw1>,<kw2>,...``.

    Returns two values: the complete keyword list (fed to the Twitter API)
    and a dict mapping each filename to its own keyword list.

    NOTE(review): keywords keep any surrounding whitespace/newline from the
    file, exactly as before -- confirm whether they should be stripped.
    """
    keywords = []          # complete keyword list across all files
    keywords_by_file = {}  # keywords grouped per mapping file
    base_folder = f"/opt/datasets/{dataset}"
    for filename in listdir(base_folder):
        fullpath = join(base_folder, filename)
        if not isfile(fullpath):
            continue  # skip sub-directories and other non-files
        print(f"FILE {fullpath}")
        # Context manager guarantees the handle is closed even if reading
        # fails (the original leaked the file object on exceptions).
        with open(fullpath) as file:
            line = file.readline()
        kws = line.split("=")[1].split(",")
        keywords.extend(kws)
        keywords_by_file[filename] = kws
    return keywords, keywords_by_file
| StarcoderdataPython |
58674 | <filename>ysi_prediction/ysi_flask/prediction.py
import numpy as np
import pandas as pd
from sklearn.linear_model import BayesianRidge
from ysi_flask.colors import husl_palette
from ysi_flask.fragdecomp.chemical_conversions import canonicalize_smiles
from ysi_flask.fragdecomp.fragment_decomposition import (
FragmentError,
draw_fragment,
draw_mol_svg,
get_fragments,
)
from ysi_flask.fragdecomp.nullspace_outlier import NullspaceClassifier
try:
    from flask import Markup
    flask = True
except ImportError:
    # Allow this module to be imported outside the web app; SVG markup
    # features are only attached when Flask is available.
    flask = False
# Grab the most recent YSI data from github
# NOTE(review): this network fetch and the model fit below run at import
# time -- the module cannot be imported offline.
ysi = pd.read_csv(
    "https://raw.githubusercontent.com/pstjohn/YSIs_for_prediction/master/ysi.csv"
)
# we use this for weighting, so provide a 5% relative error if none given
ysi.YSI_err = ysi.YSI_err.fillna(np.abs(ysi.YSI * 0.05))
# Parse ysi fragments
frags = ysi.SMILES.apply(get_fragments).fillna(0).astype(int)
# Fit YSI model
nullspace = NullspaceClassifier()
nullspace.fit(frags)
# Ridge without intercept: YSI is modeled as a pure sum of per-fragment
# contributions, weighted by inverse experimental error.
bridge = BayesianRidge(fit_intercept=False)
bridge.fit(frags, ysi.YSI, sample_weight=1 / ysi.YSI_err)
# Per-fragment mean contribution and uncertainty (identity design matrix).
frag_means, frag_stds = bridge.predict(np.eye(frags.shape[1]), return_std=True)
beta = pd.DataFrame(
    np.vstack([frag_means, frag_stds]).T, index=frags.columns, columns=["mean", "std"]
)
beta = beta.round(1)
# How often each fragment appears in the training set.
beta["train_count"] = frags.sum(0)
ysi = ysi.set_index("SMILES")
def predict(smiles):
    """Predict the YSI of *smiles* from its fragment decomposition.

    Returns a 7-tuple: (predicted mean, predicted std, outlier flag,
    per-fragment DataFrame, experimental mean, experimental std,
    experimental species name). The experimental fields are ``None`` when
    the molecule is not in the training set.

    Raises FragmentError when the SMILES cannot be decomposed.
    """
    try:
        assert smiles is not None
        fragments = get_fragments(smiles)
    except Exception:
        # Any parsing/decomposition failure is surfaced uniformly.
        raise FragmentError
    isoutlier = False
    # See if an experimental value exists
    try:
        ysi_exp = ysi.loc[canonicalize_smiles(smiles)]
        exp_mean = round(ysi_exp.YSI, 1)
        exp_std = round(ysi_exp.YSI_err, 1)
        exp_name = ysi_exp.Species
    except KeyError:
        exp_mean = None
        exp_std = None
        exp_name = None
    # Make sure all the fragments are found in the database
    if not fragments.index.isin(frags.columns).all():
        isoutlier = True
    # Put the fragments in the correct order
    reindexed_frags = fragments.reindex(frags.columns).fillna(0).astype(int)
    # Make sure the fragments are not present in nonlinear combinations of database
    if nullspace.predict(reindexed_frags):
        isoutlier = True
    # Predict based off previous regression
    mean, std = bridge.predict(reindexed_frags.values.reshape(1, -1), return_std=True)
    # process fragments for display
    colors = husl_palette(n_colors=len(fragments))
    frag_df = pd.DataFrame(fragments, columns=["count"])
    frag_df["color"] = colors
    if flask:
        # SVG rendering needs Markup, which is only imported under Flask.
        frag_df["svg"] = frag_df.apply(
            lambda x: Markup(draw_fragment(x.name, x.color)), 1
        )
    frag_df = frag_df.join(beta, how="left").fillna(0)
    return mean[0], std[0], isoutlier, frag_df, exp_mean, exp_std, exp_name
def return_fragment_matches(frag_str):
    """Return (beta row, sample of molecules) matching the input fragment.

    *frag_str* must be a fragment column name from the training matrix.
    At most 20 matching molecules are returned; their ``svg`` column is
    only attached when Flask is available.
    """
    matches = ysi[(frags[frag_str] != 0).values].reset_index()
    # Highlight color for the fragment in the rendered structures.
    color = (0.9677975592919913, 0.44127456009157356, 0.5358103155058701)
    if len(matches) > 20:
        matches = matches.sample(20)
    # Bug fix: guard on the module-level `flask` flag like `predict` does.
    # `Markup` is only imported when Flask is present, so the original call
    # raised a NameError outside the web app.
    if flask:
        matches["svg"] = matches.SMILES.apply(
            lambda x: Markup(
                draw_mol_svg(x, figsize=(80, 80), color_dict={frag_str: color})
            )
        )
    return beta.loc[frag_str], matches.round(1)
def predict_apply(smiles):
    """function optimized for pandas series of SMILES strings

    Returns a pd.Series (named after the SMILES) with the YSI value, its
    error and a ``pred_type`` of 'experiment', 'prediction' or 'outlier'.
    Experimental values take precedence over the model prediction.

    Raises FragmentError when the SMILES cannot be decomposed.
    """
    try:
        fragments = get_fragments(smiles)
    except Exception:
        raise FragmentError
    isoutlier = False
    # See if an experimental value exists
    try:
        ysi_exp = ysi.loc[canonicalize_smiles(smiles)]
        exp_mean = ysi_exp.YSI
        exp_std = ysi_exp.YSI_err
    except KeyError:
        exp_mean = None
        exp_std = None
    # Make sure all the fragments are found in the database
    if not fragments.index.isin(frags.columns).all():
        isoutlier = True
    # Put the fragments in the correct order
    reindexed_frags = fragments.reindex(frags.columns).fillna(0).astype(int)
    # Make sure the fragments are not present in nonlinear combinations of database
    if nullspace.predict(reindexed_frags):
        isoutlier = True
    # Predict based off previous regression
    mean, std = bridge.predict(reindexed_frags.values.reshape(1, -1), return_std=True)
    # Bug fix: compare against None explicitly -- the original truthiness
    # test (`if exp_mean:`) misclassified a legitimate experimental YSI of
    # 0.0 as "no experiment" and fell back to the model prediction.
    has_experiment = exp_mean is not None
    prediction_type = "prediction" if not isoutlier else "outlier"
    if has_experiment:
        prediction_type = "experiment"
    return pd.Series(
        {
            "YSI": exp_mean if has_experiment else mean[0],
            "YSI_err": exp_std if has_experiment else std[0],
            "pred_type": prediction_type,
        },
        name=smiles,
    )
| StarcoderdataPython |
def percentage_format(x: float) -> str:
    """Render the fraction *x* as a percentage string with one decimal place."""
    percent = x * 100
    return "{:.1f}%".format(percent)
| StarcoderdataPython |
3426011 | from flask import jsonify,g,request,url_for,current_app
from . import api
from ..models import User,Post
'''
Flasky应用API资源
资源URL 方法 说明
/users/ GET 返回所有用户
/users/<int:id> GET 返回一个用户
/users/<int:id>/posts/ GET 返回一个用户发布的所有文章
/users/<int:id>/timeline/ GET 返回一个用户所关注用户发布的所有文章
'''
@api.route('/users/')
def get_users():
    """Return one page of all users as JSON, with prev/next page links."""
    page = request.args.get('page', 1, type=int)
    pagination = User.query.order_by(User.id.asc()).paginate(
        page, per_page=current_app.config['FLASKY_POSTS_PER_PAGE'], error_out=False)
    prev = url_for('api.get_users', page=page - 1) if pagination.has_prev else None
    next = url_for('api.get_users', page=page + 1) if pagination.has_next else None
    return jsonify({
        'users': [u.to_json() for u in pagination.items],
        'prev': prev,
        'next': next,
        'count': pagination.total
    })
@api.route('/users/<int:id>')
def get_user(id):
    """Return a single user as JSON, or 404 if the id is unknown."""
    return jsonify(User.query.get_or_404(id).to_json())
@api.route('/users/<int:id>/posts/')
def get_user_posts(id):
    """Return one page of a user's posts as JSON (404 for unknown users)."""
    user = User.query.get_or_404(id)
    page = request.args.get('page', 1, type=int)
    pagination = user.posts.order_by(Post.timestamp.desc()).paginate(
        page, per_page=current_app.config['FLASKY_POSTS_PER_PAGE'],
        error_out=False)
    prev = url_for('api.get_user_posts', id=id, page=page - 1) if pagination.has_prev else None
    next = url_for('api.get_user_posts', id=id, page=page + 1) if pagination.has_next else None
    return jsonify({
        'posts': [p.to_json() for p in pagination.items],
        'prev': prev,
        'next': next,
        'count': pagination.total
    })
@api.route('/users/<int:id>/timeline/')
def get_user_followed_posts(id):
    """Return one page of posts by the users this user follows (404 for unknown users)."""
    user = User.query.get_or_404(id)
    page = request.args.get('page', 1, type=int)
    pagination = user.followed_posts.order_by(Post.timestamp.desc()).paginate(
        page, per_page=current_app.config['FLASKY_POSTS_PER_PAGE'], error_out=False)
    prev = url_for('api.get_user_followed_posts', id=id, page=page - 1) if pagination.has_prev else None
    next = url_for('api.get_user_followed_posts', id=id, page=page + 1) if pagination.has_next else None
    return jsonify({
        'posts': [p.to_json() for p in pagination.items],
        'prev': prev,
        'next': next,
        'count': pagination.total
    })
222681 | <reponame>leap-solutions-asia/auto-scaling<filename>dashboard/CloudStackConfig.py
import configparser
import os
from tempfile import NamedTemporaryFile
# Path to the persisted auto-scaling configuration (INI format).
cloudstack_file = "/auto-scaling/cloudstack.ini"
class CloudStackConfig:
    """Typed accessor around the auto-scaling INI file (``cloudstack_file``).

    The configuration is grouped into sections: ``cloudstack`` (API
    credentials/endpoint), ``tenant`` (infrastructure uuids and networks),
    ``autoscaling`` (scaling VM and limits), ``vm`` (managed instances) and
    ``dashboard`` (UI settings). All getters return ``None`` when the
    section is absent; all setters create the section on demand.
    """

    def __init__(self):
        # Start from the on-disk file when it exists, else an empty config.
        self._conf = configparser.ConfigParser()
        if os.path.exists(cloudstack_file):
            self._conf.read(cloudstack_file)

    # -- internal helpers --------------------------------------------------

    def _get_option(self, section, option):
        """Return *option* from *section*, or None if the section is absent."""
        if self._conf.has_section(section):
            return self._conf.get(section, option)
        return None

    def _set_option(self, section, option, value):
        """Set *option* in *section*, creating the section if needed."""
        if not self._conf.has_section(section):
            self._conf.add_section(section)
        self._conf.set(section, option, value)

    # -- file persistence --------------------------------------------------

    def create_configfile(self):
        """Write the config file to disk only if it does not exist yet."""
        if not os.path.exists(cloudstack_file):
            # Close the handle deterministically (the original leaked it).
            with open(cloudstack_file, 'w') as config_file:
                self._conf.write(config_file)

    def update_configfile(self):
        """Atomically rewrite the config file: temp file, then rename."""
        dirname, basename = os.path.split(cloudstack_file)
        with NamedTemporaryFile(mode='w', dir=dirname, prefix=basename, delete=False) as f:
            tmpfile = f.name
            self._conf.write(f)
        # rename within the same directory is atomic on POSIX filesystems
        os.rename(tmpfile, cloudstack_file)

    # -- cloudstack section ------------------------------------------------

    def get_secret(self):
        return self._get_option("cloudstack", "secret")

    def get_key(self):
        return self._get_option("cloudstack", "key")

    def get_endpoint(self):
        return self._get_option("cloudstack", "endpoint")

    def set_secret(self, data):
        self._set_option("cloudstack", "secret", data)

    def set_key(self, data):
        self._set_option("cloudstack", "key", data)

    def set_endpoint(self, data):
        self._set_option("cloudstack", "endpoint", data)

    # -- tenant section ----------------------------------------------------

    def get_lb_rule_uuid(self):
        return self._get_option("tenant", "lb_rule_uuid")

    def get_zone_uuid(self):
        return self._get_option("tenant", "zone_uuid")

    def get_template_uuid(self):
        return self._get_option("tenant", "template_uuid")

    def get_nw_uuid(self, nw):
        return self._get_option("tenant", nw)

    def get_serviceoffering_uuid(self):
        return self._get_option("tenant", "serviceoffering_uuid")

    def set_lb_rule_uuid(self, data):
        self._set_option("tenant", "lb_rule_uuid", data)

    def set_zone_uuid(self, data):
        self._set_option("tenant", "zone_uuid", data)

    def set_template_uuid(self, data):
        self._set_option("tenant", "template_uuid", data)

    def set_nw(self, item, data):
        self._set_option("tenant", item, data)

    def set_serviceoffering_uuid(self, data):
        self._set_option("tenant", "serviceoffering_uuid", data)

    # -- autoscaling section -----------------------------------------------

    def get_autoscaling_vm(self):
        return self._get_option("autoscaling", "autoscaling_vm")

    def get_upper_limit(self):
        return self._get_option("autoscaling", "upper_limit")

    def get_lower_limit(self):
        return self._get_option("autoscaling", "lower_limit")

    def set_autoscaling_vm(self, data):
        self._set_option("autoscaling", "autoscaling_vm", str(data))

    def set_upper_limit(self, data):
        self._set_option("autoscaling", "upper_limit", str(data))

    def set_lower_limit(self, data):
        self._set_option("autoscaling", "lower_limit", str(data))

    # -- vm / dashboard sections -------------------------------------------

    def get_vm_uuid(self, vm):
        return self._get_option("vm", vm)

    def set_vm(self, item, data):
        self._set_option("vm", item, data)

    def get_timezone(self):
        return self._get_option("dashboard", "timezone")

    def set_timezone(self, data):
        self._set_option("dashboard", "timezone", str(data))

    # -- section management (kept for backward compatibility) ---------------

    def has_vm_section(self):
        return self._conf.has_section("vm")

    def has_tenant_section(self):
        return self._conf.has_section("tenant")

    def has_autoscaling_section(self):
        return self._conf.has_section("autoscaling")

    def has_dashboard_section(self):
        return self._conf.has_section("dashboard")

    def has_cloudstack_section(self):
        return self._conf.has_section("cloudstack")

    def add_vm_section(self):
        self._conf.add_section("vm")

    def add_cloudstack_section(self):
        self._conf.add_section("cloudstack")

    def add_tenant_section(self):
        self._conf.add_section("tenant")

    def add_autoscaling_section(self):
        self._conf.add_section("autoscaling")

    def add_dashboard_section(self):
        self._conf.add_section("dashboard")

    def remove_cloudstack_section(self):
        self._conf.remove_section("cloudstack")

    def remove_tenant_section(self):
        self._conf.remove_section("tenant")

    def remove_autoscaling_section(self):
        self._conf.remove_section("autoscaling")

    def remove_dashboard_section(self):
        self._conf.remove_section("dashboard")

    def remove_vm_section(self):
        self._conf.remove_section("vm")

    # -- listings ------------------------------------------------------------

    def get_vm_list(self):
        """All option names in the ``vm`` section, or None if it is absent."""
        if self.has_vm_section():
            return self._conf.options("vm")
        return None

    def get_tenant_list(self):
        """All option names in the ``tenant`` section, or None if absent."""
        if self.has_tenant_section():
            return self._conf.options("tenant")
        return None

    def get_networks(self):
        """Uuids of every tenant option whose name starts with 'network'."""
        data = []
        if self.has_tenant_section():
            for nw in self.get_tenant_list():
                if nw.startswith("network"):
                    data.append(self.get_nw_uuid(nw))
        return data
| StarcoderdataPython |
5079676 | #missing : ln 5
def fib(n):
    """Print the Fibonacci series, one term per line, for all terms < n.

    The original body did not compile (missing colon, Python-2 ``print``)
    and never advanced the sequence; this is the intended textbook loop.
    """
    a, b = 0, 1
    while a < n:
        print(a)
        a, b = b, a + b
| StarcoderdataPython |
9619407 | <reponame>MW55/MDlatticeAnalysisTool
from Bio.PDB.Atom import Atom
from Bio.PDB.PDBParser import PDBParser
import numpy as np
import itertools
class Enviroment(object):
    """Geometry of a protein plus a set of docked polymer poses.

    Loads both PDB files, collects atom coordinates, computes the
    axis-aligned bounding box of everything (``min_point``/``max_point``)
    and builds residue lookup tables.

    Note: ``min_point``/``max_point`` begin life as methods and are
    shadowed by the computed numpy arrays on each instance after
    ``__init__`` runs -- kept as-is for backward compatibility.
    """

    def __init__(self, protein_pdb_path, polymer_full_poses_path,
                 meshsize = np.array([3, 3, 3])):
        # NOTE(review): the default meshsize array is a shared mutable
        # default; kept because nothing here mutates it.
        self.prot_path = protein_pdb_path
        self.poly_path = polymer_full_poses_path
        self._meshsize = meshsize
        self._protein_struc = self._read_protein(self.prot_path)
        self._poly_poses = self._read_polymer_poses(self.poly_path)
        self.protein_coords = self._get_protein_coords(self._protein_struc)
        self.poly_poses_coords = self._get_poly_coords(self._poly_poses)
        self.min_point = self.min_point(self.poly_poses_coords,
                                        self.protein_coords)
        self.max_point = self.max_point(self.poly_poses_coords,
                                        self.protein_coords)
        self._residues = self._filter_residues(self._protein_struc)
        self.residue_list = self._create_residue_list(self._residues)
        self.res_id_dict = self._create_residue_id_dict(self._residues)
        self.geometric_center = self._get_geometric_center(self.protein_coords)

    def _read_protein(self, prot_path):
        """Parse the protein PDB file into a Bio.PDB structure."""
        parser = PDBParser(PERMISSIVE=1, QUIET=True)
        return parser.get_structure(prot_path, prot_path)

    def _read_polymer_poses(self, poly_path):
        """Parse the polymer-poses PDB file; each MODEL is one pose."""
        parser = PDBParser(PERMISSIVE=1, QUIET=True)
        structure = parser.get_structure(poly_path, poly_path)
        return [model for model in structure]

    def _get_protein_coords(self, protein_struc):
        """List of xyz coordinate arrays for every protein atom."""
        return [atom.get_coord() for atom in protein_struc.get_atoms()]

    def _get_poly_coords(self, poly_poses):
        """Per-pose lists of xyz coordinate arrays."""
        return [[atom.get_coord() for atom in model.get_atoms()]
                for model in poly_poses]

    def _get_geometric_center(self, protein_coords):
        """Arithmetic mean of the protein atom coordinates."""
        return sum(protein_coords) / len(protein_coords)

    def _all_coords(self, poly_poses_coords, protein_coords):
        """Single (N, 3) array of every atom coordinate from all inputs."""
        pose_coords = itertools.chain.from_iterable(poly_poses_coords)
        return np.array(list(itertools.chain(protein_coords, pose_coords)))

    # Calculate the lowest cartesian coordinates (rounded down) of all poses.
    def min_point(self, poly_poses_coords, protein_coords):
        # Vectorized replacement of the original per-atom Python loops;
        # same result: floor of the component-wise minimum.
        coords = self._all_coords(poly_poses_coords, protein_coords)
        return np.floor(coords.min(axis=0))

    # Calculate the highest cartesian coordinates (rounded up) of all poses.
    def max_point(self, poly_poses_coords, protein_coords):
        coords = self._all_coords(poly_poses_coords, protein_coords)
        return np.ceil(coords.max(axis=0))

    def _filter_residues(self, protein_struc):
        """Standard residues only (hetero-flag ' '), as a reusable list.

        Bug fix: the original returned a lazy ``filter`` iterator which
        ``_create_residue_list`` exhausted, so ``_create_residue_id_dict``
        always saw an empty iterable and ``res_id_dict`` was empty.
        """
        return [res for res in protein_struc.get_residues() if res.id[0] == ' ']

    # Creates a list of the residues with the coordinates of all atoms
    # and the corresponding residue id as values.
    def _create_residue_list(self, residues):
        per_residue = [
            [np.append(atom.get_coord(), residue.id[1])
             for atom in residue.get_list()]
            for residue in residues
        ]
        return list(itertools.chain.from_iterable(per_residue))

    # Creates a dict containing the residue ids and the corresponding
    # residues.
    def _create_residue_id_dict(self, residues):
        return {residue.id[1]: residue.resname for residue in residues}
# The box generation is not in use at the moment, instead the box created
# by epitopsy is imported, to allow for comparisons between epitopsy
# results and mdl results.
# def _create_box(self):
# box = Box(self)
# return box
# class Box(object):
# def __init__(self, enviroment):
# self.offset = self.offset(enviroment.min_point)
# self.meshsize = enviroment._meshsize
# self.box_dim = self._calc_box_dim(enviroment.max_point,
# enviroment.min_point, self.meshsize)
# self.meshes_amount = self._number_of_meshes(self.meshsize,
# self.box_dim)
# # is used to translate the atomic coordinates to the coordinates
# of the box and vice versa.
# def offset(self, min_point):
# return abs(min_point)
# def _calc_box_dim(self, max_point, min_point, meshsize):
# box_dim = np.zeros(3)
# box_dim[0] = abs(max_point[0] - min_point[0])
# box_dim[1] = abs(max_point[1] - min_point[1])
# box_dim[2] = abs(max_point[2] - min_point[2])
# return np.floor(box_dim / self.meshsize)
# def _number_of_meshes(self, meshsize, box_dim):
# return np.prod(box_dim)
# def box_to_real_space(self, grid_coord):
# return grid_coord * self.meshsize - self.offset
# def real_to_box_space(self, atom_coord):
# return np.around((atom_coord + self.offset) / self.meshsize)
| StarcoderdataPython |
265779 | from . import matcher
import matplotlib.pyplot as plt
import matplotlib.colors as clrs
from scipy import stats
import numpy as np
import umap
import seaborn as sns
import matplotlib.patches as mpatches
def pearsonMatrix(dataset_filtered, patterns_filtered, cellTypeColumnName, num_cell_types, projectionName, plotName,
                  plot, row_cluster=True, col_cluster=True, path=None, display=True, dpi=300, xtickSize=8, ytickSize=8):
    """This method finds the pearson correlation coefficient between every pattern and every cell type

    :param dataset_filtered: Anndata object cells x genes
    :param patterns_filtered: Anndata object features x genes
    :param cellTypeColumnName: index where the cell types are stored in dataset_filtered.obsm
    :param num_cell_types: The number of cell types in the dataset this parameter could be removed
    :param projectionName: The name of the projection created using one of the regression methods
    :param plotName: The index for the pearson matrix in dataset_filtered.uns[plotName]
    :param plot: If True a plot is generated either saved or displayed
    :param row_cluster: Bool whether to cluster
    :param col_cluster: Bool whether to cluster columns or not
    :param dpi: Quality of image to be saved
    :param display: Bool whether to display the plot or not
    :param xtickSize: Size of labels on the x-axis
    :param ytickSize: Size of labels on the y-axis
    :type plot: boolean
    :type projectionName: String
    :type num_cell_types: int
    :type cellTypeColumnName: String
    :return: void
    """
    matcher.sourceIsValid(dataset_filtered)
    matcher.sourceIsValid(patterns_filtered)
    color = matcher.mapCellNamesToInts(dataset_filtered, cellTypeColumnName)
    # One-hot cell-type membership matrix: row per cell type, column per cell.
    matrix = np.zeros([num_cell_types, color.shape[0]])
    pearson_matrix = np.empty([patterns_filtered.X.shape[0], num_cell_types])
    for i in range(color.shape[0]):
        cell_type = color[i]
        matrix[cell_type][i] = 1
    # Correlate each pattern's per-cell weights against each type indicator.
    for i in range(patterns_filtered.X.shape[0]):
        pattern = np.transpose(dataset_filtered.obsm[projectionName])[:][i]
        for j in range(color.unique().shape[0]):
            cell_type = matrix[j]
            # stats.pearsonr returns (coefficient, p-value); keep only r.
            correlation = stats.pearsonr(pattern, cell_type)
            pearson_matrix[i][j] = correlation[0]
    # Stash the result on the AnnData object so viz can be re-run later.
    dataset_filtered.uns[plotName] = pearson_matrix
    if plot:
        pearsonViz(dataset_filtered, plotName, cellTypeColumnName, row_cluster, col_cluster, path, display, dpi,
                   xtickSize, ytickSize)
def pearsonViz(dataset_filtered, plotName, cellTypeColumnName, row_cluster=True, col_cluster=True, path=None,
               display=True, dpi=300, xtickSize=8, ytickSize=8):
    """ Visualize or save a Pearson Matrix.

    :param path: path to save the figure to (None to skip saving)
    :param dataset_filtered: Anndata object cells x genes
    :param plotName: Index of pearson matrix to visualize (in .uns)
    :param cellTypeColumnName: index for cell type in dataset_filtered.obs
    :param row_cluster: Bool whether to cluster rows or not
    :param col_cluster: Bool whether to cluster columns or not
    :param dpi: Quality of image to be saved
    :param display: Bool whether to display the plot or not
    :param xtickSize: Size of labels on the x-axis
    :param ytickSize: Size of labels on the y-axis
    :return: void
    """
    matcher.sourceIsValid(dataset_filtered)
    # One row label per learned feature/pattern.
    y_ticks = []
    for i in range(dataset_filtered.uns[plotName].shape[0]):
        y_ticks.append('Feature ' + str(i + 1))
    if row_cluster or col_cluster:
        sns.set(font_scale=1)
        cluster = sns.clustermap(dataset_filtered.uns[plotName],
                                 row_cluster=row_cluster,
                                 col_cluster=col_cluster,
                                 xticklabels=dataset_filtered.obs[cellTypeColumnName].unique(),
                                 yticklabels=y_ticks)
        cluster.ax_heatmap.set_yticklabels(cluster.ax_heatmap.yaxis.get_majorticklabels(), fontsize=ytickSize)
        cluster.ax_heatmap.set_xticklabels(cluster.ax_heatmap.xaxis.get_majorticklabels(), fontsize=xtickSize)
    else:
        plt.title("Pearson Plot", fontsize=24)
        sns.heatmap(dataset_filtered.uns[plotName],
                    xticklabels=dataset_filtered.obs[cellTypeColumnName].unique(),
                    yticklabels=y_ticks)
        plt.tick_params(axis='y', labelsize=ytickSize)
        plt.tick_params(axis='x', labelsize=xtickSize)
    # Bug fix: save BEFORE showing. The original called savefig after
    # plt.show() when both path and display were set; show() destroys the
    # current figure in non-interactive backends, writing a blank image.
    if path is not None:
        plt.savefig(path, dpi=dpi)
    if display:
        plt.show()
    elif path is not None:
        # Original only closed the figure on the save-without-display path.
        plt.close()
def UMAP_Projection(dataset_filtered, cellTypeColumnName, projectionName, UMAPName, n_neighbors, metric='euclidean',
                    plot=True, colorScheme='Paired', pointSize=.5, subset=None, path=None, display=True, dpi=300):
    """Embed the pattern matrix into 2D with UMAP and optionally plot it.

    The embedding is stored in ``dataset_filtered.obsm[UMAPName]``. Make
    sure the chosen colorScheme has enough colors for every cell type.

    :param dataset_filtered: Anndata object cells x genes
    :param cellTypeColumnName: index for cell type in dataset_filtered.obs
    :param projectionName: index for the projection in dataset_filtered.obsm
    :param UMAPName: index for the created UMAP coordinates in dataset_filtered.obsm
    :param n_neighbors: number of neighbors for the UMAP
    :param metric: the distance metric used in the UMAP, defaults to euclidean
    :param plot: If True a plot is displayed, defaults to True
    :param colorScheme: seaborn color scheme to use, defaults to Paired
    :param pointSize: size of the points, defaults to .5
    :param subset: subset of types in cell type column name to plot
    :param path: path to save figure
    :param display: Whether to display the figure in the jupyter notebook
    :param dpi: Quality of the plot to be saved
    :return: void, mutates dataset_filtered and adds the UMAP to obsm
    """
    matcher.sourceIsValid(dataset_filtered)
    embedding = umap.UMAP(n_neighbors=n_neighbors, metric=metric).fit_transform(
        dataset_filtered.obsm[projectionName])
    dataset_filtered.obsm[UMAPName] = embedding
    if plot:
        UMAP_Viz(dataset_filtered, UMAPName, cellTypeColumnName, colorScheme, pointSize, subset, path, display, dpi)
def UMAP_Viz(dataset_filtered, UMAPName, cellTypeColumnName, colorScheme='Paired', pointSize=.5, subset=None, path=None,
             display=True, dpi=300):
    """Plots the UMAP of the pattern matrix. Make sure colorScheme has at least as many colors as cell types in your
    dataset.

    :param cellTypeColumnName: index for cell type in dataset_filtered.obs can be any column in .obs
    :param dataset_filtered: Anndata object cells x genes
    :param UMAPName: index for the UMAP in dataset_filtered.obsm
    :param colorScheme: seaborn color scheme, defaults to Paired
    :param pointSize: size of the points, defaults to .5
    :param subset: subset of types in cell type column name to plot
    :param path: path to save figure
    :param display: Whether to display the figure in the jupyter notebook
    :param dpi: Quality of the plot to be saved
    :return: void
    """
    # The original duplicated the entire plotting body across the
    # subset/no-subset branches; select the data once and plot once.
    # (Robustness: validation now also runs on the subset path.)
    matcher.sourceIsValid(dataset_filtered)
    if subset is None:
        data = dataset_filtered
    else:
        data = dataset_filtered[dataset_filtered.obs[cellTypeColumnName].isin(subset)]
    color = matcher.mapCellNamesToInts(data, cellTypeColumnName)
    numTypes = data.obs[cellTypeColumnName].unique().shape[0]
    palette = sns.color_palette(colorScheme, n_colors=numTypes)
    colors = [palette[color[x]] for x in range(color.shape[0])]
    nd = data.obsm[UMAPName]
    plt.scatter(nd[:, 0], nd[:, 1], c=colors, s=pointSize, rasterized=True)
    plt.title("UMAP Projection of Pattern Matrix", fontsize=24)
    handles = []
    for i in range(numTypes):
        handles.append(mpatches.Patch(color=palette[i],
                                      label=data.obs[cellTypeColumnName].unique()[i]))
    plt.legend(handles=handles, title="Cell Types", fontsize='xx-small', loc='best')
    # Bug fix: save BEFORE showing -- plt.show() can destroy the current
    # figure so savefig afterwards writes a blank image.
    if path is not None:
        plt.savefig(path, dpi=dpi)
    if display:
        plt.show()
    elif path is not None:
        plt.close()
def featurePlots(dataset_filtered, num_patterns, projectionName, UMAPName, vmin=.00000000001, clip=99.5,
                 zeroColor='dimgrey', obsColumn=None, cmap='viridis', pointSize=.1, subset=None,
                 path=None, display=True, dpi=300):
    """Creates plots which show the weight of each feature in each cell.

    :param clip: Stops colorbar at the percentile specified [0,100]
    :param vmin: Min of the colorplot i.e. what to define as zero
    :param zeroColor: What color the cells below vmin should be colored
    :param dataset_filtered: Anndata object cells x genes
    :param num_patterns: the number of the patterns to display starting from feature 1. It can also take a list of ints.
    :param projectionName: index of the projection in dataset_filtered.obsm
    :param UMAPName: index of the UMAP in dataset_filtered.obsm
    :param obsColumn: Column in dataset_filtered to use for subsetting
    :param cmap: colormap to use when creating the feature plots
    :param pointSize: Size of the points on the plots
    :param subset: subset of types in cell type column name to plot
    :param path: path to save figure without a file type suffix like pdf png
    :param display: Whether to display the figure in the jupyter notebook
    :param dpi: Quality of the plot to be saved
    :return: void, files will be .png
    """
    matcher.sourceIsValid(dataset_filtered)
    if subset is not None:
        if obsColumn is None:
            raise ValueError("obsColum cannot be None when filtering")
        data = dataset_filtered[dataset_filtered.obs[obsColumn].isin(subset)]
        pattern_matrix = data.obsm[projectionName]
        print(pattern_matrix.shape)
    else:
        data = dataset_filtered
        pattern_matrix = dataset_filtered.obsm[projectionName]
    # Cells whose weight falls below vmin render in zeroColor.
    colors = plt.get_cmap(cmap)
    colors.set_under(zeroColor)
    # Accept either an explicit list of 1-based feature numbers or a count;
    # the original duplicated the whole plotting loop for the two cases.
    if isinstance(num_patterns, list):
        pattern_numbers = num_patterns
    else:
        pattern_numbers = range(1, num_patterns + 1)
    for p in pattern_numbers:
        feature = pattern_matrix[:, p - 1]
        plt.title("Feature " + str(p), fontsize=24)
        plt.scatter(data.obsm[UMAPName][:, 0], data.obsm[UMAPName][:, 1], c=feature,
                    cmap=colors, vmin=vmin, vmax=np.percentile(feature, clip), s=pointSize)
        plt.colorbar()
        print("Number of nonzero cells " + str(np.count_nonzero(feature)))
        # Bug fix: save BEFORE showing; savefig after plt.show() can write
        # a blank file in non-interactive backends.
        if path is not None:
            plt.savefig(path + str(p) + ".png", dpi=dpi)
        if display or path is None:
            # Original always showed when no path was given.
            plt.show()
        if path is not None and display is False:
            plt.close()
def patternWeightDistribution(dataset_filtered, projectionName, patterns, obsColumn, subset, numBins=100):
    """Histogram the strictly-positive pattern weights for a cell subset.

    :param dataset_filtered: Anndata object cells x genes
    :param projectionName: index of the projection in dataset_filtered.obsm
    :param patterns: Which patterns to visualize (one indexed)
    :param obsColumn: Column in dataset_filtered to use for subsetting
    :param subset: What subset of cells in the obsColumn to visualize
    :param numBins: How many bins in the histogram
    :return: void, displays a histogram of the pattern weights above 0
    """
    subset = dataset_filtered[dataset_filtered.obs[obsColumn].isin(list(subset))]
    print("This subset has shape:", subset.shape)
    for i in patterns:
        positive = subset.obsm[projectionName][:, i - 1] > 0
        # Bug fix: the original guard was `if maxval is 0`, an identity
        # test that is never true for a numpy scalar -- and np.max on an
        # empty (all-zero) selection raised before the guard could fire.
        # Check for an empty positive selection up front instead.
        if not np.any(positive):
            print("Feature " + str(i) + " is all 0 in this subset")
            continue
        weights = subset.obsm[projectionName][:, i - 1][positive]
        maxval = np.max(weights)
        bins = np.arange(0, maxval + 1, (maxval + 1) / numBins)
        plt.title("Feature " + str(i))
        plt.hist(weights, bins=bins)
        plt.show()
def _plot_ci_panel(ciFrame, pointLabel, title, path, numGenesToPlot):
    """Plot one horizontal confidence interval per gene, save and show it.

    :param ciFrame: DataFrame indexed by gene with 'Low' and 'High' columns
    :param pointLabel: legend label attached to a single midpoint marker
    :param title: plot title
    :param path: file path passed to plt.savefig
    :param numGenesToPlot: used to scale the y axis and the zero line
    """
    counter = len(ciFrame) - 1
    genes = []
    for geneName, low, high in zip(list(ciFrame.index), ciFrame['Low'], ciFrame['High']):
        genes.insert(0, geneName)
        plt.plot((low, high), (counter, counter), '-', color='blue')
        # Label exactly one marker so the legend has a single entry.
        # (Was `counter is 1` — int identity comparison; fixed to `==`.)
        if counter == 1:
            plt.plot((float(low + high) / 2.0), counter, 'o', color='blue', label=pointLabel)
        else:
            plt.plot((float(low + high) / 2.0), counter, 'o', color='blue')
        counter -= 1
    plt.title(title)
    plt.plot((0, 0), (0, numGenesToPlot), '--', color='black')
    plt.ylim(top=numGenesToPlot)
    plt.ylim(bottom=-1)
    plt.legend()
    plt.yticks(range(len(genes)), genes)
    plt.savefig(path, dpi=300, bbox_inches='tight')
    plt.show()


def rankedByWeightedCIViz(projectionDriverOutput, pointLabel, weightTitle, pathForWeight, bonTitle, pathForBon, numGenesToPlot=50):
    """
    Plot the weighted and Bonferroni confidence intervals of the top-ranked
    significant genes from projectionDriver.

    :param projectionDriverOutput: Output from the projectionDriver function
    :param pointLabel: label for the CI point
    :param weightTitle: Title for the Weighted CI plot
    :param pathForWeight: Path for the Weighted CI plot
    :param bonTitle: Title for the Bon CI plot
    :param pathForBon: Path for the Bon CI plot
    :param numGenesToPlot: The number of genes to plot on both plots
    :return:
    """
    sigs = projectionDriverOutput[0].index
    wCIs = projectionDriverOutput[1].loc[sigs]
    # Rank by |Low + High| (distance of the interval midpoint from zero).
    wCIs['WRank'] = abs(wCIs['Low'] + wCIs['High'])
    wCIs = wCIs.sort_values(by='WRank', ascending=False)
    wCIs = wCIs.head(numGenesToPlot)
    _plot_ci_panel(wCIs, pointLabel, weightTitle, pathForWeight, numGenesToPlot)
    # Bonferroni intervals for the same genes, in the same rank order.
    bCIs = projectionDriverOutput[2].loc[wCIs.index]
    _plot_ci_panel(bCIs, pointLabel, bonTitle, pathForBon, numGenesToPlot)
3574740 | from django.forms import ModelForm
from .models import *
class CollectiveOrderForm(ModelForm):
    # Form over CollectiveOrder exposing all model fields except customer and
    # transaction_id, which are presumably set programmatically — verify
    # against the views that use this form.
    class Meta:
        model = CollectiveOrder
        fields = '__all__'
        exclude = ['customer', 'transaction_id']
class CollectiveOrderItemsForm(ModelForm):
    # Form over CollectiveOrderItem exposing every model field.
    class Meta:
        model = CollectiveOrderItem
        fields = '__all__'
class CollectiveShippingDetailsForm(ModelForm):
    # Form over CollectiveShippingAddress exposing every model field.
    class Meta:
        model = CollectiveShippingAddress
        fields = '__all__'
class CollectiveProductsForm(ModelForm):
    # Form over CollectiveProduct exposing every model field.
    class Meta:
        model = CollectiveProduct
        fields = '__all__'
class CollectiveCategoriesForm(ModelForm):
    # Form over CollectiveCategory; slug is excluded, presumably generated
    # automatically elsewhere — verify against the model's save logic.
    class Meta:
        model = CollectiveCategory
        fields = '__all__'
        exclude = ['slug']
| StarcoderdataPython |
1655890 | #!/usr/bin/env python3
#
# Harano Aji Fonts generator
# https://github.com/trueroad/HaranoAjiFonts-generator
#
# make_shift.py:
# create shift parameters from letter face
#
# Copyright (C) 2020 <NAME>.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
import sys
def calc_shift(name, width, ascender, descender,
               face_width, face_height, lsb, tsb):
    """Return the (lsb, tsb) side bearings after shifting a glyph's face.

    Only a small set of known CID-keyed glyph names is shifted; every other
    glyph keeps its original left and top side bearings unchanged (a note
    is printed for those).
    """
    if name in ("aji08269", "aji08273", "aji08283"):
        # CID+707 -> CID+8269 (GSUB vert/vrt2, `°` U+00B0 'DEGREE SIGN')
        # CID+708 -> CID+8273 (GSUB vert/vrt2, `′` U+2032 'PRIME')
        # CID+709 -> CID+8283 (GSUB vert/vrt2, `″` U+2033 'DOUBLE PRIME')
        # Mirror the face from top left to bottom right.
        shifted_lsb = width - (face_width + lsb)
        shifted_tsb = ascender - (descender + (face_height + tsb))
        return shifted_lsb, shifted_tsb
    if name in ("aji16326", "aji16327"):
        # CID+16326 U+3099 'COMBINING KATAKANA-HIRAGANA VOICED SOUND MARK'
        # CID+16327 U+309A 'COMBINING KATAKANA-HIRAGANA SEMI-VOICED SOUND MARK'
        # Move from left outside (right of the previous letter face) to the
        # left inside of the face, clamping the bearing at zero.
        return max(0, -lsb - face_width), tsb
    print ("# no shift: {}".format (name))
    return lsb, tsb
def load_table(file):
    """Parse a letter-face table file.

    Each data row is "name x_min y_min x_max y_max"; lines starting with
    '#' are comments. Returns {name: (x_min, y_min, x_max, y_max)} with
    the coordinates as floats.
    """
    table = {}
    with open(file, "r") as f:
        for line in f:
            if line.startswith('#'):
                continue
            fields = line.split()
            bbox = (float(fields[1]), float(fields[2]),
                    float(fields[3]), float(fields[4]))
            table[fields[0]] = bbox
    return table
def main():
    """Read the letter-face table named on the command line and print a
    shift table (name, width, x/y translation, x/y scale) to stdout."""
    if len(sys.argv) == 1:
        print ("Usage: make_shift.py letter_face01.tbl > shift.tbl")
        exit (1)
    table = load_table(sys.argv[1])
    print ("# name width x-trans y-trans x-scale y-scale")
    # Fixed metrics: 1000-unit advance, 880/-120 vertical metrics.
    width, ascender, descender = 1000, 880, -120
    for name, (x_min, y_min, x_max, y_max) in table.items():
        lsb = x_min
        tsb = ascender - y_max
        new_lsb, new_tsb = calc_shift(name, width, ascender, descender,
                                      x_max - x_min, y_max - y_min,
                                      lsb, tsb)
        # Scale is always 1; only the translation columns vary.
        print ("{}\t{}\t{}\t{}\t{}\t{}".format (name, width,
                                                new_lsb - lsb,
                                                tsb - new_tsb,
                                                1, 1))
# Run only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main ()
| StarcoderdataPython |
11223034 | <reponame>ErickDiaz/tesis_master_ingmate
#!/usr/bin/python
"""
Released under the MIT License
Copyright 2015-2016 MrTijn/Tijndagamer
"""
from bmp180 import bmp180

# 0x77 is the sensor's I2C address.
sensor = bmp180(0x77)
readings = (
    ("Temp", sensor.get_temp(), "Celcius"),
    ("Pressure", sensor.get_pressure(), "Pascal"),
    ("Altitude", sensor.get_altitude(), "meter"),
)
for label, value, unit in readings:
    print(label + ": " + str(value) + " " + unit)
11296306 | from retico_core.abstract import *
from retico_core import audio
from retico_core import debug
from retico_core import network
from retico_core import text
from retico_core import dialogue
__version__ = "0.2.0" # This is the version that is used basically everywhere
| StarcoderdataPython |
8089929 | import json
import urllib.request
import pandas as pd
import os
class Covid19IndiaNationalLoader:
    """Loads the covid19india.org national daily time series as a DataFrame.

    The parsed frame is cached as a pickle on disk; the network is only hit
    when the cache file is missing or unreadable.
    """

    def __init__(self):
        # Resolve the cache location relative to the working directory.
        # Fall back to the current directory when no data dir exists —
        # the original left store_location unset in that case, causing an
        # AttributeError later in load().
        if os.path.exists("../data"):
            self.store_location = '../data/covid19india_national_daily.pickle'
        elif os.path.exists("../../data"):
            self.store_location = '../../data/covid19india_national_daily.pickle'
        else:
            self.store_location = 'covid19india_national_daily.pickle'
        self.url = 'https://api.covid19india.org/data.json'

    def load(self):
        """Return a date-indexed DataFrame with a (Country, Status) column
        MultiIndex; downloads, normalizes and caches the data on cache miss.
        """
        try:
            # Read from cache
            print("Reading from cache")
            data = pd.read_pickle(self.store_location)
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # still propagate; any failure to read the cache (missing file,
            # corrupt pickle) falls through to a fresh download.
            print(f"Not available in cache, downloading from {self.url}")
            with urllib.request.urlopen(self.url) as data_url:
                raw_data = json.loads(data_url.read().decode())
            data = pd.json_normalize(raw_data['cases_time_series'])
            # Source dates lack a year; the feed started in 2020.
            data['date'] = pd.to_datetime(data['date'] + "2020")
            data = data.set_index('date')
            assert list(data.columns) == ['dailyconfirmed', 'dailydeceased', 'dailyrecovered', 'totalconfirmed',
                                          'totaldeceased', 'totalrecovered']
            data.columns = pd.MultiIndex.from_product(
                [['India'],
                 ['Confirmed', 'Deceased', 'Recovered',
                  'TotalConfirmed', 'TotalDeceased', 'TotalRecovered']],
                names=["Country", "Status"])
            data = data.astype(int)
            data.to_pickle(self.store_location)
        return data
| StarcoderdataPython |
5194227 | import requests
import json
import urllib.parse
def get_aws_access_key(turbot_api_access_key, turbot_api_secret_key, turbot_host_certificate_verification, turbot_host, turbot_account, turbot_user_id, api_version):
    """ Gets the federated access keys for a specified account
    :return: Returns the access key, secret key and session token for an account"""
    endpoint = "/api/%s/accounts/%s/users/%s/awsCredentials" % (
        api_version, turbot_account, turbot_user_id)
    response = requests.request(
        "POST",
        urllib.parse.urljoin(turbot_host, endpoint),
        auth=(turbot_api_access_key, turbot_api_secret_key),
        verify=turbot_host_certificate_verification,
        headers={
            'content-type': "application/json",
            'cache-control': "no-cache"
        }
    )
    credentials = json.loads(response.text)
    return (credentials['accessKeyId'],
            credentials['secretAccessKey'],
            credentials['sessionToken'])
def list_user_access_keys(turbot_api_access_key, turbot_api_secret_key, turbot_host_certificate_verification, turbot_host, turbot_account, turbot_user_id, api_version):
    """ Lists user access AKIA key pairs for a specified account.
    NOTE: This requires a Cluster role Turbot/Owner or higher in order to work.
    :return: (exists, akey) — (True, access key id) when the user has a key,
             (False, False) otherwise.
    """
    api_method = "GET"
    api_url = "/api/%s/accounts/%s/users/%s/awsAccessKeys" % (api_version, turbot_account, turbot_user_id)
    response = requests.request(
        api_method,
        urllib.parse.urljoin(turbot_host, api_url),
        auth=(turbot_api_access_key, turbot_api_secret_key),
        verify=turbot_host_certificate_verification,
        headers={
            'content-type': "application/json",
            'cache-control': "no-cache"
        }
    )
    responseObj = json.loads(response.text)
    # Guard against a user with zero keys: the original indexed items[0]
    # unconditionally and raised IndexError on an empty list.
    items = responseObj.get('items', [])
    if items and 'accessKeyId' in items[0]:
        return (True, items[0]['accessKeyId'])
    return (False, False)
def delete_user_access_keys(turbot_api_access_key, turbot_api_secret_key, turbot_host_certificate_verification, turbot_host, turbot_account, turbot_user_id, akey, api_version):
    """ Deletes a user access AKIA key pair for a specified account.
    NOTE: This requires a Cluster role Turbot/Owner or higher in order to work.
    """
    endpoint = "/api/%s/accounts/%s/users/%s/awsAccessKeys/%s" % (
        api_version, turbot_account, turbot_user_id, akey)
    # Fire-and-forget: the API response is not inspected.
    requests.request(
        "DELETE",
        urllib.parse.urljoin(turbot_host, endpoint),
        auth=(turbot_api_access_key, turbot_api_secret_key),
        verify=turbot_host_certificate_verification,
        headers={
            'content-type': "application/json",
            'cache-control': "no-cache"
        }
    )
def create_user_access_keys(turbot_api_access_key, turbot_api_secret_key, turbot_host_certificate_verification, turbot_host, turbot_account, turbot_user_id, api_version):
    """ Creates a user access AKIA key pair for a specified account.
    NOTE: This requires a Cluster role Turbot/Owner or higher in order to work.
    :return: (access key id, secret access key) of the new key pair.
    """
    endpoint = "/api/%s/accounts/%s/users/%s/awsAccessKeys" % (
        api_version, turbot_account, turbot_user_id)
    response = requests.request(
        "POST",
        urllib.parse.urljoin(turbot_host, endpoint),
        auth=(turbot_api_access_key, turbot_api_secret_key),
        verify=turbot_host_certificate_verification,
        headers={
            'content-type': "application/json",
            'cache-control': "no-cache"
        }
    )
    key_pair = json.loads(response.text)
    return (key_pair['accessKeyId'], key_pair['secretAccessKey'])
def get_account_tags(turbot_api_access_key, turbot_api_secret_key, turbot_host_certificate_verification, turbot_host, turbot_account, api_version):
    """ Fetches the tags attached to a specified account.
    NOTE: This requires a Cluster role Turbot/Owner or higher in order to work.
    :return: the account's tags dict, or False when the account has no tags
             (so callers can simply test truthiness).
    """
    endpoint = "/api/%s/accounts/%s/" % (api_version, turbot_account)
    response = requests.request(
        "GET",
        urllib.parse.urljoin(turbot_host, endpoint),
        auth=(turbot_api_access_key, turbot_api_secret_key),
        verify=turbot_host_certificate_verification,
        headers={
            'content-type': "application/json",
            'cache-control': "no-cache"
        }
    )
    account = json.loads(response.text)
    return account.get('tags', False)
def create_user_ssh_keys(turbot_api_access_key, turbot_api_secret_key, turbot_host_certificate_verification, turbot_host, turbot_user_id, api_version):
    """ Creates an SSH key pair for the specified user.
    NOTE: This requires a Cluster role Turbot/Owner or higher in order to work.
    :return: the public key of the newly created SSH key pair.
    """
    endpoint = "/api/%s/users/%s/sshKeys" % (api_version, turbot_user_id)
    response = requests.request(
        "POST",
        urllib.parse.urljoin(turbot_host, endpoint),
        auth=(turbot_api_access_key, turbot_api_secret_key),
        verify=turbot_host_certificate_verification,
        headers={
            'content-type': "application/json",
            'cache-control': "no-cache"
        }
    )
    ssh_key = json.loads(response.text)
    return ssh_key['publicKey']
def add_user_to_account(turbot_api_access_key, turbot_api_secret_key, turbot_host_certificate_verification, turbot_host, userarn, permissions, urn, api_version):
    ''' Adds a user to a resource with an activated grant.

    :param userarn: identity URN of the user to grant
    :param permissions: permission level for the grant
    :param urn: URN of the resource the grant is attached to
    '''
    # Removed the redundant function-local imports of requests/json/
    # urllib.parse — they shadowed the module-level imports.
    api_url = "/api/%s/resources/%s/grants/%s" % (api_version, urn, permissions)
    data = {"identityUrn": userarn, "activate": True}
    requests.post(
        json=data,
        url=urllib.parse.urljoin(turbot_host, api_url),
        auth=(turbot_api_access_key, turbot_api_secret_key),
        # Bug fix: turbot_host_certificate_verification was accepted but never
        # passed on, so TLS verification could not be controlled as requested
        # (every other helper in this module passes verify=).
        verify=turbot_host_certificate_verification
    )
def delete_user_grant(turbot_api_access_key, turbot_api_secret_key, turbot_host_certificate_verification, turbot_host, userarn, permissions, urn, api_version):
    ''' Deletes a user's grant on a resource. '''
    # The function-local imports of requests/json/urllib.parse were dropped:
    # the same names are already imported at module level.
    endpoint = "/api/%s/resources/%s/grants/%s/%s" % (
        api_version, urn, permissions, userarn)
    requests.request(
        "DELETE",
        urllib.parse.urljoin(turbot_host, endpoint),
        auth=(turbot_api_access_key, turbot_api_secret_key),
        verify=turbot_host_certificate_verification,
        headers={
            'content-type': "application/json",
            'cache-control': "no-cache"
        }
    )
| StarcoderdataPython |
3507222 | <filename>aws/templates/app_cluster.py
from cfn_pyplates.core import CloudFormationTemplate, Mapping, Parameter, \
Resource, Properties, DependsOn, Output
from cfn_pyplates.functions import join
# Availability zones per supported AWS region; iterated by SubnetsCFConf to
# build one public + one private subnet mapping per AZ.
AWS_REGIONS_AZ = {
    'eu-west-1': ["eu-west-1a", "eu-west-1b", "eu-west-1c"],
    'eu-central-1': ["eu-central-1a", "eu-central-1b"]
}
def get_extra_cloud_config_from_file(cloud_config_file):
    """Return the lines of a cloud-config file (trailing newlines kept).

    Bug fix: the original used the Python 2 `file()` builtin, which does not
    exist in Python 3 and raised NameError there; `open()` works on both.
    """
    with open(cloud_config_file) as f:
        return f.readlines()
class BaseCFConf(object):
    """Base class for one section of the CloudFormation template.

    Subclasses override the ``_get_mappings`` / ``_get_resources`` /
    ``_get_parameters`` / ``_get_outputs`` hooks; ``add()`` merges everything
    a section defines into the CloudFormationTemplate and returns the shared
    ``data`` dict used to pass values (region, subnet names, ...) between
    sections.
    """

    def __init__(self, data):
        # Shared mutable dict passed through every section builder.
        self.data = data

    def get_user_cloud_config(self):
        """Return the common user cloud-config lines shared by all instances."""
        return get_extra_cloud_config_from_file('./aws/templates/users.yml')

    def _get_autoscale(
        self, name,
        extra_security_groups=None, extra_cloud_config='',
        extra_props_autoscale=None,
        extra_props_launch=None, extra_attrs_launch=None,
        config_min_size=3, config_max_size=3
    ):
        """Build an AutoScalingGroup + LaunchConfiguration resource pair.

        :param name: role name; used for resource names, tags and the
                     RegionMap AMI lookup
        :param extra_security_groups: additional security group logical names
        :param extra_cloud_config: lines appended to the user cloud-config
        :param extra_props_autoscale: overrides merged into the ASG properties
        :param extra_props_launch: overrides merged into the launch config
        :param extra_attrs_launch: extra attributes for the launch resource
        :param config_min_size: ASG MinSize
        :param config_max_size: ASG MaxSize
        :return: [AutoScalingGroup resource, LaunchConfiguration resource]
        """
        # Bug fix: the original used mutable default arguments ([] / {}),
        # which are shared across calls; replaced with None sentinels.
        if extra_security_groups is None:
            extra_security_groups = []
        if extra_props_autoscale is None:
            extra_props_autoscale = {}
        if extra_props_launch is None:
            extra_props_launch = {}
        if extra_attrs_launch is None:
            extra_attrs_launch = []
        # general configs
        autoscale_name = '%sServerAutoScale' % name
        autoscale_launch_config = '%sServerLaunchConfig' % name
        # autoscaling configs
        props_autoscale = {
            "AvailabilityZones": {
                "Fn::GetAZs": {"Ref": "AWS::Region"}
            },
            "LaunchConfigurationName": {
                "Ref": autoscale_launch_config
            },
            "MinSize": "%s" % config_min_size,
            "MaxSize": "%s" % config_max_size,
            "Tags": [
                {
                    "Key": "Name",
                    "Value": name,
                    "PropagateAtLaunch": True
                },
                {
                    "Key": "Role",
                    "Value": name,
                    "PropagateAtLaunch": True
                }
            ]
        }
        props_autoscale.update(extra_props_autoscale)
        # launch configs: every group gets SSH-from-bastion plus any extras
        sec_groups = [
            {"Ref": sec_group} for sec_group in ["SSHFromBastionSecurityGroup"] + extra_security_groups
        ]
        cloud_config = self.get_user_cloud_config()
        cloud_config += extra_cloud_config
        props_launch = {
            "ImageId": {
                "Fn::FindInMap": [
                    "RegionMap",
                    {
                        "Ref": "AWS::Region"
                    },
                    name
                ]
            },
            "InstanceType": {
                "Ref": "%sInstanceType" % name
            },
            "SecurityGroups": sec_groups,
            "UserData": {
                "Fn::Base64": join('', *cloud_config)
            }
        }
        props_launch.update(extra_props_launch)
        attrs_launch = extra_attrs_launch
        return [
            Resource(
                autoscale_name,
                "AWS::AutoScaling::AutoScalingGroup",
                Properties(props_autoscale)
            ),
            Resource(
                autoscale_launch_config,
                "AWS::AutoScaling::LaunchConfiguration",
                Properties(props_launch),
                attributes=attrs_launch
            )
        ]

    # Default hooks: sections override the ones they contribute to.
    def _get_mappings(self):
        return []

    def _get_resources(self):
        return []

    def _get_parameters(self):
        return []

    def _get_outputs(self):
        return []

    def _data(self):
        return {
            'mappings': self._get_mappings(),
            'resources': self._get_resources(),
            'parameters': self._get_parameters(),
            'outputs': self._get_outputs()
        }

    def add(self, cft):
        """Merge this section's template items into *cft* and return data."""
        for prop, l in self._data().items():
            for val in l:
                getattr(cft, prop).add(val)
        return self.data
class GeneralCFConf(BaseCFConf):
    """Global template section: the RegionMap AMI mapping, all stack input
    parameters, shared security groups, the SQS IAM user, and the
    region / SQS key outputs."""

    def _get_mappings(self):
        # Region-specific AMI IDs for the NAT and SSH bastion instances; the
        # CoreOS/EtcdCluster AMIs come from the caller-supplied data dict.
        mapping_props = {
            "eu-central-1": {
                "nat": "ami-1e073a03",
                "SSHBastion": "ami-accff2b1"
            },
            "eu-west-1": {
                "nat": "ami-14913f63",
                "SSHBastion": "ami-47a23a30"
            }
        }
        mapping_props[self.data['region']].update({
            "CoreOS": self.data['coreos_ami'],
            "EtcdCluster": self.data['coreos_ami'],
        })
        return [
            Mapping('RegionMap', mapping_props)
        ]

    def _get_parameters(self):
        return [
            Parameter('EnvName', 'String', {
                'Description': 'Environment Name'
            }),
            Parameter(
                "SSHBastionInstanceType",
                "String",
                {
                    "Description": "SSH Bastion EC2 HVM instance type (m3.medium, etc).",
                    "Default": "t2.micro",
                    "ConstraintDescription": "Must be a valid EC2 HVM instance type."
                }
            ),
            Parameter(
                "NATInstanceType",
                "String",
                {
                    "Description": "NAT EC2 HVM instance type (m3.medium, etc).",
                    "Default": "t2.small",
                    "ConstraintDescription": "Must be a valid EC2 HVM instance type."
                }
            ),
            Parameter(
                "EtcdClusterInstanceType",
                "String",
                {
                    "Description": "EC2 HVM instance type (m3.medium, etc).",
                    "Default": "t2.micro",
                    "ConstraintDescription": "Must be a valid EC2 HVM instance type."
                }
            ),
            Parameter(
                "CoreOSInstanceType",
                "String",
                {
                    "Description": "EC2 HVM instance type (m3.medium, etc).",
                    "Default": "t2.medium",
                    "ConstraintDescription": "Must be a valid EC2 HVM instance type."
                }
            ),
            Parameter(
                "CoreOSClusterSize",
                "Number", {
                    "Default": "3",
                    "MinValue": "1",
                    "MaxValue": "12",
                    "Description": "Number of CoreOS worker nodes in cluster (1-12).",
                }
            ),
            Parameter(
                "EtcdClusterSize",
                "Number", {
                    "Default": "3",
                    "MinValue": "1",
                    "MaxValue": "12",
                    "Description": "Number of CoreOS service nodes in cluster (etcd cluster) (1-12).",
                }
            ),
            Parameter(
                "DiscoveryURL",
                "String", {
                    "Description": "An unique etcd cluster discovery URL. Grab a new token from https://discovery.etcd.io/new",
                }
            ),
            Parameter(
                "DBAllocatedStorage",
                "Number", {
                    "Description": "Allocated DB storage (in GB)",
                    "Default": "10",
                }
            ),
            Parameter(
                "DBInstanceClass",
                "String", {
                    "Description": "RDS instance type",
                    "Default": "db.t2.small",
                }
            ),
            Parameter(
                "DBUsername",
                "String", {
                    "Description": "Database username",
                }
            ),
            Parameter(
                "DBPassword",
                "String", {
                    # Bug fix: the description was a copy-paste of the
                    # DBUsername parameter ("Database username").
                    "Description": "Database password",
                }
            ),
            Parameter(
                "DBName",
                "String", {
                    "Description": "Database name",
                }
            )
        ]

    def _get_resources(self):
        return [
            Resource(
                "PublicHTTPSecurityGroup",
                "AWS::EC2::SecurityGroup",
                Properties({
                    "VpcId": {"Ref": "VPC"},
                    "GroupDescription": "Ingress for port 80 from anywhere",
                    "SecurityGroupIngress": [
                        {
                            "CidrIp": "0.0.0.0/0",
                            "IpProtocol": "tcp",
                            "FromPort": "80",
                            "ToPort": "80"
                        }
                    ],
                    "Tags": [
                        {"Key": "Name", "Value": "PublicHTTPSecurityGroup"}
                    ]
                })
            ),
            Resource(
                "SSHBastionSecurityGroup",
                "AWS::EC2::SecurityGroup",
                Properties({
                    "VpcId": {"Ref": "VPC"},
                    "GroupDescription": "Ingress for SSH from anywhere",
                    "SecurityGroupIngress": [
                        {
                            "IpProtocol": "tcp",
                            "FromPort": "22",
                            "ToPort": "22",
                            "CidrIp": "0.0.0.0/0"
                        }
                    ],
                    "Tags": [
                        {"Key": "Name", "Value": "SSHBastionSecurityGroup"}
                    ]
                })
            ),
            Resource(
                "SSHFromBastionSecurityGroup",
                "AWS::EC2::SecurityGroup",
                Properties({
                    "VpcId": {"Ref": "VPC"},
                    "GroupDescription": "SSH from SSH Bastion SecurityGroup",
                    "SecurityGroupIngress": [
                        {
                            "IpProtocol": "tcp",
                            "FromPort": "22",
                            "ToPort": "22",
                            "SourceSecurityGroupId": {"Ref": "SSHBastionSecurityGroup"}
                        }
                    ],
                    "Tags": [
                        {"Key": "Name", "Value": "SSHFromBastionSecurityGroup"}
                    ]
                })
            ),
            Resource(
                "SQSUser",
                "AWS::IAM::User",
                Properties({
                    "Policies":
                    [{
                        "PolicyName": "AmazonSQSFullAccess",
                        "PolicyDocument": {
                            "Version": "2012-10-17",
                            "Statement":
                            [{
                                "Effect": "Allow",
                                "Action": [
                                    "sqs:*"
                                ],
                                "Resource": "*"
                            }]
                        }
                    }]
                }
                )),
            Resource(
                "SQSAccessKey",
                "AWS::IAM::AccessKey",
                Properties({
                    "UserName" : { "Ref" : "SQSUser" }
                })
            )
        ]

    def _get_outputs(self):
        return [
            Output(
                "Region",
                {"Ref": "AWS::Region"},
                "AWS Region"
            ),
            Output(
                "SQSAccessKey",
                {"Ref" : "SQSAccessKey" },
                "SQS Access Key"
            ),
            Output(
                "SQSSecretKey",
                {"Fn::GetAtt" : [ "SQSAccessKey", "SecretAccessKey" ]},
                "SQS Secret Key"
            )
        ]
class SubnetsCFConf(BaseCFConf):
    """VPC + subnets section.

    Builds the 10.0.0.0/16 VPC, an internet gateway, and one public
    (10.0.N.0/24) and one private (10.0.(100+N).0/24) subnet per
    availability zone of the configured region.

    Side effect: _get_mappings() records the generated subnet logical names
    in self.data['public_subnets'] / self.data['private_subnets'] so later
    sections (NAT, autoscaling groups, RDS, ELB) can reference them.
    """
    def _get_mappings(self):
        mapping_props = {
            "VPC": {"CIDR": "10.0.0.0/16"},
        }
        self.data['public_subnets'] = []
        self.data['private_subnets'] = []
        for index, az in enumerate(AWS_REGIONS_AZ.get(self.data['region']), start=1):
            public_subnet_name = 'PublicSubnet%s' % index
            private_subnet_name = 'PrivateSubnet%s' % index
            mapping_props[public_subnet_name] = {
                "CIDR": "10.0.%s.0/24" % (index-1),
                "AZ": az
            }
            mapping_props[private_subnet_name] = {
                "CIDR": "10.0.%s.0/24" % (100+index-1),
                "AZ": az
            }
            # add subnet names to data
            self.data['public_subnets'].append(public_subnet_name)
            self.data['private_subnets'].append(private_subnet_name)
        return [
            Mapping('SubnetConfig', mapping_props)
        ]
    def _get_public_subnets(self):
        # Public route table sends 0.0.0.0/0 through the internet gateway;
        # each public subnet is associated with it.
        resources = [
            Resource(
                "PublicRouteTable",
                "AWS::EC2::RouteTable",
                Properties({
                    "VpcId": {"Ref": "VPC"},
                    "Tags": [
                        {"Key": "Application", "Value": {"Ref": "AWS::StackId"}}
                    ]
                })
            ),
            Resource(
                "PublicRoute",
                "AWS::EC2::Route",
                Properties({
                    "RouteTableId": {"Ref": "PublicRouteTable"},
                    "DestinationCidrBlock": "0.0.0.0/0",
                    "GatewayId": {"Ref": "InternetGateway"}
                }), attributes=[
                    DependsOn("GatewayToInternet")
                ]
            ),
        ]
        for subnet_name in self.data['public_subnets']:
            table_association_name = "%sRouteTableAssociation" % subnet_name
            resources += [
                Resource(
                    subnet_name,
                    "AWS::EC2::Subnet",
                    Properties({
                        "VpcId": {"Ref": "VPC"},
                        "AvailabilityZone": {
                            "Fn::FindInMap": ["SubnetConfig", subnet_name, "AZ"]
                        },
                        "CidrBlock": {
                            "Fn::FindInMap": ["SubnetConfig", subnet_name, "CIDR"]
                        },
                        "Tags": [
                            {"Key": "Application", "Value": {"Ref": "AWS::StackId"}},
                            {"Key": "Network", "Value": subnet_name}
                        ]
                    })
                ),
                Resource(
                    table_association_name,
                    "AWS::EC2::SubnetRouteTableAssociation",
                    Properties({
                        "SubnetId": {"Ref": subnet_name},
                        "RouteTableId": {"Ref": "PublicRouteTable"}
                    })
                ),
            ]
        return resources
    def _get_private_subnets(self):
        # Private route table sends 0.0.0.0/0 through the NAT instance
        # (defined by NATCFConf) instead of the internet gateway.
        resources = [
            Resource(
                "PrivateRouteTable",
                "AWS::EC2::RouteTable",
                Properties({
                    "VpcId": {"Ref": "VPC"},
                    "Tags": [
                        {"Key": "Name", "Value": "PrivateRouteTable"}
                    ]
                })
            ),
            Resource(
                "PrivateInternetRoute",
                "AWS::EC2::Route",
                Properties({
                    "RouteTableId": {"Ref": "PrivateRouteTable"},
                    "DestinationCidrBlock": "0.0.0.0/0",
                    "InstanceId": {
                        "Ref": "NATInstance"
                    }
                }), attributes=[
                    DependsOn("NATInstance")
                ]
            ),
        ]
        for subnet_name in self.data['private_subnets']:
            table_association_name = "%sRouteTableAssociation" % subnet_name
            resources += [
                Resource(
                    subnet_name,
                    "AWS::EC2::Subnet",
                    Properties({
                        "VpcId": {"Ref": "VPC"},
                        "AvailabilityZone": {
                            "Fn::FindInMap": ["SubnetConfig", subnet_name, "AZ"]
                        },
                        "CidrBlock": {
                            "Fn::FindInMap": ["SubnetConfig", subnet_name, "CIDR"]
                        },
                        "Tags": [
                            {"Key": "Application", "Value": {"Ref": "AWS::StackId"}},
                            {"Key": "Network", "Value": subnet_name}
                        ]
                    })
                ),
                Resource(
                    table_association_name,
                    "AWS::EC2::SubnetRouteTableAssociation",
                    Properties({
                        "SubnetId": {"Ref": subnet_name},
                        "RouteTableId": {"Ref": "PrivateRouteTable"}
                    })
                ),
            ]
        return resources
    def _get_resources(self):
        resources = [
            Resource(
                "VPC",
                "AWS::EC2::VPC",
                Properties({
                    "CidrBlock": {
                        "Fn::FindInMap": ["SubnetConfig", "VPC", "CIDR"]
                    },
                    "EnableDnsSupport": "true",
                    "EnableDnsHostnames": "true",
                    "Tags": [
                        {"Key": "Application", "Value": {"Ref": "AWS::StackId"}}
                    ]
                })
            ),
            Resource(
                "InternetGateway",
                "AWS::EC2::InternetGateway",
                Properties({
                    "Tags": [
                        {"Key": "Application", "Value": {"Ref": "AWS::StackId"}}
                    ]
                })
            ),
            Resource(
                "GatewayToInternet",
                "AWS::EC2::VPCGatewayAttachment",
                Properties({
                    "VpcId": {"Ref": "VPC"},
                    "InternetGatewayId": {"Ref": "InternetGateway"}
                })
            )
        ]
        resources += self._get_public_subnets()
        resources += self._get_private_subnets()
        return resources
class NATCFConf(BaseCFConf):
    """NAT section: a single NAT instance (with SourceDestCheck disabled)
    in the first public subnet, plus its security group allowing ICMP/TCP
    from inside the VPC. The private route table routes through it."""
    def _get_resources(self):
        return [
            Resource(
                "NATSecurityGroup",
                "AWS::EC2::SecurityGroup",
                Properties({
                    "VpcId": {"Ref": "VPC"},
                    "GroupDescription": "NAT Instance Security Group",
                    "SecurityGroupIngress": [
                        {
                            "IpProtocol": "icmp",
                            "FromPort": "-1",
                            "ToPort": "-1",
                            "CidrIp": "10.0.0.0/16"
                        },
                        {
                            "IpProtocol": "tcp",
                            "FromPort": "0",
                            "ToPort": "65535",
                            "CidrIp": "10.0.0.0/16"
                        }
                    ],
                    "Tags": [
                        {"Key": "Name", "Value": "NATSecurityGroup"}
                    ]
                })
            ),
            Resource(
                "NATInstance",
                "AWS::EC2::Instance",
                Properties({
                    "ImageId": {
                        "Fn::FindInMap": [
                            "RegionMap",
                            {
                                "Ref": "AWS::Region"
                            },
                            "nat"
                        ]
                    },
                    "InstanceType": {
                        "Ref": "NATInstanceType"
                    },
                    "BlockDeviceMappings": [
                        {
                            "DeviceName": "/dev/xvda",
                            "Ebs": {
                                "VolumeSize": 10
                            }
                        }
                    ],
                    "NetworkInterfaces": [
                        {
                            "GroupSet": [
                                {"Ref": "NATSecurityGroup"},
                                {"Ref": "SSHFromBastionSecurityGroup"}
                            ],
                            "SubnetId": {
                                "Ref": self.data['public_subnets'][0]
                            },
                            "AssociatePublicIpAddress": "true",
                            "DeviceIndex": "0",
                            "DeleteOnTermination": "true"
                        }
                    ],
                    # Required for a NAT instance: it forwards traffic that is
                    # neither sourced from nor destined to itself.
                    "SourceDestCheck": "false",
                    "Tags": [
                        {"Key": "Name", "Value": "NATHost"},
                        {"Key": "Role", "Value": "NAT"}
                    ],
                    "UserData": {
                        "Fn::Base64": {
                            "Fn::Join": [
                                "",
                                self.get_user_cloud_config()
                            ]
                        }
                    }
                }),
                attributes=[
                    DependsOn("GatewayToInternet")
                ]
            )
        ]
class SSHBastionCFConf(BaseCFConf):
    """SSH bastion section: an autoscaling group (default min/max 3 from
    _get_autoscale) of publicly addressable bastion hosts spread over the
    public subnets, reachable on port 22 from anywhere."""
    def _get_resources(self):
        resources = self._get_autoscale(
            'SSHBastion',
            extra_props_autoscale={
                "VPCZoneIdentifier": [
                    {"Ref": subnet_name} for subnet_name in self.data['public_subnets']
                ],
            },
            extra_props_launch={
                "AssociatePublicIpAddress": "true",
                "BlockDeviceMappings": [
                    {
                        "DeviceName": "/dev/xvda",
                        "Ebs": {
                            "VolumeSize": 10
                        }
                    }
                ]
            },
            extra_attrs_launch=[
                DependsOn("GatewayToInternet")
            ],
            extra_security_groups=['SSHBastionSecurityGroup']
        )
        return resources
class CoreOSCFConf(BaseCFConf):
    """
    CoreOS section: an etcd2 cluster autoscaling group ("services" role) and
    a CoreOS worker autoscaling group (etcd proxy + fleet + flannel), both in
    the private subnets, plus their security groups.

    cloud-config details here: https://github.com/coreos/coreos-cloudinit/blob/master/Documentation/cloud-config.md
    more etcd2 configs here: https://github.com/coreos/etcd/blob/86e616c6e974828fc9119c1eb0f6439577a9ce0b/Documentation/configuration.md
    more fleet configs here: https://github.com/coreos/fleet/blob/master/fleet.conf.sample
    """
    def _get_etcd_cluster_resources(self):
        # etcd2 member nodes: join via the DiscoveryURL, tagged role=services.
        resources = self._get_autoscale(
            'EtcdCluster',
            extra_security_groups=['CoreOSSecurityGroup'],
            extra_cloud_config=[
                "coreos:\n",
                "  update:\n",
                "    reboot-strategy: etcd-lock\n",
                "  etcd2:\n",
                "    discovery: ", {"Ref": "DiscoveryURL"}, "\n",
                "    advertise-client-urls: http://$private_ipv4:2379\n",
                "    initial-advertise-peer-urls: http://$private_ipv4:2380\n",
                "    listen-client-urls: http://0.0.0.0:2379\n",
                "    listen-peer-urls: http://$private_ipv4:2380\n",
                "  fleet:\n",
                "    metadata: \"role=services\"\n",
                "    etcd_servers: http://127.0.0.1:2379\n"
                "  units:\n",
                "    - name: etcd2.service\n",
                "      command: start\n",
                "    - name: fleet.service\n",
                "      command: start\n"
            ],
            config_min_size=1,
            config_max_size=12,
            extra_props_autoscale={
                "VPCZoneIdentifier": [
                    {"Ref": subnet_name} for subnet_name in self.data['private_subnets']
                ],
                "DesiredCapacity": {
                    "Ref": "EtcdClusterSize"
                }
            }
        )
        return resources
    def _get_coreos_resources(self):
        # Worker nodes: etcd runs in proxy mode, fleet tagged role=worker,
        # flannel provides the 192.168.192.0/18 overlay network.
        resources = self._get_autoscale(
            'CoreOS',
            extra_security_groups=['CoreOSSecurityGroup', 'WebAppSecurityGroup'],
            extra_cloud_config=[
                "coreos:\n",
                "  etcd2:\n",
                "    discovery: ", {"Ref": "DiscoveryURL"}, "\n",
                "    proxy: on\n",
                "    listen-client-urls: http://0.0.0.0:2379\n"
                "  fleet:\n",
                "    metadata: \"role=worker\"\n",
                "    etcd_servers: http://127.0.0.1:2379\n",
                "  flannel:\n",
                "    etcd_endpoints: http://127.0.0.1:2379\n",
                "  units:\n",
                "    - name: etcd2.service\n",
                "      command: start\n",
                "    - name: fleet.service\n",
                "      command: start\n",
                "    - name: flanneld.service\n",
                "      drop-ins:\n",
                "        - name: 50-network-config.conf\n",
                "          content: |\n",
                "            [Unit]\n",
                "            Requires=etcd2.service\n",
                "            [Service]\n",
                "            ExecStartPre=/usr/bin/etcdctl set /coreos.com/network/config '{ \"Network\": \"192.168.192.0/18\", \"Backend\": {\"Type\": \"vxlan\"}}'\n",
                "      command: start\n"
            ],
            config_min_size=1,
            config_max_size=12,
            extra_props_autoscale={
                "VPCZoneIdentifier": [
                    {"Ref": subnet_name} for subnet_name in self.data['private_subnets']
                ],
                "DesiredCapacity": {
                    "Ref": "CoreOSClusterSize"
                }
            }
        )
        resources.append(
            Resource(
                "WebAppSecurityGroup",
                "AWS::EC2::SecurityGroup",
                Properties({
                    "VpcId": {"Ref": "VPC"},
                    "GroupDescription": "WebApp SecurityGroup",
                    "SecurityGroupIngress": [
                        {
                            "IpProtocol": "tcp",
                            "FromPort": "8080",
                            "ToPort": "8080",
                            "SourceSecurityGroupId": {"Ref": "NATSecurityGroup"}
                        },
                        {
                            "IpProtocol": "tcp",
                            "FromPort": "8080",
                            "ToPort": "8080",
                            "SourceSecurityGroupId": {"Ref": "PublicHTTPSecurityGroup"}
                        }
                    ],
                    "Tags": [
                        {"Key": "Name", "Value": "WebAppSecurityGroup"}
                    ]
                })
            )
        )
        return resources
    def _get_resources(self):
        # Shared security group (all UDP + ICMP inside the VPC) plus separate
        # ingress rules for the etcd client (2379) and peer (2380) ports.
        resources = [
            Resource(
                "CoreOSSecurityGroup",
                "AWS::EC2::SecurityGroup",
                Properties({
                    "VpcId": {"Ref": "VPC"},
                    "GroupDescription": "CoreOS SecurityGroup",
                    "SecurityGroupIngress": [
                        {
                            "CidrIp": "10.0.0.0/16",
                            "IpProtocol": "udp",
                            "FromPort": "0",
                            "ToPort": "65535"
                        },
                        {
                            "CidrIp": "10.0.0.0/16",
                            "IpProtocol": "icmp",
                            "FromPort": "-1",
                            "ToPort": "-1"
                        }
                    ],
                    "Tags": [
                        {"Key": "Name", "Value": "CoreOSSecurityGroup"}
                    ]
                })
            ),
            Resource(
                "CoreOSSecurityGroup2380Ingress",
                "AWS::EC2::SecurityGroupIngress",
                Properties({
                    "GroupId": {"Ref": "CoreOSSecurityGroup"},
                    "IpProtocol": "tcp",
                    "FromPort": "2380",
                    "ToPort": "2380",
                    # "SourceSecurityGroupId": {"Ref": "CoreOSSecurityGroup"}  # TODO not working for now because need to use fleetctl locally to load units
                    "CidrIp": "10.0.0.0/16"
                }), attributes=[
                    DependsOn("CoreOSSecurityGroup")
                ]
            ),
            Resource(
                "CoreOSSecurityGroup2379Ingress",
                "AWS::EC2::SecurityGroupIngress",
                Properties({
                    "GroupId": {"Ref": "CoreOSSecurityGroup"},
                    "IpProtocol": "tcp",
                    "FromPort": "2379",
                    "ToPort": "2379",
                    # "SourceSecurityGroupId": {"Ref": "CoreOSSecurityGroup"}  # TODO not working for now because need to use fleetctl locally to load units
                    "CidrIp": "10.0.0.0/16"
                }), attributes=[
                    DependsOn("CoreOSSecurityGroup")
                ]
            )
        ]
        resources += self._get_etcd_cluster_resources()
        resources += self._get_coreos_resources()
        return resources
class DBCFConf(BaseCFConf):
    """Database section: a single-AZ PostgreSQL 9.3 RDS instance in the
    private subnets, reachable on 5432 only from the CoreOS security group.
    Outputs the endpoint hostname and port."""
    def _get_resources(self):
        return [
            Resource(
                "MasterDBSubnetGroup",
                "AWS::RDS::DBSubnetGroup",
                Properties({
                    "DBSubnetGroupDescription": "Master DB subnet group",
                    "SubnetIds": [
                        {"Ref": subnet_name} for subnet_name in self.data['private_subnets']
                    ]
                })
            ),
            Resource(
                "MasterDBSecurityGroup",
                "AWS::EC2::SecurityGroup",
                Properties({
                    "VpcId": {"Ref": "VPC"},
                    "GroupDescription": "Ingress for CoreOS instance security group",
                    "SecurityGroupIngress": [
                        {
                            "SourceSecurityGroupId": {"Ref": "CoreOSSecurityGroup"},
                            "IpProtocol": "tcp",
                            "FromPort": "5432",
                            "ToPort": "5432"
                        }
                    ],
                    "Tags": [
                        {"Key": "Name", "Value": "MasterDBSecurityGroup"}
                    ]
                })
            ),
            Resource(
                "PrimaryDB",
                "AWS::RDS::DBInstance",
                Properties({
                    "DBName": {"Ref": "DBName"},
                    "AllocatedStorage": {"Ref": "DBAllocatedStorage"},
                    "DBInstanceClass": {"Ref": "DBInstanceClass"},
                    "Engine": "postgres",
                    "EngineVersion": "9.3.5",
                    "MasterUsername": {"Ref": "DBUsername"},
                    "MasterUserPassword": {"Ref": "DBPassword"},
                    "Port": "5432",
                    "VPCSecurityGroups": [{"Ref": "MasterDBSecurityGroup"}],
                    "PubliclyAccessible": "false",
                    "PreferredMaintenanceWindow": "sun:12:00-sun:12:30",
                    "PreferredBackupWindow": "23:00-23:30",
                    "BackupRetentionPeriod": "7",
                    "DBParameterGroupName": "default.postgres9.3",
                    "AutoMinorVersionUpgrade": "true",
                    "MultiAZ": "false",
                    "DBSubnetGroupName": {"Ref": "MasterDBSubnetGroup"},
                    "Tags": [{"Key": "Role", "Value": "Primary"}]
                })
            )
        ]
    def _get_outputs(self):
        return [
            Output(
                "PrimaryDBHostname",
                {"Fn::GetAtt": ["PrimaryDB", "Endpoint.Address"]},
                "Primary Database Hostname",
            ),
            Output(
                "PrimaryDBPort",
                {"Fn::GetAtt": ["PrimaryDB", "Endpoint.Port"]},
                "Primary Database Port"
            )
        ]
class ELBCFConf(BaseCFConf):
    """CloudFormation conf for the public-facing Elastic Load Balancer.

    Creates the ELB itself (port 80 -> instance port 8080), plus an IAM user
    and access key whose only permission is registering/deregistering
    instances with that ELB (used by the deployment tooling on the hosts).
    """

    def _get_resources(self):
        return [
            Resource(
                "PublicELB",
                "AWS::ElasticLoadBalancing::LoadBalancer",
                Properties({
                    "Subnets": [
                        {"Ref": subnet_name} for subnet_name in self.data['public_subnets']
                    ],
                    "SecurityGroups": [
                        {"Ref": "PublicHTTPSecurityGroup"}
                    ],
                    # HTTP on 80 forwarded to the app port 8080 on instances.
                    "Listeners": [{
                        "LoadBalancerPort": "80",
                        "InstancePort": "8080",
                        "Protocol": "HTTP"
                    }],
                    "HealthCheck": {
                        "Target": "HTTP:8080/",
                        "HealthyThreshold": "3",
                        "UnhealthyThreshold": "5",
                        "Interval": "30",
                        "Timeout": "5"
                    }
                })
            ),
            Resource(
                "PublicELBIAMUser",
                "AWS::IAM::User",
                Properties({
                    # Least-privilege user: may only (de)register instances
                    # on this specific load balancer ARN.
                    "Policies": [{
                        "PolicyName": "PublicELBRegisterDeregisterOnly",
                        "PolicyDocument": {
                            "Version": "2012-10-17",
                            "Statement": [{
                                "Effect": "Allow",
                                "Action": [
                                    "elasticloadbalancing:DeregisterInstancesFromLoadBalancer",
                                    "elasticloadbalancing:RegisterInstancesWithLoadBalancer"
                                ],
                                "Resource": {
                                    "Fn::Join": [
                                        "",
                                        [
                                            "arn:aws:elasticloadbalancing:",
                                            {
                                                "Ref": "AWS::Region"
                                            },
                                            ":",
                                            {
                                                "Ref": "AWS::AccountId"
                                            },
                                            ":loadbalancer/",
                                            {
                                                "Ref": "PublicELB"
                                            }
                                        ]
                                    ]
                                }
                            }]
                        }
                    }]
                })
            ),
            Resource(
                "PublicELBAccessKey",
                "AWS::IAM::AccessKey",
                Properties({
                    "UserName": {"Ref": "PublicELBIAMUser"}
                })
            )
        ]

    def _get_outputs(self):
        # The key pair is exported so deploy tooling can pick it up.
        return [
            Output(
                "PublicELBName",
                {"Ref": "PublicELB"},
                "Public ELB Name"
            ),
            Output(
                "PublicELBAccessKey",
                {"Ref": "PublicELBAccessKey"},
                "Public ELB ACCESS_KEY"
            ),
            Output(
                "PublicELBSecretAccessKey",
                {"Fn::GetAtt": ["PublicELBAccessKey", "SecretAccessKey"]},
                "Public ELB SECRET_ACCESS_KEY"
            )
        ]
class PublicELBDNSConfs(BaseCFConf):
    """CloudFormation conf for the Route53 CNAME records of the public ELB.

    One record per entry in DNS_MAPPING: the bare env name plus the
    'next.' and 'previous.' prefixes (presumably used for blue/green style
    cutovers -- TODO confirm), all pointing at the ELB's canonical hostname.
    """

    # '' produces "<EnvName>.<dns_suffix>"; the others are prefixed variants.
    DNS_MAPPING = ('', 'next', 'previous')

    def _get_dns_record_name(self, prefix):
        # E.g. '' -> PublicELBDNSRecord, 'next' -> PublicNextELBDNSRecord.
        return "Public%sELBDNSRecord" % prefix.title()

    def _get_resources(self):
        resources = []
        dns_suffix = self.data['dns_suffix']
        for prefix in self.DNS_MAPPING:
            name = self._get_dns_record_name(prefix)
            # Non-empty prefixes need a trailing dot in the DNS name.
            if prefix != '':
                prefix = "%s." % prefix
            resources.append(
                Resource(
                    name,
                    "AWS::Route53::RecordSet",
                    Properties({
                        "HostedZoneName": dns_suffix,
                        "Comment": "DNS name for TeamCity",
                        "Name": {
                            "Fn::Join": ["", [
                                prefix,
                                {"Ref": "EnvName"},
                                '.',
                                dns_suffix
                            ]]
                        },
                        "Type": "CNAME",
                        "TTL": "60",
                        "ResourceRecords": [
                            {"Fn::GetAtt": ["PublicELB",
                                            "CanonicalHostedZoneName"]}
                        ]
                    })
                )
            )
        return resources

    def _get_outputs(self):
        # Echo each record's logical name/value as a stack output.
        outputs = []
        for prefix in self.DNS_MAPPING:
            name = self._get_dns_record_name(prefix)
            outputs.append(
                Output(
                    name,
                    {"Ref": name},
                    name
                )
            )
        return outputs
# All conf classes applied by get_template(), in order.  Order matters:
# later confs reference resources created by earlier ones (e.g. DBCFConf
# uses CoreOSSecurityGroup, PublicELBDNSConfs uses PublicELB).
CONFIGs = [
    GeneralCFConf, SubnetsCFConf, NATCFConf, SSHBastionCFConf,
    CoreOSCFConf, DBCFConf, ELBCFConf, PublicELBDNSConfs
]
def get_template(region, coreos_ami, dns_suffix):
    """Assemble the complete CloudFormation template.

    Applies every conf class in CONFIGs, in order, against one shared
    template object.  Each conf may also extend the shared data dict,
    which is passed on to the confs that follow it.
    """
    template = CloudFormationTemplate(
        description='Core OS on EC2 app cluster'
    )
    shared_data = {
        'region': region,
        'coreos_ami': coreos_ami,
        'dns_suffix': dns_suffix,
    }
    for conf_cls in CONFIGs:
        contribution = conf_cls(data=shared_data).add(template)
        shared_data.update(contribution)
    return template
| StarcoderdataPython |
204244 | import os.path
import setuptools
# Get long description from README.
with open('README.rst', 'r') as fh:
    long_description = fh.read()

# Get package metadata from '__about__.py' file.
about = {}
base_dir = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(base_dir, 'resolwe_bio', '__about__.py'), 'r') as fh:
    exec(fh.read(), about)

setuptools.setup(
    name=about['__title__'],
    # Version is derived from the git tag via setuptools_scm.
    use_scm_version=True,
    description=about['__summary__'],
    long_description=long_description,
    long_description_content_type='text/x-rst',
    author=about['__author__'],
    author_email=about['__email__'],
    url=about['__url__'],
    license=about['__license__'],
    # Exclude tests from built/installed package.
    packages=setuptools.find_packages(
        exclude=['tests', 'tests.*', '*.tests', '*.tests.*']
    ),
    # Non-Python assets shipped inside the package (process definitions,
    # fixtures, SQL migrations and helper tools).
    package_data={
        'resolwe_bio': [
            'descriptors/*.yml',
            'fixtures/*.yaml',
            "migrations/*.sql",
            'processes/**/*.yml',
            'processes/**/*.py',
            'tools/*.py',
            'tools/*.R',
            'tools/*.sh',
        ]
    },
    python_requires='>=3.6, <3.9',
    install_requires=(
        'Django~=2.2.0',
        'djangorestframework~=3.9.0',
        'django-filter~=2.0.0',
        'elasticsearch-dsl~=6.3.1',
        # XXX: Required due to issue https://github.com/pypa/pip/issues/4905.
        'resolwe >=21.0a1, ==21.*',
        # XXX: Temporarily pin urllib to 1.24.x, since requests 2.21.0
        # has requirement urllib3<1.25,>=1.21.1
        'urllib3~=1.24.2',
        'wrapt~=1.11.1',
    ),
    extras_require={
        'docs': [
            # XXX: Temporarily pin Sphinx to version 1.5.x since 1.6 doesn't
            # work with our custom page template.
            'Sphinx~=1.5.6',
            'sphinx_rtd_theme',
            # XXX: Temporarily pin docutils to version 0.15.2 since Sphinx's
            # requirements are too open and cannot correctly install the
            # requested pre-release version of docutils.
            'docutils==0.15.2',
            'pyasn1>=0.4.8',
        ],
        'package': ['twine', 'wheel'],
        'test': [
            'pycodestyle~=2.5.0',
            'pydocstyle~=3.0.0',
            'pylint~=2.3.1',
            'tblib~=1.3.0',
            'check-manifest',
            'setuptools_scm',
            'twine',
            'six==1.12',
        ],
    },
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Web Environment',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'Topic :: Scientific/Engineering :: Bio-Informatics',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
    ],
    keywords='bioinformatics resolwe bio pipelines dataflow django',
)
| StarcoderdataPython |
5087345 | <filename>src/__init__.py
"""Source code for the very good semantic segmentation labeler."""
| StarcoderdataPython |
1974791 | import dcase_util
# Build a dcase_util processing chain: read audio -> extract features ->
# cut into fixed-length sequences -> collapse the repository to one matrix.
chain = dcase_util.processors.ProcessingChain([
    {
        'processor_name': 'AudioReadingProcessor',
        'init_parameters': {
            'fs': 44100  # target sampling rate in Hz
        }
    },
    {
        'processor_name': 'RepositoryFeatureExtractorProcessor',
        'init_parameters': {
            'parameters': {
                'mel': {}  # mel-band energies with library defaults
            }
        }
    },
    {
        'processor_name': 'RepositorySequencingProcessor',
        'init_parameters': {
            'sequence_length': 100  # frames per sequence
        }
    },
    {
        'processor_name': 'RepositoryToMatrixProcessor',
        'init_parameters': {
            'label': 'mel',
            'expanded_dimension': 'last'
        }
    },
])
# Run the processing chain
data = chain.process(filename=dcase_util.utils.Example().audio_filename())
data.plot()
3537398 | #!/usr/bin/env python3
from math import pi, atan, sin, cos, sqrt
from functools import reduce
import cv2
import numpy as np
import shm
from vision.modules.base import ModuleBase
from vision.framework.color import bgr_to_lab, elementwise_color_dist, range_threshold, color_dist
from vision.framework.transform import elliptic_kernel, dilate, erode, rect_kernel, morph_remove_noise, simple_gaussian_blur, resize
from vision.framework.feature import outer_contours, find_lines
from vision.framework.draw import draw_line
from vision.modules.attilus_garbage import garlic_crucifix_opts as opts, KMEANS_ITER, lines_to_angles, vectors_to_degrees, angle_to_unit_circle, angle_to_line, find_yellow_circle, intersect_circles, crop_by_mask, kmeans_mask, outline_mask
from auv_python_helpers.angles import abs_heading_sub_degrees, heading_sub_degrees
from vision.modules.gate import thresh_color_distance
COLORSPACE = 'lab'
class Recovery(ModuleBase):
    """Vision module that finds the red 'garlic' cross inside a yellow circle,
    estimates the cross's arm angles, and publishes the circle center and the
    angular offset to the manipulator angle via shm.bins_garlic.
    """

    def process(self, mat):
        # Publish the raw frame, then work on a half-resolution copy.
        self.post('org', mat)
        mat = resize(mat, mat.shape[1]//2, mat.shape[0]//2)
        # Default the published center to the frame center; overwritten in
        # find_red_garlic() when a circle containing the garlic is found.
        shm.bins_garlic.center_x.set(mat.shape[0]//2)
        shm.bins_garlic.center_y.set(mat.shape[1]//2)
        cvtmat, split = bgr_to_lab(mat)
        # Candidate yellow circles, with all thresholds from tunable options.
        self.circles = find_yellow_circle(split,
            color=[self.options['yellow_{}'.format(s)] for s in COLORSPACE],
            distance=self.options['circle_color_distance'],
            erode_kernel=self.options['circle_erode_kernel'],
            erode_iterations=self.options['circle_erode_iterations'],
            dilate_kernel=self.options['circle_dilate_kernel'],
            dilate_iterations=self.options['circle_dilate_iterations'],
            min_contour_size=self.options['circle_min_contour_size'],
            min_circularity=self.options['circle_min_circularity'],
            radius_offset=self.options['garlic_circle_r_offset'])
        cv2.drawContours(mat, [c['contour'] for c in self.circles], 0, (255, 0, 0), 10)
        for c in self.circles:
            cv2.circle(mat, *c['circle'], (0, 255, 0), 10)
        self.post('circle', mat)
        self.find_red_garlic(cvtmat, split)

    def find_red_garlic(self, cvtmat, split):
        # Threshold on distance to the configured red color (a/b channels
        # only; the L channel is ignored), then clean up with morphology.
        color = [self.options['red_{}'.format(s)] for s in COLORSPACE]
        distance = self.options['garlic_color_distance']
        mask, _ = thresh_color_distance(split, color, distance, ignore_channels=[0])
        mask = erode(mask, rect_kernel(self.options['garlic_erode_kernel']), iterations=self.options['garlic_erode_iterations'])
        mask = dilate(mask, rect_kernel(self.options['garlic_dilate_kernel']), iterations=self.options['garlic_dilate_iterations'])
        self.post('garlic', mask)
        # Pick the candidate circle whose interior overlaps the red mask.
        circle_id, mask_c = intersect_circles(self.circles, mask, min_size=self.options['garlic_size_min'])
        if circle_id is not None:
            (x, y), r = self.circles[circle_id]['circle']
            shm.bins_garlic.center_x.set(x)
            shm.bins_garlic.center_y.set(y)
            only_circle = cv2.bitwise_and(cvtmat, cvtmat, mask=mask_c)
            self.post('hmmm', only_circle)
            only_circle = crop_by_mask(cvtmat, mask_c, x, y, r)  # TODO: move this to a big function in recovery common
            # Re-segment the crop with k-means, keeping the cluster nearest
            # the configured red centroid: the cross shape.
            cross = kmeans_mask(only_circle, x, y, r,
                                target_centeroid=(self.options['red_l'], self.options['red_a'], self.options['red_b']),
                                centeroids=3, remove_noise=False,
                                morph_kernel=self.options['kmeans_morph_kernel'],
                                morph_iterations=self.options['kmeans_morph_iterations'])
            # cross = dilate(cross, rect_kernel(self.options['kmeans_morph_kernel']))
            # cross = erode(cross, rect_kernel(self.options['kmeans_morph_kernel']))
            self.post('crus', cross)
            cross = outline_mask(cross, simplify=False)
            lines, vectors = self.find_garlic_angles(cross)
            cross = cv2.cvtColor(cross, cv2.COLOR_GRAY2BGR)
            for l in lines:
                draw_line(cross, (int(l[0]), int(l[1])), (int(l[2]), int(l[3])), thickness=5)
            for a in vectors:
                draw_line(cvtmat, (x, y), (x+int(a[0]*1000), y+int(a[1]*1000)), thickness=5)
            angles = [vectors_to_degrees(a) for a in vectors]
            # print(angles)
            # NOTE(review): manipulator_vector is only used by the
            # commented-out draw_line below; it is currently dead.
            manipulator_vector = angle_to_unit_circle(self.options['manipulator_angle']/180*pi)
            # draw_line(cvtmat, (x, y), (x + int(manipulator_vector[0]*1000), y + int(manipulator_vector[1]*1000)), color=(0, 255, 0), thickness=4)
            draw_line(cvtmat, *angle_to_line(self.options['manipulator_angle'], origin=(x,y)), color=(0, 255, 0), thickness=4)
            closest = self.find_closest_angle(self.options['manipulator_angle'], *angles, post=True)
            if closest is not None:
                closest_unit_vector = angle_to_unit_circle(closest/180*pi)
                # draw_line(cvtmat, (x, y), (x + int(closest_unit_vector[0]*1000), y + int(closest_unit_vector[1]*1000)), color=(255, 0, 0), thickness=5)
                draw_line(cvtmat, *angle_to_line(closest, origin=(x,y)), color=(255, 0, 0), thickness=5)
            self.post('angles', cvtmat)
            self.post('hmm', cross)

    def find_garlic_angles(self, garlic_mask):
        """Hough-detect line segments near the mask center and cluster their
        orientations (as unit vectors) into two mean directions via k-means.
        Returns (raw line segments, cluster-center vectors)."""
        lines = find_lines(garlic_mask, 2, pi/180, self.options['garlic_line_threshold'])[0]
        print(lines)
        center = garlic_mask.shape[0]//2, garlic_mask.shape[1]//2
        def distance_from_center(line):
            # Perpendicular point-to-line distance from the mask center.
            num = abs((line[3]-line[1])*center[1]-(line[2]-line[0])*center[0] + line[2]*line[1]-line[3]*line[0])
            denom = sqrt((line[3]-line[1])**2 + (line[2]-line[0])**2)
            return num/denom
        # Keep only lines passing close to the center (cross arms).
        lines = list(filter(lambda x: distance_from_center(x) < 80, lines))
        angles = np.array([angle_to_unit_circle(lines_to_angles(l)) for l in lines], dtype=np.float32)
        if len(angles) < 2: return lines, angles
        criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
        compactness, label, centeroid = cv2.kmeans(angles, 2, None, criteria, KMEANS_ITER, cv2.KMEANS_RANDOM_CENTERS)
        return lines, centeroid

    def find_closest_angle(self, target, *angles, post=False):
        """Return the candidate angle (degrees) closest to `target`; when
        `post` is set, also publish the signed offset to shm."""
        if len(angles) == 0: return
        # print(angles)
        closest = min(angles, key=lambda x: abs_heading_sub_degrees(target, x))
        print(closest)
        if post:
            shm.bins_garlic.angle_offset.set(heading_sub_degrees(target, closest))
        return closest
if __name__ == '__main__':
    # Standalone run: instantiate the module on the 'downward' camera with
    # the garlic/crucifix option set and invoke it once.
    Recovery('downward', opts)()
| StarcoderdataPython |
5047483 | <filename>src/profiler/img_to_base64.py
"""
Copyright 2018-2021 Board of Trustees of Stanford University
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
#!/usr/bin/python3
import matplotlib.image as mpimg
import numpy as np
import base64
import sys
import os
def main():
    """CLI entry point: read an image, flatten it to float32 pixel values,
    and print the base64 encoding of the raw bytes to stdout.

    Usage: ./img_to_base64.py <image-path>

    Exits with status 1 on a missing argument or nonexistent path.
    """
    if len(sys.argv) != 2:
        print("Usage: ./img_to_base64.py <image-path>")
        sys.exit(1)

    image_path = sys.argv[1]
    if not os.path.exists(image_path):
        print(image_path, "is not a valid path")
        sys.exit(1)

    img = mpimg.imread(image_path)
    img_flatten = img.flatten().astype(np.float32)
    img_bytes = img_flatten.tobytes()
    b64_enc = base64.b64encode(img_bytes)
    # BUG FIX: str() on the bytes object printed "b'...'" including the
    # literal prefix and quotes; decode to get the bare base64 string.
    b64_string = b64_enc.decode('ascii')

    # Print for caller to grab
    print(b64_string)
if __name__ == '__main__':
    # Script entry point.
    main()
| StarcoderdataPython |
3568982 | import multiprocessing as mp
import random
import sys
import time
import numpy as np
class Worker(mp.Process):
    """Worker process that repeatedly samples RR sets from a graph.

    Protocol (driven by run()):
      * reads one (time_limit, pharse_num, mem_limit, size_limit) tuple
        from ``in_q``;
      * emits ``(0, buffer)`` on ``out_q`` once per phase, where ``buffer``
        is a list of RR sets;
      * emits ``(-1, None)`` as the end-of-stream sentinel.

    Parameters
    ----------
    wid : int
        Worker id (for bookkeeping by the parent).
    in_q, out_q : multiprocessing queues
        Control input and result output channels.
    vs : sequence
        Vertex collection; a root index in ``range(len(vs))`` is sampled
        uniformly for every RR set.
    rrset_func : callable
        ``rrset_func(vs, root_index)`` returning an RR set (falsy results
        are discarded).
    """

    def __init__(self, wid, in_q, out_q, vs, rrset_func):
        # BUG FIX: the original passed target=self.start to Process.__init__.
        # That target is never used because run() is overridden below -- and
        # had run() not been overridden, Process.run() calling self.start
        # would have recursed.  Drop it.
        super(Worker, self).__init__()
        self.wid = wid
        self.in_q = in_q
        self.out_q = out_q
        self.vs = vs
        self.rrset_func = rrset_func
        # Seed both RNGs per worker so parallel workers do not share a
        # stream.  (Seeding happens in the parent at construction time; with
        # the 'spawn' start method this state may not carry into the child --
        # TODO confirm the intended start method is 'fork'.)
        seed = time.time_ns() % (2 ** 30)
        np.random.seed(seed)
        random.seed(seed)

    def run(self):
        n = len(self.vs)
        time_limit, pharse_num, mem_limit, size_limit = self.in_q.get()
        # print(f'id = {self.wid}, time_limit = {time_limit}, rrset_limit = {rrset_limit}')
        # First phase: sample until this phase's share of the memory budget
        # (measured via sys.getsizeof of each RR set) is used up.
        pharse_size, target_pharse_size = 0, int(mem_limit / pharse_num)
        buffer = []
        while pharse_size <= target_pharse_size:
            r = int(random.random() * n)
            rrset = self.rrset_func(self.vs, r)
            if rrset:
                buffer.append(rrset)
                pharse_size += sys.getsizeof(rrset)
        self.out_q.put((0, buffer))
        # Remaining phases: sample a fixed number of RR sets per phase
        # (bounded by both the size limit and the first phase's yield),
        # until the wall-clock deadline or the phase budget runs out.
        rrset_cnt_for_one_pharse = min(size_limit / pharse_num, len(buffer))
        left_pharse_num = pharse_num - 1
        del buffer
        while time.time() < time_limit and left_pharse_num > 0:
            left_pharse_num -= 1
            count = 0
            buffer = []
            while count < rrset_cnt_for_one_pharse:
                r = int(random.random() * n)
                rrset = self.rrset_func(self.vs, r)
                if rrset:
                    buffer.append(rrset)
                    count += 1
            self.out_q.put((0, buffer))
            del buffer
        # End-of-stream sentinel for the consumer.
        self.out_q.put((-1, None))
| StarcoderdataPython |
3400247 | <gh_stars>0
import pytest
from fbotics import OAuthException
from fbotics.tests import ANY
def test_status_code_when_sending_text_message_to_valid_recipient(
        client,
        recipient_id):
    """
    GIVEN a client and a valid recipient id
    WHEN a text message is sent to the recipient
    THEN the status code of the response is 200
    """
    # 'client' and 'recipient_id' are presumably pytest fixtures -- TODO
    # confirm they are defined in a conftest not shown here.
    response = client.send_text_message(recipient_id=recipient_id, text="foo")
    assert response.status_code == 200
def test_exception_when_sending_text_message_to_invalid_recipient(client):
    """
    GIVEN a client and an invalid recipient id
    WHEN a text message is sent to that recipient
    THEN an OAuthException is raised
    """
    invalid_recipient_id = 1234
    with pytest.raises(OAuthException):
        client.send_text_message(recipient_id=invalid_recipient_id, text="foo")
def test_response_content_when_sending_text_message_to_valid_recipient(
        client,
        recipient_id):
    """
    GIVEN a client and a valid recipient id
    WHEN a text message is sent to the recipient
    THEN the response body contains the recipient id and a message id
    """
    response = client.send_text_message(recipient_id=recipient_id, text="foo")
    # ANY(str) matches any string-valued message id.
    assert response.json() == {
        "recipient_id": "2157136727638083",
        "message_id": ANY(str)}
| StarcoderdataPython |
5060037 | <reponame>superonesfazai/fz_ip_pool
# coding:utf-8
'''
@author = super_fazai
@File : exception.py
@connect : <EMAIL>
'''
class NotIpException(Exception):
    """Raised when a value is not an IP address."""
    pass
| StarcoderdataPython |
4910545 | from anadama.util import dict_to_cmd_opts, addext, new_file
from anadama.decorators import requires
from . import (
settings
)
@requires(binaries=["bowtie2"],
          version_methods=["bowtie2 --version | head -1"])
def bowtie2_align(infiles_list, output_file, **opts):
    """Build an AnADAMA task that maps fastq query files against a bowtie2
    reference database.  Extra keyword arguments become bowtie2 command-line
    flags verbatim.

    :param infiles_list: List of strings; file paths of the fastq search
                         queries.
    :param output_file: String; destination file for the sam-format results.

    :keyword reference_db: String; basename of the bowtie2 reference db
                           (passed to bowtie2 -x). Defaults to the configured
                           KEGG database.
    :keyword threads: String or int; mapping threads (bowtie2 -p).
                      Defaults to 2.

    External dependencies:
      - Bowtie2 2.2.1: http://bowtie-bio.sourceforge.net/bowtie2/index.shtml

    Resource utilization:
      - Ram: 2.0-3.0G
      - CPU: 1 core; more depending on the 'threads' option
    """
    options = {  # defaults, overridable via **opts
        "reference_db": settings.workflows.alignment.kegg_bowtie2_db,
        "threads": 2,
    }
    options.update(opts)
    reference_db = options.pop('reference_db')
    threads = options.pop('threads')

    # Note: the double space after "bowtie2" is preserved from the original
    # command construction.
    cmd = (
        "bowtie2  -x {db} -p {threads} -U {inputs}"
        " --no-head --very-sensitive {extra} > {out}"
    ).format(
        db=reference_db,
        threads=threads,
        inputs=",".join(infiles_list),
        extra=dict_to_cmd_opts(options),
        out=output_file,
    )

    return {
        "name": "bowtie2_align:" + output_file,
        "actions": [cmd],
        "file_dep": infiles_list,
        "targets": [output_file]
    }
| StarcoderdataPython |
9657533 | <reponame>nirvaank/pyqmc
import numpy as np
from pyqmc.accumulators import EnergyAccumulator, LinearTransform, SqAccumulator
from pyqmc.obdm import OBDMAccumulator
from pyqmc.tbdm import TBDMAccumulator
import pyqmc.api as pyq
import copy
def test_transform(LiH_sto3g_rhf):
    """Check that serialized parameter gradients have shape (nconfig, nparams)."""
    # LiH_sto3g_rhf is a pytest fixture providing a (molecule, mean-field) pair.
    mol, mf = LiH_sto3g_rhf
    wf, to_opt = pyq.generate_wf(mol, mf)
    transform = LinearTransform(wf.parameters)
    x = transform.serialize_parameters(wf.parameters)
    nconfig = 10
    configs = pyq.initial_guess(mol, nconfig)
    wf.recompute(configs)
    pgrad = wf.pgradient()
    gradtrans = transform.serialize_gradients(pgrad)
    # One row per walker configuration, one column per serialized parameter.
    assert gradtrans.shape[1] == len(x)
    assert gradtrans.shape[0] == nconfig
def test_info_functions_mol(LiH_sto3g_rhf):
    """Exercise accumulator info functions (shapes/keys/avg) on a molecule."""
    mol, mf = LiH_sto3g_rhf
    wf, to_opt = pyq.generate_wf(mol, mf)
    accumulators = {
        "pgrad": pyq.gradient_generator(mol, wf, to_opt),
        "obdm": OBDMAccumulator(mol, orb_coeff=mf.mo_coeff),
        "tbdm_updown": TBDMAccumulator(mol, np.asarray([mf.mo_coeff] * 2), (0, 1)),
    }
    info_functions(mol, wf, accumulators)
def test_info_functions_pbc(H_pbc_sto3g_krks):
    """Exercise accumulator info functions on a periodic (k-point) system."""
    # NOTE(review): get_supercell is imported but unused in this function.
    from pyqmc.supercell import get_supercell

    mol, mf = H_pbc_sto3g_krks
    kinds = [0, 1]
    # First two orbitals at each selected k-point for the density matrices.
    dm_orbs = [mf.mo_coeff[i][:, :2] for i in kinds]
    wf, to_opt = pyq.generate_wf(mol, mf)
    accumulators = {
        "pgrad": pyq.gradient_generator(mol, wf, to_opt, ewald_gmax=10),
        "obdm": OBDMAccumulator(mol, dm_orbs, kpts=mf.kpts[kinds]),
        "Sq": SqAccumulator(mol.lattice_vectors()),
    }
    info_functions(mol, wf, accumulators)
def info_functions(mol, wf, accumulators):
    """Shared checks: for every accumulator, keys(), shapes() and avg()
    must agree on the set of quantities and their array shapes."""
    accumulators["energy"] = accumulators["pgrad"].enacc
    configs = pyq.initial_guess(mol, 100)
    wf.recompute(configs)
    for k, acc in accumulators.items():
        shapes = acc.shapes()
        keys = acc.keys()
        assert shapes.keys() == keys, "keys: {0}\nshapes: {1}".format(keys, shapes)
        avg = acc.avg(configs, wf)
        assert avg.keys() == keys, (k, avg.keys(), keys)
        for ka in keys:
            assert shapes[ka] == avg[ka].shape, "{0} {1}".format(ka, avg[ka].shape)
if __name__ == "__main__":
    # NOTE(review): both test functions require pytest fixtures
    # (LiH_sto3g_rhf / H_pbc_sto3g_krks) as arguments, so calling them with
    # no arguments here raises TypeError.  Run this module via pytest instead.
    test_info_functions_mol()
    test_info_functions_pbc()
| StarcoderdataPython |
8191350 | <filename>Exercicios do curso em video/pythonProject/pythonexercicios/ex005.py
# Read an integer and report its predecessor and successor.
num = int(input('Digite um numero: '))
print(f'O antecessor de {num} é {num - 1} e o sucessor de {num} é {num + 1}')
3328525 | <filename>setup.py
import setuptools
# Runtime dependencies.
requirements = [
    'lxml',
    'requests',
]

# Extra dependencies used only by the test suite.
test_requirements = [
    'pytest',
    'requests-mock',
]

setuptools.setup(
    name="py-walmart",
    version="0.0.1",
    url="https://github.com/dreygur",
    author="<NAME>",
    author_email="<EMAIL>",
    description="Walmart Marketplace API",
    long_description=open('README.md').read(),
    packages=setuptools.find_packages(),
    install_requires=requirements,
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9'
    ],
    test_suite='tests',
    tests_require=test_requirements
)
| StarcoderdataPython |
3561354 | <filename>scripts/ssc/witness_complex/witness_kNN_visualization.py<gh_stars>0
from sklearn.neighbors import NearestNeighbors
from scripts.ssc.persistence_pairings_visualization.utils_definitions import make_plot
from src.datasets.datasets import SwissRoll
from src.topology.witness_complex import WitnessComplex
# Output directory for the generated pairing plots.
PATH = '/Users/simons/PycharmProjects/MT-VAEs-TDA/output/SwissRoll_pairings/witness_complex_k/'

if __name__ == "__main__":
    n_landmarks = 512
    n_witnesses = 2048
    seed = 0
    # Sample landmarks and (with a different seed) witnesses from a Swiss roll.
    dataset_sampler = SwissRoll()
    landmarks, color = dataset_sampler.sample(n_landmarks, seed=seed)
    witnesses, _ = dataset_sampler.sample(n_witnesses, seed=(seed+17))
    witness_complex = WitnessComplex(landmarks,witnesses)
    witness_complex.compute_simplicial_complex(1,True,r_max=7)
    # For each neighborhood size k, compute kNN pairings over the witness
    # distance matrix and plot them (n_neighbors=k+1 because each point is
    # its own nearest neighbor under a precomputed metric).
    for k in [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]:
        name = 'nl{}_nw{}_k{}_seed{}'.format(n_landmarks,n_witnesses,k,seed)
        neigh = NearestNeighbors(n_neighbors=(k+1), metric='precomputed').fit(witness_complex.landmarks_dist)
        distances, pairings = neigh.kneighbors(witness_complex.landmarks_dist)
        print(distances)
        print(pairings)
        make_plot(landmarks, pairings, color,name, path_root = PATH, knn = True)
| StarcoderdataPython |
3414527 | import dem as d
# Per-region file prefixes (presumably continent codes: Asia, Africa,
# Australia, Central/North/South America, Europe -- TODO confirm).
prefixes = ['as', 'af', 'au', 'ca', 'eu', 'na', 'sa']
# Concavity-index values to compute steepness for.
thetas = [0.4, 0.5, 0.6]
horizontal_interval = 5000.0

for prefix in prefixes:
    # Load the pre-computed elevation, drainage area and D8 flow-direction
    # grids for this region.
    dem = d.Elevation.load(prefix + '_elevation')
    area = d.GeographicArea.load(prefix + '_area')
    fd = d.FlowDirectionD8.load(prefix + '_flow_direction')
    for theta in thetas:
        # Channel steepness from chi analysis; e.g. theta=0.4 saves to
        # '<prefix>_ks_hz_theta_0_4'.
        ks = d.GeographicKsFromChiWithSmoothing(elevation = dem, area = area, flow_direction = fd, theta = theta, horizontal_interval = horizontal_interval)
        ks.save(prefix + '_ks_hz_theta_' + str(theta).replace('.','_'))
| StarcoderdataPython |
1829603 | <reponame>bwgref/duet-astro<filename>astroduet/models.py<gh_stars>1-10
import os
from astropy import log
import numpy as np
import astropy.units as u
import astropy.constants as const
from astropy.table import Table, QTable, join
from astroduet.config import Telescope
from astroduet.bbmag import bb_abmag, bb_abmag_fluence
from .utils import tqdm
curdir = os.path.dirname(__file__)
datadir = os.path.join(curdir, 'data')
class Simulations():
    '''
    Container class for the list of simulations and some helper scripts to
    process them.

    Each ``parse_*`` method converts one family of model light curves to
    DUET magnitudes/fluences and writes a ``<name>_lightcurve_DUET.fits``
    file per model into the package data directory.

    Attributes
    ----------
    emgw_simulations : list of str
        EMGW (GRB shock / kilonova) model input files.
    sne_simulations : list of str
        Supernova model prefixes (read via ``convert_sn_model`` from
        ``<prefix>_teff.txt`` / ``<prefix>_radius.txt``).
    sne_rsg_simulations, sne_bsg_simulations, sne_ysg_simulations : list of str
        Red/blue/yellow supergiant SN shock-breakout model files.
    '''

    def __init__(self):
        self.emgw_simulations = ['shock_2.5e10.dat',
                                 'shock_5e10.dat',
                                 'shock_1e11.dat',
                                 'kilonova_0.01.dat',
                                 'kilonova_0.02.dat',
                                 'kilonova_0.04.dat']
        self.sne_simulations = ['IIb', 'IIP', 'IIP_big', 'stripped']
        # Supergiant grids, one file per model in regular steps; the
        # comprehensions reproduce the original explicit lists exactly.
        self.sne_rsg_simulations = (
            ['rsg700_long.dat'] +
            ['rsg{}.dat'.format(r) for r in range(400, 1001, 50)])
        self.sne_bsg_simulations = \
            ['bsg{}.dat'.format(r) for r in range(20, 81, 5)]
        self.sne_ysg_simulations = \
            ['ysg{}.dat'.format(r) for r in range(150, 601, 50)]

    def info(self):
        '''Print a short summary of the available EMGW simulation inputs.'''
        print('-----')
        print('DUET Simulations:')
        print('-----')
        print('EMGW Simulations Inputs:')
        for emgw in self.emgw_simulations:
            print(emgw)
        print()
        print()

    def _parse_list(self, sim_list, converter, diag=False,
                    list_of_simulations=None):
        '''
        Shared driver for all ``parse_*`` methods: run ``converter`` on each
        model file and write the result to ``<name>_lightcurve_DUET.fits``.

        Parameters
        ----------
        sim_list : list of str
            Default list of model files to process.
        converter : callable
            ``convert_model`` or ``convert_sn_model``.

        Other parameters
        ----------------
        diag : boolean
            Only process the first model (for unit tests).
        list_of_simulations : list of str, optional
            Override ``sim_list`` when given.
        '''
        # Kept for backward compatibility: every parse_* historically reset
        # this attribute before processing.
        self.emgw_processed = np.array([])
        if list_of_simulations is None:
            list_of_simulations = sim_list
        for ind, shockf in enumerate(list_of_simulations):
            if diag is True and ind > 0:
                break
            sname, ext = os.path.splitext(shockf)
            print('Parsing and storing: {}'.format(sname))
            outfile = datadir + '/' + sname + '_lightcurve_DUET.fits'
            shock_lc = converter(datadir + '/' + shockf, name=sname)
            shock_lc.write(outfile, format='fits', overwrite=True)
        return

    def parse_emgw(self, diag=False, list_of_simulations=None):
        '''
        Loop over each EMGW GRB shock model and save the outputs.

        Optional parameters
        -------------------
        diag: boolean
            Just run one test instead of looping over all for unit tests
        '''
        return self._parse_list(self.emgw_simulations, convert_model,
                                diag=diag,
                                list_of_simulations=list_of_simulations)

    def parse_sne(self, diag=False, list_of_simulations=None):
        '''
        Loop over each SN model and save the outputs.

        Optional parameters
        -------------------
        diag: boolean
            Just run one test instead of looping over all for unit tests
        '''
        return self._parse_list(self.sne_simulations, convert_sn_model,
                                diag=diag,
                                list_of_simulations=list_of_simulations)

    def parse_sne_rsg(self, diag=False, list_of_simulations=None):
        '''
        Loop over each RSG SN model and save the outputs.

        Optional parameters
        -------------------
        diag: boolean
            Just run one test instead of looping over all for unit tests
        '''
        return self._parse_list(self.sne_rsg_simulations, convert_model,
                                diag=diag,
                                list_of_simulations=list_of_simulations)

    def parse_sne_bsg(self, diag=False, list_of_simulations=None):
        '''
        Loop over each BSG SN model and save the outputs.

        Optional parameters
        -------------------
        diag: boolean
            Just run one test instead of looping over all for unit tests
        '''
        return self._parse_list(self.sne_bsg_simulations, convert_model,
                                diag=diag,
                                list_of_simulations=list_of_simulations)

    def parse_sne_ysg(self, diag=False, list_of_simulations=None):
        '''
        Loop over each YSG SN model and save the outputs.

        Optional parameters
        -------------------
        diag: boolean
            Just run one test instead of looping over all for unit tests
        '''
        return self._parse_list(self.sne_ysg_simulations, convert_model,
                                diag=diag,
                                list_of_simulations=list_of_simulations)
def convert_sn_model(label, name='NoName', duet=None):
    '''
    Reads in an SN model (separate effective-temperature and radius files),
    converts it to DUET magnitudes and fluences at a reference distance of
    10 pc, and returns the resulting light-curve table.

    Parameters
    ----------
    label : string
        Path *prefix* of the SN model; the data are read from
        '<label>_teff.txt' and '<label>_radius.txt'.

    Other parameters
    ----------------
    name : string
        Name to use for the model. Default is 'NoName'.
    duet : astroduet.config.Telescope, optional
        Telescope configuration; a default Telescope() is built when None.

    Returns
    -------
    astropy.table.Table with columns time, mag_D1, mag_D2, fluence_D1,
    fluence_D2 (normalized to 10 pc; distance is stored in meta['dist0_pc']).
    '''
    if duet is None:
        duet = Telescope()
    bandone = duet.bandpass1
    bandtwo = duet.bandpass2
    # Reference distance: all outputs are normalized to 10 pc.
    dist0 = 10*u.pc
    temptable = \
        Table.read(f'{label}_teff.txt', format='ascii', names=['time', 'T'])
    radiustable = \
        Table.read(f'{label}_radius.txt', format='ascii', names=['time', 'R'])
    # Join on the shared 'time' column.
    table = join(temptable, radiustable)
    N = len(table['time'])
    time = table['time'] * u.s
    shock_lc = Table([time,
                      np.zeros(len(time))*u.ABmag,
                      np.zeros(len(time))*u.ABmag,
                      np.zeros(len(time))*u.ph/(u.s*u.cm**2),
                      np.zeros(len(time))*u.ph/(u.s*u.cm**2)],
                     names=('time', 'mag_D1', 'mag_D2', 'fluence_D1', 'fluence_D2'),
                     meta={'name': name + ' at 10 pc',
                           'dist0_pc' : '{}'.format(dist0.to(u.pc).value)})
    # Blackbody bolometric flux at dist0: sigma * T^4 * (R / d)^2.
    bolflux = (table['T'] * u.K) ** 4 * const.sigma_sb.cgs * (
        (table['R'] * u.cm) / dist0.to(u.cm)) ** 2
    temps = table['T'] * u.K
    # Convert each epoch's blackbody to DUET band magnitudes and fluences.
    for k, t, bf in tqdm(list(zip(np.arange(N), temps, bolflux))):
        band1_mag, band2_mag = bb_abmag(bbtemp=t, bolflux = bf,
                                        bandone=bandone, bandtwo=bandtwo, val=True)
        band1_fluence, band2_fluence = bb_abmag_fluence(bbtemp=t,
                                                        bolflux=bf)
        shock_lc[k]['mag_D1'] = band1_mag
        shock_lc[k]['mag_D2'] = band2_mag
        shock_lc[k]['fluence_D1'] = band1_fluence.value
        shock_lc[k]['fluence_D2'] = band2_fluence.value
    # Strip mag units so the table serializes cleanly to FITS.
    shock_lc['mag_D1'].unit = None
    shock_lc['mag_D2'].unit = None
    return shock_lc
def convert_model(filename, name='NoName', duet=None):
    '''
    Reads an EMGW shock-breakout model (time / log bolometric luminosity /
    temperature columns), converts it to DUET magnitudes and fluences at a
    reference distance of 10 pc, and returns the light-curve table.

    Parameters
    ----------
    filename : string
        Path to GRB shock file.

    Other parameters
    ----------------
    name : string
        Name to use for the model. Default is 'NoName'.
    duet : astroduet.config.Telescope, optional
        Telescope configuration; a default Telescope() is built when None.

    Returns
    -------
    astropy.table.Table with columns time, mag_D1, mag_D2, fluence_D1,
    fluence_D2 (normalized to 10 pc; distance is stored in meta['dist0_pc']).
    '''
    if duet is None:
        duet = Telescope()
    bandone = duet.bandpass1
    bandtwo = duet.bandpass2
    # Reference distance: all outputs are normalized to 10 pc.
    dist0 = 10*u.pc
    # Columns: 0 = time [days], 1 = log10 bolometric luminosity, 2 = T [K].
    shock_data = np.loadtxt(filename)
    time = (shock_data[:,0]*u.d).to(u.s)
    temps = shock_data[:,2]
    bolflux = 10**shock_data[:,1]
    # Set up outputs
    shock_lc = Table([time,
                      np.zeros(len(time))*u.ABmag,
                      np.zeros(len(time))*u.ABmag,
                      np.zeros(len(time))*u.ph/(u.s*u.cm**2),
                      np.zeros(len(time))*u.ph/(u.s*u.cm**2)],
                     names=('time', 'mag_D1', 'mag_D2', 'fluence_D1', 'fluence_D2'),
                     meta={'name': name + ' at 10 pc',
                           'dist0_pc' : '{}'.format(dist0.to(u.pc).value)})
    N = len(temps)
    for k, t, bf in tqdm(list(zip(np.arange(N), temps, bolflux))):
        t *= u.K
        # Dilute the luminosity over a sphere of radius dist0.
        bf *= (u.erg/u.s) /(4 * np.pi * dist0**2)
        band1_mag, band2_mag = bb_abmag(bbtemp=t, bolflux = bf,
                                        bandone=bandone, bandtwo=bandtwo, val=True)
        band1_fluence, band2_fluence = bb_abmag_fluence(bbtemp=t,
                                                        bolflux=bf)
        shock_lc[k]['mag_D1'] = band1_mag
        shock_lc[k]['mag_D2'] = band2_mag
        shock_lc[k]['fluence_D1'] = band1_fluence.value
        shock_lc[k]['fluence_D2'] = band2_fluence.value
    # Strip mag units so the table serializes cleanly to FITS.
    shock_lc['mag_D1'].unit = None
    shock_lc['mag_D2'].unit = None
    return shock_lc
def load_model_fluence(filename, dist=100*u.Mpc):
    '''
    Reads in a FITS version of the model template and scales to the given
    distance.

    Parameters
    ----------
    filename : string
        Path to model FITS (or to a model file whose FITS will be generated
        on demand via fits_file()).

    Other parameters
    ----------------
    dist : astropy quantity
        Distance at which to place the source. Default is 100*u.Mpc.

    Returns
    -------
    (time, fluence_D1, fluence_D2): elapsed time since first epoch and the
    fluence in both DUET bands, scaled by inverse-square law from the
    template's reference distance.
    '''
    fitsfile = fits_file(filename)
    model_lc_table = QTable.read(fitsfile)
    # Shift the time axis so the light curve starts at t = 0.
    model_lc_table['time'] -= model_lc_table['time'][0]
    dist0 = float(model_lc_table.meta['DIST0_PC']) * u.pc
    # Inverse-square dilution from the reference distance to `dist`.
    distscale = (dist0.to(u.Mpc) / dist)**2
    fluence1 = model_lc_table['fluence_D1'] * distscale
    fluence2 = model_lc_table['fluence_D2'] * distscale
    return model_lc_table['time'], fluence1, fluence2
def load_model_ABmag(filename, dist=100*u.Mpc):
    '''
    Read a FITS version of the model template and scale its magnitudes to the
    given distance.

    Parameters
    ----------
    filename : string
        Path to model FITS.

    Other parameters
    ----------------
    dist : float
        Distance at which to place the source (default is 100*u.Mpc)

    Returns
    -------
    AB magnitude in both DUET bands
    '''
    # Load the cached FITS light curve and shift times to start at zero.
    lc = QTable.read(fits_file(filename))
    lc['time'] -= lc['time'][0]

    reference_dist = float(lc.meta['DIST0_PC']) * u.pc
    # Distance modulus relative to the template's reference distance.
    dm = (5 * np.log10(dist / reference_dist)).value * u.mag

    return lc['time'], lc['mag_D1'] * u.ABmag + dm, lc['mag_D2'] * u.ABmag + dm
def fits_file(file):
    '''
    Helper script to produce the FITS filename.

    If *file* already points at a FITS file it is returned unchanged;
    otherwise the corresponding DUET light-curve FITS path is derived and,
    when missing on disk, generated via the EMGW simulation pipeline.
    '''
    stem, extension = os.path.splitext(file)
    if 'fits' in extension:
        # Already a FITS file; nothing to derive.
        return file
    outfile = datadir + '/' + stem + '_lightcurve_DUET.fits'
    if not os.path.exists(outfile):
        log.warning(f"{outfile} does not exist. Creating it now.")
        # Generating the cache parses all EMGW models as a side effect.
        sims = Simulations()
        sims.parse_emgw()
    return outfile
def load_bai(**kwargs):
    '''Load in the galaxy tables from the Bai catalog.

    Tables 1 and 2 are fixed-width text files parsed by byte offsets.
    Blank fields are flagged with sentinel values (-1 / -99) and filtered
    out before the combined table is returned.

    Returns
    -------
    bai_table : astropy Table
        Catalog values with NUV/FUV surface-brightness columns added.
    '''
    from astroduet.utils import galex_nuv_flux_to_abmag, galex_fuv_flux_to_abmag
    table1 = '../astroduet/data/bai_data/Table1.txt'
    table2 = '../astroduet/data/bai_data/Table2.txt'
    # From http://galex.stsci.edu/gr6/?page=faq
    galex_nuv_bandpass = 732 * u.AA  # Effective NUV bandpass
    galex_fuv_bandpass = 268 * u.AA  # Effective FUV bandpass

    dist = []
    rad = []
    nuv = []
    fuv = []
    pgc = []
    morph = []

    # Table 1: PGC id, distance and morphology.
    skip = 27  # header lines to skip
    with open(table1, 'r') as f:
        for ctr, line in enumerate(f):
            if ctr < skip:
                continue
            bai_bytes = bytearray(line, 'utf-8')
            pgc = np.append(pgc, int(bai_bytes[0:7]))
            # BUGFIX: the original compared a multi-byte slice to b' '
            # (never equal) and assigned a misspelled variable in the blank
            # branch; detect blank fields by stripping whitespace instead.
            if not bai_bytes[59:65].strip():
                this_dist = -1 * u.Mpc
            else:
                this_dist = float(bai_bytes[59:65]) * u.Mpc
            dist = np.append(dist, this_dist)
            # Parse morphology
            if not bai_bytes[50:53].strip():
                this_morph = -99
            else:
                this_morph = float(bai_bytes[50:53])
            morph = np.append(morph, this_morph)

    # Table 2: FUV/NUV luminosities (log10 in the file) and radius.
    skip = 31
    with open(table2, 'r') as f2:
        for ctr, line in enumerate(f2):
            if ctr < skip:
                continue
            bai_bytes = bytearray(line, 'utf-8')
            if not bai_bytes[52:57].strip():
                this_fuv = -1
            else:
                this_fuv = 10**(float(bai_bytes[52:57]))
            fuv = np.append(fuv, this_fuv)
            if not bai_bytes[59:64].strip():
                this_nuv = -1
            else:
                this_nuv = 10**(float(bai_bytes[59:64]))
            nuv = np.append(nuv, this_nuv)
            if not bai_bytes[74:80].strip():
                this_rad = -1
            else:
                this_rad = float(bai_bytes[74:80])
            # BUGFIX: append (the original prepended, reversing RAD and
            # misaligning it against every other per-row column).
            rad = np.append(rad, this_rad)

    bai_table = Table(
        [pgc, dist, fuv, nuv, rad, morph],
        names=('PGC', 'DIST', 'LUMFUV', 'LUMNUV', 'RAD', 'MORPH'),
        meta={'name': 'Bai Table 1 and 2'}
    )
    bai_table['RAD'].unit = u.arcsec
    bai_table['DIST'].unit = u.Mpc
    bai_table['LUMNUV'].unit = 'W'
    bai_table['LUMFUV'].unit = 'W'

    # Drop rows with any sentinel (blank) field.
    good = np.where( (bai_table['LUMNUV'] > 0) & (bai_table['DIST'] > 0) &
                     (bai_table['RAD'] > 0) & (bai_table['MORPH'] > -99) &
                     (bai_table['LUMFUV'] > 0) )
    bai_table = bai_table[good]

    # Surface brightness calculation is here
    bai_table['AREA'] = np.pi * (bai_table['RAD']**2)
    # Correct flux estimate?
    flux = (0.5*bai_table['LUMNUV'].to(u.erg / u.s)) / (galex_nuv_bandpass * 4 * np.pi * (bai_table['DIST'].to(u.cm))**2)
    surf_brightness = flux / bai_table['AREA']
    abmag = galex_nuv_flux_to_abmag(surf_brightness)  # Now GALEX ABmags per arcsec
    bai_table['SURFNUV'] = abmag
    flux = (0.5*bai_table['LUMFUV'].to(u.erg / u.s)) / (galex_fuv_bandpass * 4 * np.pi * (bai_table['DIST'].to(u.cm))**2)
    surf_brightness = flux / bai_table['AREA']
    abmag = galex_fuv_flux_to_abmag(surf_brightness)  # Now GALEX ABmags per arcsec
    bai_table['SURFFUV'] = abmag
    return bai_table
357542 | from pathlib import Path
from loguru import logger
def alter(f,
          old_str,
          new_str):
    """Return the lines of *f* concatenated into one string, with every
    occurrence of *old_str* replaced by *new_str*."""
    return "".join(line.replace(old_str, new_str) for line in f)
def alter_all_models(f,
                     block_list,
                     item_list,
                     model_list):
    """Rewrite legacy block/item model references in *f* and return the text.

    Each entry of the three lists is an (old_name, new_name) pair; the
    plural `blocks/` and `items/` path prefixes are also normalised.
    """
    logger.info(f"Altering the {Path(f.name).name} block/item models...")
    # TODO: Replace these using the json library
    rename_groups = {
        "block": block_list,
        "item": item_list,
        "\"parent\": \"block": model_list
    }
    converted = []
    for line in f:
        line = line.replace("\"blocks/", "\"block/")
        line = line.replace("\"items/", "\"item/")
        for path, pairs in rename_groups.items():
            for name in pairs:
                line = line.replace(
                    f"\"{path}/{name[0]}\"",
                    f"\"{path}/{name[1]}\""
                )
        converted.append(line)
    return "".join(converted)
def alter_all_states(f,
                     model_list):
    """Upgrade old block-state model paths in *f* and return the rewritten text.

    Every model reference gains the `block/` prefix, then each
    (old_name, new_name) pair in *model_list* is substituted.
    """
    logger.info(f"Replacing all old block states in {Path(f.name).name}...")
    # TODO: Replace these using the json library
    converted = []
    for line in f:
        line = line.replace("\"model\": \"", "\"model\": \"block/")
        for pair in model_list:
            line = line.replace(
                f"\"model\": \"block/{pair[0]}\"",
                f"\"model\": \"block/{pair[1]}\""
            )
        converted.append(line)
    return "".join(converted)
| StarcoderdataPython |
9780238 | #
# SPDX-License-Identifier: MIT
#
from oeqa.runtime.case import OERuntimeTestCase
from oeqa.core.decorator.depends import OETestDepends
from oeqa.core.decorator.data import skipIfNotFeature
from oeqa.runtime.decorator.package import OEHasPackage
class LddTest(OERuntimeTestCase):
    """Runtime sanity checks for the `ldd` utility on the target device."""

    @OEHasPackage(["ldd"])
    @OETestDepends(['ssh.SSHTest.test_ssh'])
    def test_ldd(self):
        # `ldd` must be installed and resolvable via PATH on the target.
        status, output = self.target.run('which ldd')
        msg = 'ldd does not exist in PATH: which ldd: %s' % output
        self.assertEqual(status, 0, msg=msg)
        # Extract RTLDLIST from the ldd shell script and verify that at least
        # one of the listed runtime linkers actually exists on the target.
        cmd = ('for i in $(which ldd | xargs cat | grep "^RTLDLIST"| '
               'cut -d\'=\' -f2|tr -d \'"\'); '
               'do test -f $i && echo $i && break; done')
        status, output = self.target.run(cmd)
        self.assertEqual(status, 0, msg="ldd path not correct or RTLDLIST files don't exist.")
        # Finally, ldd should successfully analyse a trivial binary.
        status, output = self.target.run("ldd /bin/true")
        self.assertEqual(status, 0, msg="ldd failed to execute: %s" % output)
| StarcoderdataPython |
1775088 | import numpy as np
import matplotlib.pyplot as plt
# Load the MAGIC gamma telescope dataset: each CSV row holds 10 numeric
# features followed by a class label, which is dropped here ([:-1]).
file = open('magic04.txt')
Data = [[float(i) for i in a.split(',')[:-1]] for a in file.readlines()]
mymatrix = np.array(Data)
# Column-wise mean vector of the feature matrix.
mymean = np.mean(mymatrix, axis=0)
print(mymean)
print('\n')
mean1=np.transpose(mymean)
n = mymatrix.shape[0]
# Column vector of ones (n x 1) used to broadcast the mean to every row.
temp=[1 for i in range(n)]
tm=np.matrix(temp)
tm = np.transpose(tm)
temp1=tm*mean1
# Center the data: z = X - mean (row-wise).
z= mymatrix - temp1
zt=np.transpose(z)
# NOTE(review): dividing by 3 is not the standard covariance normalisation
# (n or n-1) — presumably dictated by the exercise statement; confirm.
inner=zt*z/3
print(inner)
print('\n')
# Same matrix computed again as a sum of outer products of the centered
# rows; should agree with `inner` above.
Sum=0
zp=[0 for i in range(n)]
for i in range(n):
    zp[i]=np.transpose(z[i])
    temp2=zp[i]*z[i]
    Sum+=temp2
Sum=Sum/3
print(Sum)
print('\n')
| StarcoderdataPython |
9763508 | <filename>csmserver/csm_exceptions/__init__.py<gh_stars>10-100
from exceptions import CSMLDAPException
| StarcoderdataPython |
3499548 | '''
Problem
For two strings s1 and s2 of equal length, the p-distance between them, denoted dp(s1,s2), is the
proportion of corresponding symbols that differ between s1 and s2.
For a general distance function d on n taxa s1,s2,…,sn (taxa are often represented by genetic strings),
we may encode the distances between pairs of taxa via a distance matrix D in which Di,j=d(si,sj).
Given: A collection of n (n≤10) DNA strings s1,…,sn of equal length (at most 1 kbp). Strings are given
in FASTA format.
Return: The matrix D corresponding to the p-distance dp on the given strings. As always, note that
your answer is allowed an absolute error of 0.001.
Sample Dataset
[
['T','T','T','C','C','A','T','T','T','A'],
['G','A','T','T','C','A','T','T','T','C'],
['T','T','T','C','C','A','T','T','T','T'],
['G','T','T','C','C','A','T','T','T','A']
]
Sample Output
0.00000 0.40000 0.10000 0.10000
0.40000 0.00000 0.40000 0.30000
0.10000 0.40000 0.00000 0.20000
0.10000 0.30000 0.20000 0.00000
'''
def get_p_distance_matrix(a):
    """Return the p-distance matrix for a collection of equal-length sequences.

    The p-distance between two sequences is the proportion of positions at
    which their symbols differ.

    :param a: list of n equal-length sequences (lists or strings of symbols)
    :return: n x n list-of-lists matrix D with D[i][j] = p-distance(a[i], a[j])
    """
    n = len(a)
    # Symmetric matrix with a zero diagonal (a sequence never differs from
    # itself). Sized from the input instead of the original hard-coded 4x4.
    output = [[0.0] * n for _ in range(n)]
    for k in range(n):
        for j in range(k + 1, n):
            length = len(a[k])
            # Count mismatching positions and normalise by the length. The
            # original added a literal 0.1 per mismatch, which is only
            # correct for length-10 sequences; it also printed every symbol
            # (leftover debug output, removed).
            diffs = sum(1 for x, y in zip(a[k], a[j]) if x != y)
            distance = diffs / length
            output[k][j] = distance
            output[j][k] = distance
    return output
| StarcoderdataPython |
9606195 | # coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class AdiEmsWebApiV2DtoTrajectoryValue(Model):
    """Encapsulates a data point defining the positional information
    [lat/long/alt]
    for a sample and the location it occurred at, in seconds from start of
    file.

    NOTE: auto-generated by AutoRest; do not edit by hand.

    :param offset: The offset of the value, in seconds from start of file.
    :type offset: float
    :param latitude: The latitude related to the offset at
     {Adi.Ems.Web.Api.V2.Dto.TrajectoryValue.Offset}.
    :type latitude: float
    :param longitude: The longitude related to the offset at
     {Adi.Ems.Web.Api.V2.Dto.TrajectoryValue.Offset}.
    :type longitude: float
    :param altitude: The altitude related to the offset at
     {Adi.Ems.Web.Api.V2.Dto.TrajectoryValue.Offset}.
    :type altitude: float
    """

    # msrest serialization map: attribute name -> wire key and wire type.
    _attribute_map = {
        'offset': {'key': 'offset', 'type': 'float'},
        'latitude': {'key': 'latitude', 'type': 'float'},
        'longitude': {'key': 'longitude', 'type': 'float'},
        'altitude': {'key': 'altitude', 'type': 'float'},
    }

    def __init__(self, *, offset: float=None, latitude: float=None, longitude: float=None, altitude: float=None, **kwargs) -> None:
        super(AdiEmsWebApiV2DtoTrajectoryValue, self).__init__(**kwargs)
        self.offset = offset
        self.latitude = latitude
        self.longitude = longitude
        self.altitude = altitude
| StarcoderdataPython |
3457195 | from django.contrib import admin
import models
# Register your models here.
# Expose the music models in the Django admin interface.
# (The second register call carried a fused dataset artifact
# "| StarcoderdataPython |" that made the statement an invalid expression;
# it has been removed.)
admin.site.register(models.Playlist)
admin.site.register(models.Song)
5042911 | from selenium import webdriver
import time
def main():
    """Solve the selects2 exercise: read the two displayed numbers, pick
    their sum in the dropdown and submit the form."""
    # The unused `link` (selects1.html) variable was removed.
    link2 = "http://suninjuly.github.io/selects2.html"
    browser = webdriver.Chrome()
    try:
        browser.get(link2)
        num1 = browser.find_element_by_id("num1")
        num2 = browser.find_element_by_id("num2")
        sum_1_2 = int(num1.text) + int(num2.text)
        from selenium.webdriver.support.ui import Select
        select = Select(browser.find_element_by_tag_name("select"))
        select.select_by_value(str(sum_1_2))
        button = browser.find_element_by_css_selector("button.btn")
        button.click()
        # Pause so the result alert can be read before the browser closes.
        time.sleep(5)
    finally:
        # Always release the WebDriver session (the original leaked the
        # chromedriver process).
        browser.quit()
# Run the exercise only when executed as a script, not on import.
if __name__ == "__main__":
    main()
| StarcoderdataPython |
6589443 | <reponame>Bash-Air/bashair<gh_stars>0
# Generated by Django 3.2.12 on 2022-02-11 23:41
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration (Django 3.2.12): relax several Instance
    fields to allow blank/null values. Do not edit by hand."""

    dependencies = [
        ('back', '0009_auto_20220212_0438'),
    ]
    operations = [
        migrations.AlterField(
            model_name='instance',
            name='address',
            field=models.TextField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='instance',
            name='description',
            field=models.TextField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='instance',
            name='email',
            field=models.EmailField(blank=True, max_length=200, null=True),
        ),
        migrations.AlterField(
            model_name='instance',
            name='phone',
            field=models.CharField(blank=True, max_length=20, null=True),
        ),
        migrations.AlterField(
            model_name='instance',
            name='report_url',
            field=models.URLField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='instance',
            name='website',
            field=models.URLField(blank=True, null=True),
        ),
    ]
| StarcoderdataPython |
1787092 | <filename>inbm/dispatcher-agent/dispatcher/device_manager/constants.py<gh_stars>1-10
"""
Constants for DeviceManager classes
Copyright (C) 2017-2022 Intel Corporation
SPDX-License-Identifier: Apache-2.0
"""
# Linux specific constants
LINUX_POWER = "/sbin/shutdown "  # base command, combined with the flags below
LINUX_RESTART = "-r"  # reboot flag
LINUX_SHUTDOWN = "-h"  # halt flag
LINUX_SECRETS_FILE = "/var/intel-manageability/secret.img"
# Windows specific constants
WIN_POWER = "shutdown "  # base command, combined with the flags below
WIN_RESTART = "/r"  # reboot flag
WIN_SHUTDOWN = "/s"  # shutdown flag
# Success messages returned to callers after the respective command
SUCCESS_RESTART = "Restart Command Success"
SUCCESS_SHUTDOWN = "Shutdown Success"
SUCCESS_DECOMMISSION = "Decommission Success"
| StarcoderdataPython |
11262467 | import argparse
from telegram import telegram
from telegraph import Telegraph
__author__ = '<NAME> (<NAME>)'
__license__ = "MIT"

# Command-line interface: both flags are optional; -content accepts multiple
# whitespace-separated tokens (nargs='*').
PARSER = argparse.ArgumentParser(description="Telegra.ph submitter")
PARSER.add_argument('-title', '-t', type=str, help="Post title", required=False)
PARSER.add_argument('-content', '-c', type=str, nargs='*', help="Post content", required=False)
ARGS = PARSER.parse_args()
def telegraph_submit(message_title, message_content):
    """Publish a post to telegra.ph and forward the resulting link via
    the Telegram helper."""
    client = Telegraph()
    client.create_account(short_name='FileBot')
    page = client.create_page(message_title, html_content=message_content)
    url = 'http://telegra.ph/{}'.format(page['path'])
    telegram(message_title, url)
if __name__ == '__main__':
    # CLI entry point: publish the post and print the resulting URL.
    title = ARGS.title
    content = ARGS.content
    telegraph = Telegraph()
    telegraph.create_account(short_name='FileBot')
    response = telegraph.create_page(title, html_content=content)
    posted = 'http://telegra.ph/{}'.format(response['path'])
    # BUGFIX: the original used the Python-2-only `print posted` statement,
    # a SyntaxError under Python 3; print() works on both.
    print(posted)
| StarcoderdataPython |
4980745 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2018-08-30 07:26
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Auto-generated initial migration (Django 1.11.3) for the blog app:
    creates the Hole, HoleComment, Pictures and Post models and the
    Hole -> HoleComment many-to-many. Do not edit by hand."""

    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Hole',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('pid', models.CharField(max_length=30)),
                ('text', models.TextField()),
                ('date', models.DateField()),
                ('time', models.DateTimeField()),
                ('likes', models.CharField(max_length=20)),
                ('comments_num', models.CharField(max_length=20)),
            ],
        ),
        migrations.CreateModel(
            name='HoleComment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('pid', models.CharField(max_length=20)),
                ('cid', models.CharField(max_length=20)),
                ('text', models.TextField()),
            ],
        ),
        migrations.CreateModel(
            name='Pictures',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('picture', models.ImageField(upload_to='')),
                ('name', models.CharField(max_length=200)),
            ],
        ),
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200)),
                ('author', models.CharField(blank=True, default='yanjin', max_length=20)),
                ('text', models.TextField(default="Oh! There's no text......")),
                ('created_date', models.DateTimeField(default=django.utils.timezone.now)),
                ('published_date', models.DateTimeField(default=django.utils.timezone.now)),
                ('github', models.URLField(blank=True, default='https://github.com/yan-jin')),
                ('name', models.CharField(max_length=200)),
                ('desc', models.TextField(default="Oh! There's no description......")),
                ('icon', models.ImageField(blank=True, upload_to='')),
                ('img', models.ManyToManyField(blank=True, to='blogapp.Pictures')),
            ],
        ),
        migrations.AddField(
            model_name='hole',
            name='comments',
            field=models.ManyToManyField(blank=True, to='blogapp.HoleComment'),
        ),
    ]
| StarcoderdataPython |
6547383 | <gh_stars>0
import numpy as np
import pyarrow as pa
from pandas.core.dtypes.dtypes import register_extension_dtype
from spatialpandas.geometry import Polygon
from spatialpandas.geometry._algorithms.intersection import (
multipolygons_intersect_bounds
)
from spatialpandas.geometry._algorithms.orientation import orient_polygons
from spatialpandas.geometry.base import GeometryDtype
from spatialpandas.geometry.baselist import (
GeometryListArray, GeometryList, _geometry_map_nested3
)
from spatialpandas.geometry.multiline import MultiLineArray, MultiLine
from spatialpandas.geometry._algorithms.measures import (
compute_line_length, compute_area
)
from dask.dataframe.extensions import make_array_nonempty
@register_extension_dtype
class MultiPolygonDtype(GeometryDtype):
    """Pandas extension dtype for arrays of MultiPolygon geometries."""
    _geometry_name = 'multipolygon'

    @classmethod
    def construct_array_type(cls, *args):
        """Return the array type associated with this dtype."""
        if args:
            raise NotImplementedError("construct_array_type does not support arguments")
        return MultiPolygonArray
class MultiPolygon(GeometryList):
    """A single multipolygon geometry backed by nested pyarrow list data:
    multipolygon -> polygons -> rings -> flat [x, y, x, y, ...] floats."""
    _nesting_levels = 2

    @classmethod
    def construct_array_type(cls):
        # Extension-array type used when building arrays of this geometry.
        return MultiPolygonArray

    @classmethod
    def _shapely_to_coordinates(cls, shape):
        # Convert a shapely (Multi)Polygon into the nested coordinate-list
        # representation used internally.
        import shapely.geometry as sg
        if isinstance(shape, sg.MultiPolygon):
            multipolygon = []
            for polygon in shape:
                polygon_coords = Polygon._shapely_to_coordinates(polygon)
                multipolygon.append(polygon_coords)
            return multipolygon
        elif isinstance(shape, sg.Polygon):
            # A bare Polygon becomes a one-element multipolygon.
            return [Polygon._shapely_to_coordinates(shape)]
        else:
            raise ValueError("""
Received invalid value of type {typ}. Must be an instance of Polygon or MultiPolygon
""".format(typ=type(shape).__name__))

    def to_shapely(self):
        """
        Convert to shapely shape

        Returns:
            shapely MultiPolygon shape
        """
        import shapely.geometry as sg
        polygon_arrays = np.asarray(self.data.as_py())
        polygons = []
        for polygon_array in polygon_arrays:
            # Each ring is a flat [x, y, ...] list; reshape into (n, 2) pairs.
            ring_arrays = [np.array(line_coords).reshape(len(line_coords) // 2, 2)
                           for line_coords in polygon_array]
            rings = [sg.LinearRing(ring_array) for ring_array in ring_arrays]
            # First ring is the outer shell, the rest are holes.
            polygons.append(sg.Polygon(shell=rings[0], holes=rings[1:]))
        return sg.MultiPolygon(polygons=polygons)

    @classmethod
    def from_shapely(cls, shape, orient=True):
        """
        Build a spatialpandas MultiPolygon object from a shapely shape

        Args:
            shape: A shapely Polygon or MultiPolygon shape
            orient: If True (default), reorder polygon vertices so that outer shells
                are stored in counter clockwise order and holes are stored in
                clockwise order.  If False, accept vertices as given. Note that
                while there is a performance cost associated with this operation
                some algorithms will not behave properly if the above ordering
                convention is not followed, so only set orient=False if it is
                known that this convention is followed in the input data.
        Returns:
            spatialpandas MultiPolygon
        """
        import shapely.geometry as sg
        if orient:
            if isinstance(shape, sg.Polygon):
                shape = sg.polygon.orient(shape)
            elif isinstance(shape, sg.MultiPolygon):
                shape = sg.MultiPolygon([sg.polygon.orient(poly) for poly in shape])
        shape_parts = cls._shapely_to_coordinates(shape)
        return cls(shape_parts)

    @property
    def boundary(self):
        """Return the ring boundaries of this multipolygon as a MultiLine."""
        new_offsets = self.buffer_offsets[1]
        new_data = pa.ListArray.from_arrays(new_offsets, self.buffer_values)
        return MultiLine(new_data)

    @property
    def length(self):
        """Total perimeter length over all rings."""
        return compute_line_length(self.buffer_values, self.buffer_inner_offsets)

    @property
    def area(self):
        """Total area (holes are subtracted via ring orientation)."""
        return compute_area(self.buffer_values, self.buffer_inner_offsets)

    def intersects_bounds(self, bounds):
        """Return True if this multipolygon intersects the (x0, y0, x1, y1)
        bounding box."""
        x0, y0, x1, y1 = bounds
        result = np.zeros(1, dtype=np.bool_)
        offsets1, offsets2 = self.buffer_offsets
        # Wrap the single geometry in a one-element outer offset array so the
        # array-level kernel can be reused.
        offsets0 = np.array([0, len(offsets1) - 1], dtype=np.uint32)
        multipolygons_intersect_bounds(
            float(x0), float(y0), float(x1), float(y1), self.buffer_values,
            offsets0[:-1], offsets0[1:], offsets1, offsets2, result
        )
        return result[0]
class MultiPolygonArray(GeometryListArray):
    """Extension array of MultiPolygon geometries stored as triply nested
    pyarrow lists: element -> polygons -> rings -> flat coordinates."""
    _element_type = MultiPolygon
    _nesting_levels = 3

    @property
    def _dtype_class(self):
        # Dtype class paired with this array type.
        return MultiPolygonDtype

    @classmethod
    def from_geopandas(cls, ga, orient=True):
        """
        Build a spatialpandas MultiPolygonArray from a geopandas GeometryArray or
        GeoSeries.

        Args:
            ga: A geopandas GeometryArray or GeoSeries of MultiPolygon or
                Polygon shapes.
            orient: If True (default), reorder polygon vertices so that outer shells
                are stored in counter clockwise order and holes are stored in
                clockwise order.  If False, accept vertices as given. Note that
                while there is a performance cost associated with this operation
                some algorithms will not behave properly if the above ordering
                convention is not followed, so only set orient=False if it is
                known that this convention is followed in the input data.
        Returns:
            MultiPolygonArray
        """
        mpa = super().from_geopandas(ga)
        if orient:
            return mpa.oriented()
        else:
            return mpa

    def oriented(self):
        """Return a copy with outer shells counter-clockwise and holes
        clockwise, rebuilding the nested pyarrow arrays from the shared
        offset buffers."""
        # Arrow expects one extra mask slot for the trailing offset entry.
        missing = np.concatenate([self.isna(), [False]])
        buffer_values = self.buffer_values.copy()
        multipoly_offsets, poly_offsets, ring_offsets = self.buffer_offsets
        # Reorders ring vertices in place on the copied coordinate buffer.
        orient_polygons(buffer_values, poly_offsets, ring_offsets)
        pa_rings = pa.ListArray.from_arrays(
            pa.array(ring_offsets), pa.array(buffer_values)
        )
        pa_polys = pa.ListArray.from_arrays(
            pa.array(poly_offsets), pa_rings,
        )
        pa_multipolys = pa.ListArray.from_arrays(
            pa.array(multipoly_offsets, mask=missing), pa_polys
        )
        return self.__class__(pa_multipolys)

    @property
    def boundary(self):
        """Ring boundaries of every element as a MultiLineArray."""
        offsets = self.buffer_offsets
        inner_data = pa.ListArray.from_arrays(offsets[2], self.buffer_values)
        new_data = pa.ListArray.from_arrays(offsets[1][offsets[0]], inner_data)
        return MultiLineArray(new_data)

    @property
    def length(self):
        """Per-element perimeter length; NaN for missing elements."""
        result = np.full(len(self), np.nan, dtype=np.float64)
        _geometry_map_nested3(
            compute_line_length,
            result,
            self.buffer_values,
            self.buffer_offsets,
            self.isna(),
        )
        return result

    @property
    def area(self):
        """Per-element area; NaN for missing elements."""
        result = np.full(len(self), np.nan, dtype=np.float64)
        _geometry_map_nested3(
            compute_area,
            result,
            self.buffer_values,
            self.buffer_offsets,
            self.isna(),
        )
        return result

    def intersects_bounds(self, bounds, inds=None):
        """Boolean array: which elements (optionally restricted to `inds`)
        intersect the (x0, y0, x1, y1) bounding box."""
        x0, y0, x1, y1 = bounds
        offsets0, offsets1, offsets2 = self.buffer_offsets
        start_offsets0 = offsets0[:-1]
        stop_offsets0 = offsets0[1:]
        if inds is not None:
            # Restrict the check to the requested element indices.
            start_offsets0 = start_offsets0[inds]
            stop_offsets0 = stop_offsets0[inds]
        result = np.zeros(len(start_offsets0), dtype=np.bool_)
        multipolygons_intersect_bounds(
            float(x0), float(y0), float(x1), float(y1), self.buffer_values,
            start_offsets0, stop_offsets0, offsets1, offsets2, result
        )
        return result
def _multi_polygon_array_non_empty(dtype):
    """
    Create an example length 2 array to register with Dask.
    See https://docs.dask.org/en/latest/dataframe-extend.html#extension-arrays
    """
    # Coordinates are flat [x0, y0, x1, y1, ...] rings; the first ring of each
    # polygon is the outer shell and subsequent rings are holes.
    return MultiPolygonArray([
        [
            [[1.0, 1.0, 2.0, 1.0, 2.0, 2.0, 1.0, 2.0, 1.0, 1.0],
             [1.1, 1.1, 1.5, 1.9, 1.9, 1.1, 1.1, 1.1]]
        ],
        [
            [[0.0, 0.0, 1.0, 0.0, 2.0, 1.0, 0.5, 3.0, -1.0, 1.0, 0.0, 0.0],
             [0.2, 0.2, 0.5, 1.0, 0.8, 0.2, 0.2, 0.2],
             [0.5, 1.25, 0.3, 2.0, 0.8, 2.0, 0.5, 1.25]]
        ]
    ], dtype=dtype)
# Register the non-empty example constructor with Dask (only when Dask's
# extension hook was importable).
if make_array_nonempty:
    make_array_nonempty.register(MultiPolygonDtype)(_multi_polygon_array_non_empty)
| StarcoderdataPython |
1668267 | <reponame>mrthevinh/tvmongofastapi<gh_stars>1-10
from fastapi import APIRouter
from api.public.health import views as health
from api.public.user import views as user
# Top-level router aggregating all public API endpoints.
api = APIRouter()
# Health-check endpoints mounted under /health.
api.include_router(health.router, prefix="/health", tags=["Health"])
# User endpoints mounted under /user.
api.include_router(user.router, prefix="/user", tags=["Users"])
| StarcoderdataPython |
9698924 | from lib import *
reactionStrings = [l.strip() for l in open("input.txt").readlines()]
reactions = dict()
components = dict()
ore = 0
#load reactions
# Each line looks like "7 A, 1 B => 1 C"; reactions are keyed by the output
# chemical name. `Reaction` comes from lib (wildcard import above).
for rString in reactionStrings:
    parts = [p.strip() for p in rString.split("=>")]
    inputs = [i.split() for i in [p.strip() for p in parts[0].split(',')]]
    output = parts[1].split()
    for i in inputs:
        i[0] = int(i[0])
    output[0] = int(output[0])
    reactions[output[1]] = Reaction(inputs, output)
#load components
# Track the outstanding (still required) amount of every chemical; start by
# requiring exactly one unit of FUEL.
for v in reactions:
    components[v] = 0
components["FUEL"] = 1
#solve it
# Repeatedly expand the next outstanding chemical into its inputs,
# accumulating raw ORE. `getNextRequirement` (from lib) presumably returns
# a chemical with a positive requirement, or None when done — confirm in lib.
nextName = getNextRequirement(components)
while nextName is not None:
    reaction = reactions[nextName]
    amountRequired = components[nextName]
    amountProduced = reaction.output[0]
    # Number of reaction runs needed, rounded up (manual integer ceil).
    amountOfReactions = amountRequired // amountProduced
    if amountRequired > amountProduced * amountOfReactions:
        amountOfReactions += 1
    # Credit the produced amount (may overshoot into a surplus, i.e. a
    # negative outstanding requirement).
    components[nextName] -= (amountProduced * amountOfReactions)
    for c in reaction.inputs:
        if c[1] == "ORE":
            ore += (c[0] * amountOfReactions)
        else:
            components[c[1]] += (c[0] * amountOfReactions)
    nextName = getNextRequirement(components)
print(ore) | StarcoderdataPython |
1884833 | # -*- coding:utf-8 -*-
import os
import time
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
import pymongo
client = pymongo.MongoClient('localhost', 27017)
db = client.db
collection = db.house
class MyHandler(FileSystemEventHandler):
    """Watchdog handler that ingests newly written files into MongoDB and
    removes them from disk afterwards."""

    def on_created(self, event):
        # Creation events are ignored; ingestion happens on modification,
        # once the file has content.
        pass

    def on_modified(self, event):
        if not event.is_directory:
            try:
                print("检测到生成新文件")
                # BUGFIX: the original used open(...).read() inline, leaking
                # the file handle; the with-statement closes it deterministically.
                with open(event.src_path, 'r') as src:
                    content = src.read()
                collection.insert_one(
                    {
                        'path': event.src_path,
                        'content': content
                    })
                os.remove(event.src_path)
                print("已将新文件放进mongodb")
            except IOError:
                # Best-effort: the file may vanish or be locked between the
                # event and the read; skip it silently as before.
                pass
if __name__ == "__main__":
event_handler = MyHandler()
observer = Observer()
observer.schedule(event_handler, path='./www', recursive=True)
observer.start()
try:
print "started myWatch"
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
observer.join()
| StarcoderdataPython |
11228060 | from setuptools import setup
setup(
    # Package identity and metadata.
    name='tendermint',
    version='0.3.0',
    url='https://github.com/davebryson/py-tendermint',
    license='Apache 2.0',
    author='<NAME>',
    description='A microframework for building blockchain applications with Tendermint',
    packages=['tendermint'],
    include_package_data=True,
    zip_safe=False,
    platforms='any',
    # Runtime dependencies (pinned where wire/ABI compatibility matters).
    install_requires=[
        'abci==0.3.0',
        'rlp==0.4.7',
        'trie==0.2.4',
        'PyNaCl>=1.1.2',
        'pysha3>=1.0.2',
        'colorlog>=3.0.1',
        'requests>=2.18.4',
        'click>=6.7'
    ],
    # Development-only extras: install via `pip install tendermint[dev]`.
    extras_require={
        'dev': [
            'pytest',
            'pytest-pythonpath==0.7.1'
        ],
    },
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 3.6'
    ]
)
| StarcoderdataPython |
11270476 | <filename>ai_tool/predict_pipe.py
# coding:utf-8
import logging
import time
from concurrent.futures import ThreadPoolExecutor, as_completed
from threading import RLock
import cv2
import numpy as np
from GTUtility.GTTools.bbox import BBoxes, BBox
from GTUtility.GTTools.img_slide import yield_sub_img
from GTUtility.GTTools.model_config import ModelConfigLoad
from GTUtility.GTTools.constant import DetectType
from copy import deepcopy
import uuid
import os
class Model:
    """Bundles a loaded detection model, its configuration and optional
    processing hooks; shared by all predict_pipe instances."""

    def __init__(self, model, model_cfg: ModelConfigLoad,
                 verify_after_main_model=None, need_sub_model_detect=None,
                 post_handle_for_result=None, **kwargs
                 ):
        self.model = model
        self.names = model_cfg.names
        self.model_cfg = model_cfg
        self.debug = model_cfg.debug
        self.edge = model_cfg.edge
        self.thread_max = model_cfg.thread_max
        # Hook: after the main model has run, verify whether its result is valid
        self.verify_after_main_model = verify_after_main_model
        # Hook: decide whether sub-model detection is needed; if not, the
        # main model's result is taken as final
        self.need_sub_model_detect = need_sub_model_detect
        # Hook: post-processing of the final result
        self.post_handle_for_result = post_handle_for_result

    def judge_by_bbox_geo_feature(self, vertice: BBox, WIDTH, HEIGHT):
        """
        Check whether a bbox's geometric parameters are reasonable; may be
        called directly from outside without running model prediction.
        :param vertice: bounding box
        :param WIDTH: full-image width
        :param HEIGHT: full-image height
        :return: True if the box is valid, False otherwise
        """
        # Geometry thresholds may be configured per class name;
        # fall back to the model-wide defaults otherwise.
        if vertice.class_name in self.model_cfg.bbox_geo_feature_for_name.keys():
            geo_params = self.model_cfg.bbox_geo_feature_for_name[vertice.class_name]
        else:
            geo_params = self.model_cfg.bbox_geo_feature
        if geo_params and (not vertice.judge_by_geo(w=WIDTH, h=HEIGHT, **geo_params)
                           or not vertice.judge_by_edge(WIDTH, HEIGHT, **geo_params)):
            return False
        else:
            return True

    def detect_sub_img(self, box, img, thresh, WIDTH, HEIGHT, edge=False, merge_flag=False):
        """
        Detect defects on tile `img` and translate them into full-image
        coordinates. Defined on the shared Model so every pipeline instance
        reuses the same model and detection logic.
        :param box: origin coordinates and size of this tile in the full image
        :param img: the tile image itself
        :param thresh: confidence threshold applied to tile detections
        :param WIDTH: full-image width
        :param HEIGHT: full-image height
        :param edge: if True, drop boxes touching the tile border
        :param merge_flag: if True, merge overlapping boxes of this tile
            (the tile may be the whole, un-sliced image)
        :return: BBoxes shifted by the tile origin given in `box`
        """
        # resize: confirmed 2019-07-22 that no resize is needed here
        # sub_img = cv2.resize(img, (self.model.config.IMAGE_MIN_DIM, self.model.config.IMAGE_MAX_DIM))
        start = time.time()  # NOTE: only used by the commented-out debug code below
        results = self.model.detect(img)
        vertices = BBoxes()
        # No defects detected
        if len(results) == 0:
            return vertices
        for defect in results:
            try:
                # Skip detections below the confidence threshold
                if defect['confidence'] < thresh:
                    continue
                class_name = defect.get("name")
                class_id = defect.get("class_id")
                if not class_name:
                    class_name = self.names[class_id]
                # Optionally drop boxes touching the tile border
                if edge and (defect['x1'] <= 0 or defect['y1'] <= 0 or defect['x2'] >= WIDTH or defect['y2'] >= HEIGHT):
                    logging.warning("缺陷{}的坐标{}靠近子图片边缘{}".format(class_name, defect, box))
                    continue
                # Shift tile-local coordinates into full-image coordinates
                vertice = BBox(
                    [defect['x1'] + box[0], defect['y1'] + box[1], defect['x2'] + box[0], defect['y2'] + box[1],
                     class_name, defect['confidence']])
                # Geometric validity check
                if not self.judge_by_bbox_geo_feature(vertice=vertice, WIDTH=WIDTH, HEIGHT=HEIGHT):
                    # Configured geometric limits not met: skip this box
                    logging.warning(
                        "[{}]缺陷{}的几何参数S:{}, w:{}, h:{}, hTow:{}不满足配置门限".format(self.model_cfg.model_type, vertice,
                                                                             vertice.S, vertice.w,
                                                                             vertice.h, vertice.hTow))
                    continue
                # append converts automatically into the custom BBox list type
                vertices.append(vertice)
            except Exception as e:
                # On failure, skip only this one box; others are unaffected
                logging.error("解析标注框发生异常:{}".format(e), exc_info=1)
        # Merge the boxes produced by this single detection pass
        if merge_flag:
            vertices = vertices.merge_each_other()
        # (debug) optionally log per-tile timing/results and dump the tile
        # image to a file named after the tile coordinates, e.g.:
        # if cls.debug:
        #     logging.warning(...)
        #     cv2.imwrite("_".join([str(item) for item in box]) + ".jpg", img)
        return vertices

    def detect_sub_model(self, sub_slide, **kwargs):
        """
        Run the sub-model (classification) on a cropped sub-image and
        translate into the previous model's context.
        :param sub_slide: the cropped sub-image
        :param vertice: (kwargs) detection result from the previous model
        :param WIDTH: (kwargs) full-image width
        :param HEIGHT: (kwargs) full-image height
        :return: detect_type, type_name, class_confidence
        """
        # The sub-model returns (detect_type, type_name, class_confidence)
        return self.model.detect(sub_slide)
class predict_pipe(object):
    # NOTE: the model used to be class-level shared state:
    # model = None
    # model_cfg = None
    # names = []
    # debug = False
    # Registry of loaded models, keyed by the uuid returned from load()
    model_dict = {}
    @classmethod
    def load(cls, model, model_cfg: ModelConfigLoad,
             verify_after_main_model=None,
             need_sub_model_detect=None,
             handle_for_result=None,
             **kwargs):
        """
        Register a model detection entity. Load at most once per process and
        do not modify the registered entry afterwards.
        :param model: the model itself
        :param model_cfg: the model configuration object
        :param verify_after_main_model: hook validating the main-model
               result, signature verify_after_main_model(obj: predict_pipe, img, bboxes)
        :param need_sub_model_detect: hook deciding whether sub-model
               detection is required
        :param handle_for_result: result post-processing hook,
               signature handle_for_result(obj: predict_pipe, img, bboxes)
        :return: uuid string identifying the registered model
        """
        uid = str(uuid.uuid1())
        cls.model_dict[uid] = Model(
            model, model_cfg,
            verify_after_main_model=verify_after_main_model,
            need_sub_model_detect=need_sub_model_detect,
            post_handle_for_result=handle_for_result,
        )
        return uid
"""
生产环境监测图片,一个进程初始化一个实例
检测主要是执行 process_list_merge 进行图片分析,具体参数含义参见函数定义
"""
def __init__(self, uid, *args, **kwargs):
"""
初始化模型的检测实体
:param args: 预留
:param thread_max: 分片监测的最大线程数,默认不支持并发,单线程启动
:param kwargs: 预留 thresh_model, 分片出框的置信度
"""
self.uuid = uid
self.model = self.model_dict.get(self.uuid)
if not self.model or not isinstance(self.model, Model):
raise Exception("uuid {} 对应的模型已经不存在".format(self.uuid))
# 并发处理,结果回填需要加锁
self.vertices = BBoxes()
self._lock = RLock()
    def _detect_img(self, slide, start_x=0, start_y=0, patch_width=0, patch_height=0, padding=True):
        """
        Detect the image with one set of tiling parameters: slice the image
        into tiles and run detection on every tile.
        :param slide: image array (or image path)
        :param start_x: starting x coordinate
        :param start_y: starting y coordinate
        :param patch_width: tile width; 0 means no tiling
        :param patch_height: tile height
        :param padding: whether to pad edge tiles
        :return: accumulated BBoxes in full-image coordinates
        """
        # Reset cached results
        self.vertices = BBoxes()
        sp = slide.shape
        WIDTH = sp[1]
        HEIGHT = sp[0]
        # Assemble per-tile detection parameters
        run_params = dict(
            thresh=self.model.model_cfg.thresh_model or self.model.model_cfg.thresh_ai,
            WIDTH=WIDTH, HEIGHT=HEIGHT, edge=self.model.edge,
            merge_flag=self.model.model_cfg.merge_one_img)
        # Single-threaded unless a thread pool is configured
        excutor = None
        pool = []
        if self.model.thread_max > 1:
            excutor = ThreadPoolExecutor(max_workers=self.model.thread_max)
        # if self.debug:
        #     (debug) log that tiles are detected via a thread pool
        for box, img in yield_sub_img(
                img_path=slide, start_x=start_x, start_y=start_y,
                sub_width=patch_width, sub_height=patch_height,
                WIDTH=WIDTH, HEIGHT=HEIGHT,
                padding=padding):
            try:
                # Single thread: detect inline and merge immediately
                if not excutor:
                    # Defects found on this tile
                    bboxes = self.model.detect_sub_img(box=box, img=img, **run_params)
                    # Merge into the full-image result set
                    self.vertices = self.vertices | bboxes
                # Multi-threaded: submit to the pool and merge later
                else:
                    t = excutor.submit(self.model.detect_sub_img, box=box, img=img, **run_params)
                    pool.append(t)
            except Exception as e:
                logging.error("[{}]子图{}检查发生异常:{}".format(self.model.model_cfg.model_type, box, e), exc_info=1)
            else:
                pass
        for task in as_completed(pool):
            vertices = task.result()
            # Merge each completed tile's boxes into the full-image result
            self.vertices = self.vertices | vertices
        return self.vertices
def square_padding(self, slide, width, height):
    """Embed *slide* in a black square canvas, centered along its short axis.

    :param slide: sub-image array of shape (height, width, 3).
    :param width: sub-image width.
    :param height: sub-image height.
    :return: uint8 array of shape (side, side, 3) with side = max(width, height).
    """
    side = max(width, height)
    canvas = np.zeros((side, side, 3), np.uint8)
    if height > width:
        # Taller than wide: zero-pad the left/right margins equally.
        left = round((height - width) / 2)
        canvas[0:height, left:left + width] = slide
    else:
        # Wider than tall (or square): zero-pad the top/bottom margins equally.
        top = round((width - height) / 2)
        canvas[top:top + height, 0:width] = slide
    return canvas
def _classify_sub_img(self, slide, vertice: BBox, WIDTH, HEIGHT):
    """Run sub-models on one detected box's region (false-alarm suppression /
    pole-number detection); called from the main model only.

    Results are appended to ``self.return_bboxes``; nothing is returned.

    :param slide: full image array.
    :param vertice: box fields: x1, y1, x2, y2, class_name, confidence.
    :param WIDTH: full image width, used to clamp expansion.
    :param HEIGHT: full image height, used to clamp expansion.
    """
    # expand_type > 0: crop the box and pad it into a square.
    if self.model.model_cfg.expand_type > 0:
        sub_slide = self.square_padding(slide[vertice.y1: vertice.y2, vertice.x1:vertice.x2], vertice.w, vertice.h)
    # expand_type == 0 with no padding rate: expand by fixed pixel margins
    # (padding_pixel order is top/bottom/left/right per the original note).
    elif self.model.model_cfg.expand_type == 0 and not self.model.model_cfg.padding_rate:
        sub_bbox = vertice.expand_by_padding(*self.model.model_cfg.padding_pixel, w=WIDTH, h=HEIGHT)
        sub_slide = slide[sub_bbox.y1: sub_bbox.y2, sub_bbox.x1:sub_bbox.x2]
    else:
        # Otherwise expand proportionally by padding_rate.
        sub_bbox = vertice.expand_by_rate(self.model.model_cfg.padding_rate, w=WIDTH, h=HEIGHT)
        sub_slide = slide[sub_bbox.y1: sub_bbox.y2, sub_bbox.x1:sub_bbox.x2]
    # Prefer a recursive sub-model keyed by the box's class name.
    if vertice.class_name and vertice.class_name in self.model.model_cfg.sub_model_dict.keys():
        # Parent/child results are merged inside the sub-model.
        # NOTE(review): vertice[4] is presumably the same value as
        # vertice.class_name — confirm BBox's indexing contract.
        vertice_result = self.model.model_cfg.sub_model_dict[vertice[4]].detect(image=sub_slide, vertice=vertice,
                                                                                WIDTH=WIDTH, HEIGHT=HEIGHT)
        self.return_bboxes.extend(vertice_result)
        return
    logging.error("{}没有对应的子模型".format(vertice.class_name))
    # No matching sub-model: the box is dropped (nothing appended).
    return
@staticmethod
def _merge_bbox_with_classify(vertice: BBox, detect_type, class_name, class_confidence):
    """Merge a sub-classifier result into a detection box.

    :return: ``None`` when any input is missing; ``""`` when a pole-number
        (ganhao) sub-model read an empty number (the box is discarded);
        otherwise the updated *vertice*.
    """
    # Any missing piece makes the merge meaningless.
    if detect_type is None or class_name is None or class_confidence is None:
        return None
    if detect_type == DetectType.ganhao:
        # Pole-number recognition: an empty reading eliminates the box.
        if not class_name:
            return ""
        vertice.class_confidence = class_confidence
        vertice.number = class_name
    elif detect_type == DetectType.classify:
        # Classification sub-model: overwrite the class only when the
        # label is non-empty.
        if class_name:
            vertice.class_name = class_name
            vertice.class_confidence = class_confidence
    # Other detect types are intentionally left untouched.
    return vertice
def _judge_image_vague(self, image, vague_min=12.5):
    """Return False when *image* is too blurry to be worth detecting.

    Sharpness is the variance of the Laplacian of the grayscale image,
    compared against ``model_cfg.vague_min``.

    :param image: BGR image array.
    :param vague_min: unused default; the configured threshold is what is
        actually compared against.
    :return: True when sharp enough (or on any failure — fail open),
        False when blurry.
    """
    # Threshold not configured: treat every image as sharp enough.
    if not self.model.model_cfg.vague_min:
        return True
    try:
        image_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        # image_gray=image
        image_vague = cv2.Laplacian(image_gray, cv2.CV_64F).var()
    except Exception as e:
        logging.error("判断图像是否模糊异常:{}".format(e))
        # Fail open: let detection proceed when sharpness can't be computed.
        return True
    # (disabled debug trace of the computed sharpness value)
    if image_vague < self.model.model_cfg.vague_min:
        return False
    else:
        return True
def detect_for_image(self, sub_slide, vertice: BBox, width, height,
                     **kwargs):
    """Run this (sub-)model on one cropped region, merge the result into
    the parent box, and recurse into deeper sub-models when configured.

    :param sub_slide: cropped image region for this box.
    :param vertice: parent detection box (x1, y1, x2, y2, class_name, confidence).
    :param width: full-image width.
    :param height: full-image height.
    :return: BBoxes with the merged / recursed results (possibly empty).
    """
    # TIPS:modified for mrcnn
    vertice_result = BBoxes()
    # [0] Geometric pre-filter: reject boxes this sub-model should not see.
    if not self.model.judge_by_bbox_geo_feature(vertice, width, height):
        return vertice_result
    # [1] Run the sub-model (classification-style; original note says only
    # classification sub-models are supported for now).
    detect_type, type_name, class_confidence = self.model.detect_sub_model(sub_slide)
    # deepcopy so the caller's box is not mutated by the merge.
    vertice_temp = predict_pipe._merge_bbox_with_classify(deepcopy(vertice), detect_type, type_name,
                                                          class_confidence)
    # Merge failed (missing result or empty pole number): drop the box.
    if not vertice_temp:
        return vertice_result
    # Tag with the producing sub-model to prevent later merging.
    vertice_temp.class_type = "sub_model.{}".format(self.model.model_cfg.model_file)
    # [2] Keep the intermediate result when configured to output it, or
    # when this class has no deeper sub-model (leaf).
    if self.model.model_cfg.out or type_name not in self.model.model_cfg.sub_model_dict:
        vertice_result.append(vertice_temp)
    # A deeper sub-model is registered for this class: recurse.
    if type_name in self.model.model_cfg.sub_model_dict:
        vertice_result_temp = self.model.model_cfg.sub_model_dict[type_name].detect(image=sub_slide,
                                                                                    vertice=deepcopy(vertice),
                                                                                    WIDTH=width, HEIGHT=height)
        # Concatenate the recursive results.
        vertice_result.extend(vertice_result_temp)
    # Optional post-processing hook.
    if callable(self.model.post_handle_for_result):
        vertice_result = self.model.post_handle_for_result(
            vertice_result
        )
    return vertice_result
def process_list_merge(self, slide, patch_params, **kwargs):
    """Detect an image using the "merge" strategy: run every patching
    scheme in *patch_params*, union the boxes, filter/clamp them, run
    sub-models where configured, and return serialized results.

    :param slide: image array.
    :param patch_params: list of patching tuples
        (start_x, start_y, patch_width, patch_height, padding); empty or
        None means "no slicing".
    :return: list of dicts (``BBox.dict()``) for the surviving boxes.
    """
    # TIPS:modified for mrcnn
    sp = slide.shape
    # Skip detection entirely when the image is too blurry.
    if not self._judge_image_vague(slide):
        return []
    WIDTH = sp[1]  # image width
    HEIGHT = sp[0]  # image height
    vertices = BBoxes()
    if not patch_params:
        patch_params = [None, ]
    for patch_param in patch_params:
        # No slicing for this scheme.
        if not patch_param:
            vertices_temp = self._detect_img(slide)
        # Slice according to the scheme.
        else:
            vertices_temp = self._detect_img(slide, *patch_param)
        # todo: merge algorithm still to be confirmed for more than 2 schemes
        vertices = vertices | vertices_temp
    else:
        pass
    self.return_bboxes = BBoxes()
    # Custom post-main-model filter, e.g. keep the max-variance pole-number
    # box (variance-based selection still under consideration per original note).
    if callable(self.model.verify_after_main_model):
        vertices = self.model.verify_after_main_model(self.model.model_cfg, slide, vertices)
    for vertice in vertices:
        # Type check.
        if not isinstance(vertice, BBox):
            logging.error("bbox {} 必须为BBox类型".format(vertice))
            continue
        # Boxes below the model threshold are dropped; the interaction of
        # the three thresholds is handled in the config file.
        if vertice.confidence < self.model.model_cfg.thresh_ai:
            continue
        # Clamp x2/y2 to the image bounds.
        vertice.x2 = min(vertice.x2, WIDTH)
        vertice.y2 = min(vertice.y2, HEIGHT)
        # Initial ai_name value.
        vertice.ai_name = vertice.class_name
        # No sub-models configured: emit the detection directly. todo: unify
        if not self.model.model_cfg.sub_model_dict:
            self.return_bboxes.append(vertice)
            continue
        # Optional hook deciding whether sub-model detection is needed.
        if callable(self.model.need_sub_model_detect):
            is_need, vertice = self.model.need_sub_model_detect(vertice)
            if not is_need:
                self.return_bboxes.append(vertice)
                continue
        # Sub-models exist and intermediate output is requested: keep the
        # raw main-model box as well.
        if self.model.model_cfg.out:
            self.return_bboxes.append(vertice)
        # False-alarm suppression / sub-type detection.
        try:
            self._classify_sub_img(slide, vertice=vertice, WIDTH=WIDTH, HEIGHT=HEIGHT)
        except Exception as e:
            logging.error("图片{}的{}区域子模型预测发生异常:{}".format(sp, vertice, e), exc_info=True)
    # Optional whole-result post-processing.
    if callable(self.model.post_handle_for_result):
        self.return_bboxes = self.model.post_handle_for_result(
            self.model.model_cfg,
            self.return_bboxes)
    # Serialize and return.
    results = []
    for vertice in self.return_bboxes:
        if not isinstance(vertice, BBox):
            logging.error("[{}]{}错误的bbox类型{}".format(self.model.model_cfg.model_type, vertice, type(vertice)))
            continue
        results.append(vertice.dict())
    return results
# todo: the "delete" strategy is not supported yet
def process_list_delete(self, tif_path, patch_params, **kwargs):
    """Detect an image using the "delete" strategy (boxes of the first
    scheme minus boxes of later schemes).

    NOTE(review): marked unsupported. The leading ``pass`` is a leftover,
    and the ``for/else`` only returns when the loop completes without
    ``break`` (empty *patch_params* returns an empty BBoxes) — confirm
    intent before relying on this path.

    :param tif_path: image path (read with cv2).
    :param patch_params: list of patching tuples
        (start_x, start_y, patch_width, patch_height, padding).
    :return: BBoxes difference of the first scheme against later ones.
    """
    pass
    self.vertices = []
    slide = cv2.imread(tif_path)
    vertices_first = BBoxes()
    vertices = BBoxes()
    for patch_param in patch_params:
        vertices_temp = self._detect_img(slide, *patch_param)
        # todo: delete algorithm still to be confirmed for more than 2 schemes
        if not vertices_first:
            vertices_first = vertices_temp
        vertices = vertices_first - vertices_temp
    else:
        return vertices
if __name__ == '__main__':
    # Usage demo / ad-hoc benchmark: loops over a test folder forever and
    # times per-image blur detection.
    # Initialize the model once.
    u = predict_pipe.load(model="t", model_cfg=ModelConfigLoad("/data4/ai_model/nest"))
    # Instantiate the detector; the model is not reloaded.
    detect_obj = predict_pipe(u)
    t_dir = r"/data4/test_jpg/vague_test"
    file_list = os.listdir(t_dir)
    file_list = sorted(file_list)
    while True:
        for file_name in file_list:
            path = os.path.join(t_dir, file_name)
            # image = cv2.imread(path, cv2.IMREAD_UNCHANGED)
            start = time.time()
            image = cv2.imread(path)
            start_judge = time.time()
            print(detect_obj._judge_image_vague(image))
            end = time.time()
            print("耗时{}, 读图:{}, 检测:{},测试图片{}".format(end - start, start_judge - start, end - start_judge, path))
        # Pause between passes. NOTE(review): source indentation was lost;
        # the sleep may originally have been inside the inner loop — confirm.
        time.sleep(1)
| StarcoderdataPython |
190402 | <gh_stars>1-10
"""
Copyright 2015 <NAME>, <NAME>, <NAME> and <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
This file defines the concrete control flow logic
"""
from django.test import TestCase
from weiss.flows.factory import getFlowManager
from weiss.models import Action, State
class StateTestCase(TestCase):
    """Checks that each flow state exposes the expected set of next actions."""

    def setUp(self):
        # Build one state of each kind through the flow manager factory.
        fmgr = getFlowManager()
        self.SI = fmgr.createState(Action(1), State.SystemInitiative)
        self.TS = fmgr.createState(Action(2), State.TypeSelected)
        self.ES = fmgr.createState(Action(3), State.EntitySelected)
        self.CS = fmgr.createState(Action(4), State.CommentSelected)
        self.RS = fmgr.createState(Action(5), State.RangeSelected)
        pass

    def test_next_possible_actions(self):
        # Each state's reachable actions must match the hard-coded
        # transition table of the flow logic.
        self.assertEqual(set([Action(7),Action(8)]), self.SI.nextPossibleActions);
        self.assertEqual(set([Action(5),Action(7),Action(8)]), self.TS.nextPossibleActions);
        self.assertEqual(set([Action(1),Action(3),Action(4),Action(5),Action(6),Action(7),Action(8)]), self.ES.nextPossibleActions);
        self.assertEqual(set([Action(1),Action(2),Action(3),Action(4),Action(5),Action(6),Action(7),Action(8)]), self.CS.nextPossibleActions);
| StarcoderdataPython |
1854510 | """Docker helper functions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from treadmill import utils
# Resource types whose limits are mirrored from the parent process by default.
_DEFAULT_ULIMIT = ['core', 'data', 'fsize', 'nproc', 'nofile', 'rss', 'stack']
def init_ulimit(ulimit=None):
    """Build the dockerd ulimit table from the parent process's limits.

    Each resource in ``_DEFAULT_ULIMIT`` is seeded with the current
    process's soft/hard values; entries named in *ulimit* are then
    overridden.

    :param ulimit: optional iterable of "type:soft:hard" override strings.
    :return: list of ``{'Name': ..., 'Soft': ..., 'Hard': ...}`` dicts.
    """
    limits = []
    for name in _DEFAULT_ULIMIT:
        soft, hard = utils.get_ulimit(name)
        limits.append({'Name': name, 'Soft': soft, 'Hard': hard})
    # Apply any caller-supplied overrides on top of the defaults.
    for override in (ulimit or []):
        name, soft, hard = override.split(':', 3)
        for entry in limits:
            if entry['Name'] == name:
                entry['Soft'] = int(soft)
                entry['Hard'] = int(hard)
    return limits
def fmt_ulimit_to_flag(ulimits):
    """Render ulimit dicts as dockerd ``--default-ulimit`` CLI flags.

    The hard limit is used for both the soft and hard positions on
    purpose: dockerd has a known issue comparing finite vs infinite
    values and errors on e.g. {Soft=0, Hard=-1}.

    :param ulimits: iterable of ``{'Name': ..., 'Soft': ..., 'Hard': ...}``.
    :return: space-separated flag string.
    """
    return ' '.join(
        '--default-ulimit {0}={1}:{1}'.format(entry['Name'], entry['Hard'])
        for entry in ulimits
    )
| StarcoderdataPython |
98179 | import sys, os, getopt, time, datetime
from wxpy import *
def filesExist(fileList):
    """Return True iff every path in *fileList* exists on disk.

    Stops at the first missing path, printing a warning that names its
    1-based position in the list.
    """
    position = 0
    for aFile in fileList:
        position += 1
        if os.path.exists(aFile):
            continue
        print("warning: the {}th file, {}, doesn't exist.".format(position, aFile))
        return False
    return True
def readFile(filename):
    """Read *filename* in binary mode and return its lines, stripped.

    :param filename: path to read.
    :return: list of ``bytes``, one per line, each ``.strip()``-ed.
    """
    with open(filename, "rb") as handle:
        return [raw.strip() for raw in handle]
def main(argv):
    """Schedule WeChat (wxpy) delivery of files and messages at a set time.

    Flags: -d day offset (0 today / 1 tomorrow), -h hour, -m minute,
    -f path to a file listing files to send, -t path to a file listing
    messages to send, -u recipient friend name, -g recipient group name.
    At least one of -u/-g is required.
    """
    now = datetime.datetime.now()
    # Defaults: send today at 00:16.
    dayDelta = datetime.timedelta(days = 0)
    h = 0
    m = 16
    fileList = []
    messageList = []
    user = ""
    group = ""
    try:
        opts, args = getopt.getopt(argv,"d:h:m:f:t:u:g:")
    except getopt.GetoptError:
        print ('wx_send.py -d <today(0) or tomorrow(1)> -h <hour 0-24> -m <minutes 0-59> -f <a file list> -t <a message list> -u <user name> -g <group name>')
        sys.exit(1)
    for opt, arg in opts:
        # NOTE(review): '--help' is not declared in the getopt spec above,
        # so getopt raises before this branch can match — effectively dead.
        if opt == '--help':
            print ('wx_send.py -d <today(0) or tomorrow(1)> -h <hour 0-24> -m <minutes 0-59> -f <a file list> -t <a message list> -u <user name> -g <group name>')
            sys.exit()
        elif opt == "-d":
            dayDelta = datetime.timedelta(days = int(arg))
        elif opt == "-h":
            h = int(arg)
        elif opt == "-m":
            m = int(arg)
        elif opt == "-f":
            fileList = readFile(arg)
            # Abort early if any listed file is missing.
            if not filesExist(fileList):
                sys.exit()
        elif opt == "-t":
            messageList = readFile(arg)
        elif opt == "-u":
            user = arg
        elif opt == "-g":
            group = arg
    if user == "" and group == "":
        print("please specify a user or group")
        sys.exit()
    # Log in to WeChat and resolve recipients up front (must be unique).
    bot = Bot()
    if user != "":
        userList = bot.friends().search(user)
        try:
            userObj = ensure_one(userList)
        except Exception as e:
            print(e)
            sys.exit(2)
    if group != "":
        groupList = bot.groups().search(group)
        try:
            groupObj = ensure_one(groupList)
        except Exception as e:
            print(e)
            sys.exit(2)
    # Busy-wait in 20-second polls until the scheduled time.
    aTime = now.replace(hour = h, minute=m)
    aTime = aTime + dayDelta
    while datetime.datetime.now() < aTime:
        time.sleep(20)
    # Send files first, then text messages; failures are printed and skipped.
    for aFile in fileList:
        try:
            if user != "":
                userObj.send_file(aFile.decode("utf-8"))
            if group != "":
                groupObj.send_file(aFile.decode("utf-8"))
        except Exception as e:
            print(e)
            print(aFile)
    for aMessage in messageList:
        try:
            if user != "":
                userObj.send(aMessage.decode("utf-8"))
            if group != "":
                groupObj.send(aMessage.decode("utf-8"))
        except Exception as e:
            print(e)
            print(aMessage)
# Script entry point: forward CLI args (without the program name) to main().
if __name__ == "__main__":
    main(sys.argv[1:])
154323 | <gh_stars>10-100
#!/usr/bin/env python2.7
# -*- coding: utf8 -*-
# Demisto/XSOAR automation script (Python 2): run pcapfex extraction over a
# pcap file, merge its summary with pcap_miner statistics, and return the
# combined result to the platform.
# NOTE(review): `demisto`, `tableToMarkdown`, `entryTypes` and `formats`
# below are injected by the Demisto script runtime, not imported here —
# confirm before reusing this file standalone.
# import pip
# pip.main(['-q', 'install', 'cymruwhois'])
# pip.main(['-q', 'install', 'dpkt'])
# pip.main(['-q', 'install', 'simplejson'])
# Best-effort imports: missing third-party packages are reported, and the
# script continues (it will fail later if they were actually needed).
try:
    import dpkt
except:
    print "Download dpkt"
try:
    import cymruwhois
except:
    print "Download cymruwhois"
try:
    import simplejson as json
except:
    print "Download simplejson"
import argparse
from core.Dispatcher import Dispatcher
from minepcaps import pcap_miner
VERSION = "1.0"
# Command-line interface.
parser = argparse.ArgumentParser(description='Extract files from a pcap-file.')
parser.add_argument('input', metavar='PCAP_FILE', help='the input file')
parser.add_argument('output', metavar='OUTPUT_FOLDER', help='the target folder for extraction',
                    nargs='?', default='output')
parser.add_argument("-e", dest='entropy', help="use entropy based rawdata extraction",
                    action="store_true", default=False)
parser.add_argument("-nv", dest='verifyChecksums', help="disable IP/TCP/UDP checksum verification",
                    action="store_false", default=True)
parser.add_argument("--T", dest='udpTimeout', help="set timeout for UDP-stream heuristics",
                    type=int, default=120)
args = parser.parse_args()
# Mine summary statistics from the pcap.
readyPath = args.input
miner = pcap_miner(readyPath)
jsonResults = miner.summary2json()
pyResults = json.loads(jsonResults)
#print pyResults
#print 'pcapfex - Packet Capture Forensic Evidence Extractor - version %s' % (VERSION,)
#print '----------=------===-----=--------=---------=------------------' + '-'*len(VERSION) + '\n'
# Record the effective extraction settings in the result payload.
if not args.verifyChecksums:
    pyResults['verifiyChecksums'] = 'Packet checksum verification disabled.'
if args.entropy:
    pyResults['entropySetting'] = 'Using entropy and statistical analysis for raw extraction and classification of unknown data.'
# Run the actual file extraction.
dispatcher = Dispatcher(args.input, args.output, args.entropy,
                        verifyChecksums=args.verifyChecksums,
                        udpTimeout=args.udpTimeout,
                        )
results = dispatcher.run()
pyResults['files_found'] = results.filenamelist
print json.dumps(pyResults)
# Build the human-readable markdown tables for the platform war room.
# NOTE(review): displayData is only defined when "counts" is truthy; the
# later `+=` lines would raise NameError otherwise — confirm the miner
# always emits "counts".
if(pyResults["counts"]):
    displayData = tableToMarkdown('PCAP Data Frequency Counts', pyResults["counts"])
if(pyResults["destination_ip_details"]):
    displayData += tableToMarkdown('Destination IP Details', pyResults["destination_ip_details"])
if(pyResults["dns_data"]):
    displayData += tableToMarkdown('DNS Details', pyResults["dns_data"])
if(pyResults["http_requests"]):
    displayData += tableToMarkdown('Http Requests', pyResults["http_requests"])
if(pyResults["flows"]):
    displayData += tableToMarkdown('Flow Data', pyResults["flows"])
demisto.results({'Type': entryTypes['note'], 'Contents': pyResults, 'EntryContext': {'pcap_results': pyResults}, 'ContentsFormat': formats['json'], 'HumanReadable': displayData})
4964103 | <filename>comment/models/comments.py<gh_stars>0
from django.contrib.auth import get_user_model
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes.fields import GenericForeignKey
from django.db import models
from comment.managers import CommentManager
class Comment(models.Model):
    """A threaded comment attached to any model via a generic foreign key.

    Top-level comments have ``parent`` set to ``None``; replies point at
    their parent comment. Flag/reaction data live on related objects
    (``self.flag`` / ``self.reaction``) created elsewhere in the package.
    """
    user = models.ForeignKey(get_user_model(), on_delete=models.CASCADE, default=None)
    # Null parent marks a top-level comment; replies cascade-delete with it.
    parent = models.ForeignKey('self', on_delete=models.CASCADE, null=True, blank=True)
    # Generic relation to the commented-on object.
    content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
    object_id = models.PositiveIntegerField()
    content_object = GenericForeignKey('content_type', 'object_id')
    content = models.TextField()
    posted = models.DateTimeField(auto_now_add=True)
    edited = models.DateTimeField(auto_now=True)
    objects = CommentManager()

    class Meta:
        # Newest comments first.
        ordering = ['-posted', ]

    def __str__(self):
        if not self.parent:
            return f'comment by {self.user}: {self.content[:20]}'
        else:
            return f'reply by {self.user}: {self.content[:20]}'

    def __repr__(self):
        return self.__str__()

    def _get_reaction_count(self, reaction_type):
        # Delegates to the related reaction object; None when absent.
        return getattr(self.reaction, reaction_type, None)

    def replies(self, include_flagged=False):
        """Return this comment's direct replies, oldest first."""
        if include_flagged:
            return self.__class__.objects.filter(parent=self).order_by('posted')
        return self.__class__.objects.all_exclude_flagged().filter(parent=self).order_by('posted')

    @property
    def is_parent(self):
        # True for top-level comments.
        return self.parent is None

    @property
    def is_edited(self):
        # auto_now ticks slightly after auto_now_add on creation, so a
        # 1-second grace avoids flagging freshly created comments as edited.
        return self.posted.timestamp() + 1 < self.edited.timestamp()

    @property
    def likes(self):
        return self._get_reaction_count('likes')

    @property
    def dislikes(self):
        return self._get_reaction_count('dislikes')

    @property
    def is_flagged(self):
        # Flagged in any non-UNFLAGGED state, but only when flagging is enabled.
        if hasattr(self, 'flag'):
            if not self.flag.is_flag_enabled:
                return False
            return self.flag.state != self.flag.UNFLAGGED
        return False

    @property
    def has_flagged_state(self):
        if hasattr(self, 'flag'):
            return self.flag.state == self.flag.FLAGGED
        return False

    @property
    def has_rejected_state(self):
        if hasattr(self, 'flag'):
            return self.flag.state == self.flag.REJECTED
        return False

    @property
    def has_resolved_state(self):
        if hasattr(self, 'flag'):
            return self.flag.state == self.flag.RESOLVED
        return False
4909845 | <reponame>kimvc7/Robustness
import json
import os
def config_experiments(results_dir, create_json=True):
    """Generate the full grid of training configurations.

    Starts from ``./base_config.json``, emits one Madry/MNIST baseline,
    then for each (network, batch size, normalization, dataset) sweeps
    learning rates and epsilons for six training modes: Vanilla, Linf
    approx, L1 approx, Grad, Madry (PGD), and Certificate.

    NOTE(review): the local name ``id`` shadows the builtin; it is the
    monotonically increasing experiment id used for model_name and the
    JSON filename.

    :param results_dir: directory containing a ``configs/`` subfolder
        where per-experiment JSON files are written.
    :param create_json: when True, write each config to disk as well.
    :return: list of all generated config dicts.
    """
    with open('./base_config.json') as config_file:
        base_config = json.load(config_file)
    id = 0
    experiment_list = []
    # --- Baseline: Madry backbone, PGD training on MNIST ---
    config = base_config.copy()
    config["model_name"] = str(id)
    config["data_set"] = 0 #MNIST
    config["backbone"] = "Madry"
    config["training_batch_size"] = 32
    config["robust_training"] = False
    config["pgd_training"] = True
    config['epsilon_pgd_training'] = 0.3
    config["bound_lower"] = 0.0
    config["bound_upper"] = 1.0
    config["standarize"] = False
    config["standarize_multiplier"] = 1.0
    if create_json:
        with open(results_dir + 'configs/' + str(id) + '.json', 'w') as json_file:
            json.dump(config, json_file)
    experiment_list.append(config.copy())
    id += 1
    for net in ["ThreeLayer"]:
        for batch_size in [32, 256]:
            for normalization in ["01", "standarized"]:
                # restart is currently a constant; kept per config for compatibility.
                restart = False
                # "standarized": scale by 255 with effectively unbounded inputs;
                # "01": raw inputs clipped to [0, 1].
                if normalization == "standarized":
                    standarize = True
                    multiplier = 255.0
                    upper = 10e10
                    lower = -10e10
                else:
                    standarize = False
                    multiplier = 1.0
                    upper = 1.0
                    lower = 0.0
                for dataset in [0, 66, 67]:
                    #Vanilla
                    for lr in [1, 1e-1, 1e-2, 1e-3, 1e-4, 1e-5, 1e-6]:
                        config = base_config.copy()
                        config["data_set"] = dataset
                        config["model_name"] = str(id)
                        config["restart"] = restart
                        config["backbone"] = net
                        config["training_batch_size"] = batch_size
                        config["initial_learning_rate"] = lr
                        config["robust_training"] = False
                        config["pgd_training"] = False
                        config["max_num_training_steps"] = 10000
                        config["batch_decrease_learning_rate"] = 1e10 # do not decrease the learning rate
                        config["bound_lower"] = lower
                        config["bound_upper"] = upper
                        config["standarize"] = standarize
                        config["standarize_multiplier"] = multiplier
                        if create_json:
                            with open(results_dir + 'configs/' + str(id)+'.json', 'w') as json_file:
                                json.dump(config, json_file)
                        experiment_list.append(config.copy())
                        id += 1
                    #Linf approx
                    for lr in [1, 1e-1, 1e-2, 1e-3, 1e-4, 1e-5, 1e-6]:
                        for epsilon in [1e-4, 1e-5, 1e-3, 1e-2, 1e-1, 3e-1, 5e-1, 1, 3, 5, 10]:
                            config = base_config.copy()
                            config["data_set"] = dataset
                            config["model_name"] = str(id)
                            config["restart"] = restart
                            config["training_batch_size"] = batch_size
                            config["backbone"] = net
                            config["initial_learning_rate"] = lr
                            config["epsilon"] = epsilon
                            config["max_num_training_steps"] = 10000
                            config["robust_training"] = True
                            config["pgd_training"] = False
                            config["batch_decrease_learning_rate"] = 1e10 # do not decrease the learning rate
                            config["bound_lower"] = lower
                            config["bound_upper"] = upper
                            config["standarize"] = standarize
                            config["standarize_multiplier"] = multiplier
                            if create_json:
                                with open(results_dir + 'configs/' + str(id)+'.json', 'w') as json_file:
                                    json.dump(config, json_file)
                            experiment_list.append(config.copy())
                            id += 1
                    #L1 approx
                    for lr in [1, 1e-1, 1e-2, 1e-3, 1e-4, 1e-5, 1e-6]:
                        for epsilon in [1e-4, 1e-5, 1e-3, 1e-2, 1e-1, 3e-1, 5e-1, 1, 3, 5, 10]:
                            config = base_config.copy()
                            config["data_set"] = dataset
                            config["model_name"] = str(id)
                            config["training_batch_size"] = batch_size
                            config["restart"] = restart
                            config["backbone"] = net
                            config["initial_learning_rate"] = lr
                            config["epsilon"] = epsilon
                            config["max_num_training_steps"] = 10000
                            config["robust_training"] = True
                            config["type_robust"] = "l1"
                            config["pgd_training"] = False
                            config["batch_decrease_learning_rate"] = 1e10 # do not decrease the learning rate
                            config["bound_lower"] = lower
                            config["bound_upper"] = upper
                            config["standarize"] = standarize
                            config["standarize_multiplier"] = multiplier
                            if create_json:
                                with open(results_dir + 'configs/' + str(id)+'.json', 'w') as json_file:
                                    json.dump(config, json_file)
                            experiment_list.append(config.copy())
                            id += 1
                    #Grad
                    for lr in [1, 1e-1, 1e-2, 1e-3, 1e-4, 1e-5, 1e-6]:
                        for epsilon in [1e-4, 1e-5, 1e-3, 1e-2, 1e-1, 3e-1, 5e-1, 1, 3, 5, 10]:
                            config = base_config.copy()
                            config["data_set"] = dataset
                            config["model_name"] = str(id)
                            config["training_batch_size"] = batch_size
                            config["restart"] = restart
                            config["backbone"] = net
                            config["initial_learning_rate"] = lr
                            config["epsilon"] = epsilon
                            config["max_num_training_steps"] = 10000
                            config["robust_training"] = True
                            config["type_robust"] = "grad"
                            config["pgd_training"] = False
                            config["batch_decrease_learning_rate"] = 1e10 # do not decrease the learning rate
                            config["bound_lower"] = lower
                            config["bound_upper"] = upper
                            config["standarize"] = standarize
                            config["standarize_multiplier"] = multiplier
                            if create_json:
                                with open(results_dir + 'configs/' + str(id)+'.json', 'w') as json_file:
                                    json.dump(config, json_file)
                            experiment_list.append(config.copy())
                            id += 1
                    #Madry
                    for lr in [1, 1e-1, 1e-2, 1e-3, 1e-4, 1e-5, 1e-6]:
                        for epsilon_pgd_training in [1e-4, 1e-5, 1e-3, 1e-2, 1e-1, 3e-1, 5e-1, 1, 3, 5, 10]:
                            config = base_config.copy()
                            config["data_set"] = dataset
                            config["model_name"] = str(id)
                            config["training_batch_size"] = batch_size
                            config["restart"] = restart
                            config["backbone"] = net + "+pgd"
                            config["initial_learning_rate"] = lr
                            config["max_num_training_steps"] = 10000
                            # NOTE(review): `epsilon` here is left over from the
                            # previous Grad loop (its last value, 10) — this looks
                            # like a copy-paste bug; confirm the intended value
                            # (possibly epsilon_pgd_training, or drop the line).
                            config["epsilon"] = epsilon
                            config["robust_training"] = False
                            config["pgd_training"] = True
                            config["epsilon_pgd_training"] = epsilon_pgd_training
                            config["batch_decrease_learning_rate"] = 1e10 # do not decrease the learning rate
                            config["bound_lower"] = lower
                            config["bound_upper"] = upper
                            config["standarize"] = standarize
                            config["standarize_multiplier"] = multiplier
                            if create_json:
                                with open(results_dir + 'configs/' + str(id)+'.json', 'w') as json_file:
                                    json.dump(config, json_file)
                            experiment_list.append(config.copy())
                            id += 1
                    #Certificate
                    for lr in [1, 1e-1, 1e-2, 1e-3, 1e-4, 1e-5, 1e-6]:
                        for epsilon in [1e-4, 1e-5, 1e-3, 1e-2, 1e-1, 3e-1, 5e-1, 1, 3, 5, 10]:
                            config = base_config.copy()
                            config["data_set"] = dataset
                            config["model_name"] = str(id)
                            # Certificate training on dataset 67 with batch 256 is
                            # halved to 128 (presumably a memory limit — confirm).
                            if (batch_size == 256) & (dataset==67):
                                config["training_batch_size"] = 128
                            else:
                                config["training_batch_size"] = batch_size
                            config["backbone"] = net
                            config["restart"] = restart
                            config["initial_learning_rate"] = lr
                            config["epsilon"] = epsilon
                            config["max_num_training_steps"] = 10000
                            config["robust_training"] = True
                            config["type_robust"] = "certificate"
                            config["pgd_training"] = False
                            config["batch_decrease_learning_rate"] = 1e10 # do not decrease the learning rate
                            config["bound_lower"] = lower
                            config["bound_upper"] = upper
                            config["standarize"] = standarize
                            config["standarize_multiplier"] = multiplier
                            if create_json:
                                with open(results_dir + 'configs/' + str(id)+'.json', 'w') as json_file:
                                    json.dump(config, json_file)
                            experiment_list.append(config.copy())
                            id += 1
    print(str(id) + " config files created")
    return experiment_list
| StarcoderdataPython |
3290376 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author: <NAME>(<EMAIL>)
# Visualizer for pose estimator.
import os
import cv2
import matplotlib
import numpy as np
import pylab as plt
from PIL import Image
from numpy import ma
from scipy.ndimage.filters import gaussian_filter
from datasets.tools.transforms import DeNormalize
from utils.tools.logger import Logger as Log
POSE_DIR = 'vis/results/pose'
class PoseVisualizer(object):
    """Debug visualizer for pose estimation: renders keypoint heatmap peaks
    and part-affinity fields (PAFs) to image files under ``POSE_DIR``."""

    def __init__(self, configer):
        self.configer = configer

    def __get_peaks(self, heatmap):
        """Find local maxima of one keypoint heatmap above the configured
        part threshold.

        :param heatmap: 2-D score map for a single keypoint.
        :return: list of (w, h, score) tuples.
        """
        s_map = gaussian_filter(heatmap, sigma=3)
        # Four shifted copies of the smoothed map for the local-maximum test.
        map_left = np.zeros(s_map.shape)
        map_left[:, 1:] = s_map[:, :-1]
        map_right = np.zeros(s_map.shape)
        map_right[:, :-1] = s_map[:, 1:]
        map_up = np.zeros(s_map.shape)
        map_up[1:, :] = s_map[:-1, :]
        map_down = np.zeros(s_map.shape)
        map_down[:-1, :] = s_map[1:, :]
        # Get the salient points whose score exceeds part_threshold.
        peaks_binary = np.logical_and.reduce(
            (s_map >= map_left, s_map >= map_right,
             s_map >= map_up, s_map >= map_down,
             s_map > self.configer.get('vis', 'part_threshold')))
        peaks = list(zip(np.nonzero(peaks_binary)[1],
                         np.nonzero(peaks_binary)[0]))
        # A point format: (w, h, score).
        peaks_with_score = [x + (s_map[x[1], x[0]],) for x in peaks]
        return peaks_with_score

    def vis_peaks(self, heatmap_in, ori_img_in, name='default', sub_dir='peaks'):
        """Draw heatmap peaks on the original image, one output file per keypoint.

        :param heatmap_in: (K, H, W) tensor or (H, W, K) ndarray of keypoint maps.
        :param ori_img_in: normalized image tensor or BGR ndarray.
        :param name: output file name prefix.
        :param sub_dir: subdirectory under POSE_DIR.
        """
        base_dir = os.path.join(self.configer.get('project_dir'), POSE_DIR, sub_dir)
        if not os.path.exists(base_dir):
            Log.error('Dir:{} not exists!'.format(base_dir))
            os.makedirs(base_dir)
        # Accept either a torch tensor (CHW) or a numpy array (HWC).
        if not isinstance(heatmap_in, np.ndarray):
            if len(heatmap_in.size()) != 3:
                Log.error('Heatmap size is not valid.')
                exit(1)
            heatmap = heatmap_in.clone().data.cpu().numpy().transpose(1, 2, 0)
        else:
            heatmap = heatmap_in.copy()
        # Undo normalization for tensor inputs so drawing happens on uint8 BGR.
        if not isinstance(ori_img_in, np.ndarray):
            ori_img = DeNormalize(div_value=self.configer.get('normalize', 'div_value'),
                                  mean=self.configer.get('normalize', 'mean'),
                                  std=self.configer.get('normalize', 'std'))(ori_img_in.clone())
            ori_img = ori_img.data.cpu().squeeze().numpy().transpose(1, 2, 0).astype(np.uint8)
            ori_img = cv2.cvtColor(ori_img, cv2.COLOR_RGB2BGR)
        else:
            ori_img = ori_img_in.copy()
        for j in range(self.configer.get('data', 'num_kpts')):
            peaks = self.__get_peaks(heatmap[:, :, j])
            for peak in peaks:
                ori_img = cv2.circle(ori_img, (peak[0], peak[1]),
                                     self.configer.get('vis', 'circle_radius'),
                                     self.configer.get('details', 'color_list')[j], thickness=-1)
            # Save one (cumulative) image per keypoint index.
            # NOTE(review): source indentation was lost; this write may
            # originally have been outside the loop — confirm.
            cv2.imwrite(os.path.join(base_dir, '{}_{}.jpg'.format(name, j)), ori_img)

    def vis_paf(self, inputs_in, ori_img_in, name='default', sub_dir='pafs'):
        """Render each limb's part-affinity field as a quiver plot over the image.

        :param inputs_in: (2L, H, W) tensor or (H, W, 2L) ndarray of PAF channels.
        :param ori_img_in: normalized image tensor or BGR ndarray.
        :param name: output file name prefix.
        :param sub_dir: subdirectory under POSE_DIR.
        """
        base_dir = os.path.join(self.configer.get('project_dir'), POSE_DIR, sub_dir)
        if not os.path.exists(base_dir):
            Log.error('Dir:{} not exists!'.format(base_dir))
            os.makedirs(base_dir)
        if not isinstance(inputs_in, np.ndarray):
            if len(inputs_in.size()) != 3:
                Log.error('Pafs size is not valid.')
                exit(1)
            inputs = inputs_in.clone().data.squeeze().cpu().numpy().transpose(1, 2, 0)
        else:
            inputs = inputs_in.copy()
        if not isinstance(ori_img_in, np.ndarray):
            if len(ori_img_in.size()) != 3:
                Log.error('Image size is not valid.')
                exit(1)
            ori_img = DeNormalize(div_value=self.configer.get('normalize', 'div_value'),
                                  mean=self.configer.get('normalize', 'mean'),
                                  std=self.configer.get('normalize', 'std'))(ori_img_in.clone())
            ori_img = ori_img.data.cpu().squeeze().numpy().transpose(1, 2, 0).astype(np.uint8)
        else:
            ori_img = ori_img_in.copy()
        for i in range(len(self.configer.get('details', 'limb_seq'))):
            # Channels 2i/2i+1 hold the x/y components of limb i's field.
            U = inputs[:, :, 2*i] * -1
            V = inputs[:, :, 2*i+1]
            X, Y = np.meshgrid(np.arange(U.shape[1]), np.arange(U.shape[0]))
            # Mask out weak vectors (magnitude below 0.5).
            M = np.zeros(U.shape, dtype='bool')
            M[U ** 2 + V ** 2 < 0.5 * 0.5] = True
            U = ma.masked_array(U, mask=M)
            V = ma.masked_array(V, mask=M)
            img = Image.fromarray(ori_img, mode='RGB')
            plt.figure()
            plt.imshow(img, alpha=1.0)
            # Subsample the vector grid every s pixels for readability.
            s = 5
            Q = plt.quiver(X[::s, ::s], Y[::s, ::s], U[::s, ::s], V[::s, ::s],
                           scale=50, headaxislength=4, alpha=1.0, width=0.001, color='r')
            fig = matplotlib.pyplot.gcf()
            fig.set_size_inches(10, 10)
            plt.savefig(os.path.join(base_dir, '{}_{}.jpg'.format(name, i)))
if __name__ == "__main__":
# Test the visualizer.
pass
| StarcoderdataPython |
1919037 | import attr
import torch
import os
import collections
from tensor2struct.utils import registry, dataset
@attr.s
class Stat:
    """Aggregate accuracy counters for sketch / logical-form / denotation
    evaluation on one example set."""
    # Predictions whose sketch matched the gold sketch.
    sketch_cor_num = attr.ib(default=0)
    # Exact logical-form matches.
    lf_cor_num = attr.ib(default=0)
    # Correct execution results (denotations).
    denotation_cor_num = attr.ib(default=0)
    # Total examples counted.
    num_examples = attr.ib(default=0)

    def __str__(self):
        if self.num_examples > 0:
            str_builder = []
            str_builder.append(
                f"sketch eval: {self.sketch_cor_num/self.num_examples}, {self.sketch_cor_num}/{self.num_examples}"
            )
            str_builder.append(
                f"lf eval: {self.lf_cor_num/self.num_examples}, {self.lf_cor_num}/{self.num_examples}"
            )
            str_builder.append(
                f"denotation eval: {self.denotation_cor_num/self.num_examples}, {self.denotation_cor_num}/{self.num_examples}"
            )
            return "\n".join(str_builder)
        else:
            return "Empty stat"

    def to_dict(self):
        """Return accuracies and "correct/total" detail strings; {} when empty."""
        if self.num_examples > 0:
            rep = {}
            rep["sketch_eval"] = self.sketch_cor_num / self.num_examples
            rep["sketch_eval_detail"] = f"{self.sketch_cor_num}/{self.num_examples}"
            rep["lf_accuracy"] = self.lf_cor_num / self.num_examples
            rep["lf_eval_detail"] = f"{self.lf_cor_num}/{self.num_examples}"
            rep["exe_accuracy"] = self.denotation_cor_num / self.num_examples
            rep["exe_eval_detail"] = f"{self.denotation_cor_num}/{self.num_examples}"
            return rep
        else:
            return {}
@attr.s
class CogsItem:
    """One COGS example as read from the TSV file."""
    # Natural-language input sentence.
    text = attr.ib()
    # Target logical form.
    code = attr.ib()
    # Generalization-category label (third TSV column).
    category = attr.ib()
@registry.register("dataset", "cogs")
class CogsDataset(dataset.Dataset):
def __init__(self, path):
self.path = path
self.examples = []
with open(path, "r") as f:
for line in f:
question, lf, category = line.strip().split("\t")
item = CogsItem(question, lf, category)
self.examples.append(item)
def __len__(self):
return len(self.examples)
def __getitem__(self, idx):
return self.examples[idx]
class Metrics:
    """Accumulates per-example evaluation results for a COGS split,
    both overall and per generalization category."""

    def __init__(self, dataset, etype=None):
        # etype is accepted for interface compatibility but unused here.
        self.dataset = dataset
        self.stat = Stat(num_examples=len(dataset))
        self.categorized_stat = collections.defaultdict(Stat)
        self.results = []

    def add_one(self, item, inferred_code):
        """Score a single prediction and record it."""
        ret_dict = self.eval_one(item.code, inferred_code, item.category)
        ret_dict["question"] = item.text  # for debug
        self.results.append(ret_dict)

    def add_beams(self, item, inferred_codes):
        # Beam evaluation is not supported for this dataset.
        raise NotImplementedError

    def eval_one(self, gold_code, inferred_code, category):
        """Score one prediction by exact logical-form match and update counters.

        NOTE(review): denotation_eval is hard-coded to None (a placeholder
        for execution-based evaluation), so the denotation counters below
        never increment.
        """
        ret_dic = {}
        ret_dic["gold_code"] = gold_code
        ret_dic["inferred_code"] = inferred_code
        ret_dic["lf_eval"] = gold_code == inferred_code
        ret_dic["denotation_eval"] = None
        self.categorized_stat[category].num_examples += 1
        if ret_dic["lf_eval"]:
            self.stat.lf_cor_num += 1
            self.categorized_stat[category].lf_cor_num += 1
        if ret_dic["denotation_eval"]:
            self.stat.denotation_cor_num += 1
            self.categorized_stat[category].denotation_cor_num += 1
        return ret_dic

    def finalize(self):
        """Return per-item results plus aggregate and per-category score dicts."""
        ret_stats = {"per_item": self.results, "total_scores": self.stat.to_dict()}
        for category in self.categorized_stat:
            ret_stats[category] = self.categorized_stat[category].to_dict()
        return ret_stats
@registry.register("dataset", "cogs_grammar")
class CogsDatasetGrammar(CogsDataset):
def __init__(self, path):
pass | StarcoderdataPython |
6496973 | import random
def generatePassword(pwlength):
    """Generate one password per requested length in *pwlength*.

    Each password starts as random lowercase letters, then has 1-2
    characters in its first half replaced by digits and 1-2 characters
    in its second half uppercased.

    :param pwlength: iterable of desired password lengths.
    :return: list of generated passwords, in order.
    """
    alphabet = "abcdefghijklmnopqrstuvwxyz"
    passwords = []
    for length in pwlength:
        letters = [alphabet[random.randrange(len(alphabet))] for _ in range(length)]
        candidate = replaceWithNumber("".join(letters))
        passwords.append(replaceWithUppercaseLetter(candidate))
    return passwords
def replaceWithNumber(pword):
    """Overwrite 1-2 characters in the first half of *pword* with random digits."""
    for _ in range(random.randrange(1, 3)):
        idx = random.randrange(len(pword) // 2)
        digit = str(random.randrange(10))
        pword = pword[:idx] + digit + pword[idx + 1:]
    return pword
def replaceWithUppercaseLetter(pword):
    """Uppercase 1-2 randomly chosen characters in the second half of *pword*."""
    for _ in range(random.randrange(1, 3)):
        idx = random.randrange(len(pword) // 2, len(pword))
        pword = pword[:idx] + pword[idx].upper() + pword[idx + 1:]
    return pword
def main():
    """Prompt for a password count and per-password lengths, then print them."""
    numPasswords = int(input("How many passwords do you want to generate? "))
    print("Generating " + str(numPasswords) + " passwords")
    passwordLengths = []
    print("Minimum length of password should be 3")
    for i in range(numPasswords):
        length = int(input("Enter the length of Password #" + str(i + 1) + " "))
        # enforce the advertised minimum length of 3
        passwordLengths.append(max(length, 3))
    passwords = generatePassword(passwordLengths)
    for i in range(numPasswords):
        print("Password #" + str(i + 1) + " = " + passwords[i])


# Guard so importing this module does not start prompting for input;
# the original called main() unconditionally at import time.
if __name__ == "__main__":
    main()
3521051 | import xml.etree.ElementTree as ET
import csv
import sys
input_filename = str(sys.argv[1])
output_filename = str(sys.argv[2])

# The XML payload sits on the second line of the input file (line 0 is
# skipped, presumably a header/banner line) -- TODO confirm against a sample.
# `with` fixes the original's leaked input file handle.
with open(input_filename, "r") as f:
    xmldata = f.readlines()[1]
root = ET.fromstring(xmldata)

# newline="" is required for csv.writer files (prevents blank rows on Windows).
with open(output_filename, 'w', newline="") as output_data:
    csvwriter = csv.writer(output_data)
    csvwriter.writerow(["Name", "Device Count"])
    # one CSV row per <row> element: its <name> and <count> children
    for member in root.iter('row'):
        name = member.find('name').text
        device_count = member.find('count').text
        csvwriter.writerow([name, device_count])
| StarcoderdataPython |
3435021 | # https://leetcode.com/problems/missing-number/
class Solution:
    def missingNumber(self, nums):
        """
        Return the single value missing from *nums*, which contains n distinct
        numbers drawn from the range [0, n].

        XOR trick: folding together all indices 0..n and all elements cancels
        every present value, leaving the missing one.

        :type nums: List[int]
        :rtype: int
        """
        # Seed with n itself -- the one index the loop below never produces.
        # (The original started at 0 and looped range(len(nums) - 1), which
        # dropped two indices and returned wrong answers.)
        x = len(nums)
        for i in range(len(nums)):
            x ^= i
        for n in nums:
            x ^= n
        return x
| StarcoderdataPython |
11398443 | <reponame>shubhamksm/Library-Management-System
from django.apps import AppConfig
class LibrarymanagerConfig(AppConfig):
    """Django app configuration for the librarymanager app."""
    name = 'librarymanager'

    def ready(self):
        # Imported purely for its side effects: registers the app's signal
        # receivers once the app registry is fully loaded.
        import librarymanager.signals
399087 | #Spawn a Process – Chapter 3: Process Based Parallelism
import multiprocessing
def function(i):
    """Worker body: report which task index this process is running."""
    message = 'called function in process: %s' % i
    print(message)
if __name__ == '__main__':
    # Spawn five worker processes. Because join() is inside the loop, each
    # process is waited on before the next starts, so they run sequentially.
    Process_jobs = []
    for i in range(5):
        p = multiprocessing.Process(target=function, args=(i,))
        Process_jobs.append(p)
        p.start()
        p.join()
| StarcoderdataPython |
8038240 | <filename>generate_algos.py
#!/usr/bin/env python3
import yaml
from math import log, sqrt, floor, ceil
from itertools import product
from operator import itemgetter
import numpy as np
if __name__ == '__main__':
    # Geometric grid of sample counts: iv, iv*sqrt(2), ..., up to ~max_n.
    max_n = 3000000
    iv = 10
    multiplier = sqrt(2)
    max_i = int((log(max_n)-log(iv))/log(multiplier))
    mks = [int(round(iv*multiplier**i)) for i in range(max_i+1)]
    # candidate index list counts: powers of two from 32 to 4096
    n_lists = [1<<i for i in range(5,13)]
    n_query = 1  # NOTE(review): unused below -- leftover from an earlier variant?
    # query_args = list(sorted([[mk,mk,n_list,1] for (mk,n_list) in product(mks,n_lists)], key=itemgetter(2)))
    # full cross-product of (k, m, n_list) query parameters, sorted by n_list
    query_args = list(sorted([[k,m,n_list,1] for (k,m,n_list) in product(mks,mks,n_lists)], key=itemgetter(2)))
    # HBE/RS parameter grid: decreasing epsilons, geometrically decreasing taus
    epsilons = np.round(np.arange(1.5,0.05,-0.05),5).tolist()
    taus = np.round([0.01 / sqrt(2)**i for i in range(20)],5).tolist()
    query_args_hbe = list(sorted([[eps,tau] for (eps,tau) in product(epsilons,taus)], key=itemgetter(1), reverse=True))
    # sklearn tree parameters: leaf sizes and relative tolerances
    ls = [int(round(10*sqrt(2)**i)) for i in range(10)]
    trs = list(reversed([round(0.05*i,4) for i in range(11)]))
    query_args_sklearn = [[l,0.0,tr] for (l,tr) in product(ls,trs)]
    # Algorithm registry consumed by the experiment driver; dumped as YAML.
    algos = {
        'naive' : {
            'constructor' : 'Naive',
            'wrapper' : 'deann_wrapper',
            'docker' : 'deann-experiments-deann'
        },
        'ann-faiss' : {
            'constructor' : 'ANNFaiss',
            'query' : query_args,
            'wrapper': 'deann_wrapper',
            'docker' : 'deann-experiments-deann'
        },
        'ann-permuted-faiss' : {
            'constructor' : 'ANNPermutedFaiss',
            'query' : query_args,
            'wrapper': 'deann_wrapper',
            'docker' : 'deann-experiments-deann'
        },
        'random-sampling' : {
            'constructor' : 'RandomSampling',
            'query' : mks,
            'wrapper': 'deann_wrapper',
            'docker' : 'deann-experiments-deann'
        },
        'rsp' : {
            'constructor' : 'RandomSamplingPermuted',
            'query' : mks,
            'wrapper': 'deann_wrapper',
            'docker' : 'deann-experiments-deann'
        },
        'hbe' : {
            'args' : { 'binary' : 'hbe' },
            'constructor' : 'HBEEstimator',
            'query' : query_args_hbe,
            'wrapper' : 'hbe',
            'docker' : 'deann-experiments-hbe',
            'separate-queries' : True
        },
        'rs' : {
            'args' : { 'binary' : 'hbe' },
            'constructor' : 'RSEstimator',
            'query' : query_args_hbe,
            'wrapper' : 'hbe',
            'docker' : 'deann-experiments-hbe',
            'separate-queries' : True
        },
        'sklearn-balltree' : {
            'constructor' : 'SklearnBallTreeEstimator',
            'query' : query_args_sklearn,
            'wrapper' : 'sklearn',
            'docker' : 'deann-experiments-sklearn'
        },
        'sklearn-kdtree' : {
            'constructor' : 'SklearnKDTreeEstimator',
            'query' : query_args_sklearn,
            'wrapper' : 'sklearn',
            'docker' : 'deann-experiments-sklearn'
        }
    }
    print(yaml.dump(algos))
| StarcoderdataPython |
9610867 | # Copyright (C) 2021-2022 Modin authors
#
# SPDX-License-Identifier: Apache-2.0
"""High-level API of MultiProcessing backend."""
import cloudpickle as pkl
from unidist.config import CpuCount
from unidist.core.backends.multiprocessing.core.object_store import ObjectStore, Delayed
from unidist.core.backends.multiprocessing.core.process_manager import (
ProcessManager,
Task,
)
def init(num_workers=CpuCount.get()):
    """
    Initialize shared object storage and workers pool.

    Parameters
    ----------
    num_workers : int, default: number of CPUs
        Number of worker-processes to start.

    Notes
    -----
    Run initialization of singleton objects ``unidist.core.backends.multiprocessing.core.object_store.ObjectStore``
    and ``unidist.core.backends.multiprocessing.core.process_manager.ProcessManager``.
    """
    # NOTE(review): the default is evaluated once at import time; if CpuCount
    # can change afterwards, a `num_workers=None` sentinel would be safer.
    ObjectStore.get_instance()
    ProcessManager.get_instance(num_workers=num_workers)
def put(data):
    """
    Store *data* in the shared object storage.

    Parameters
    ----------
    data : object
        Data to be put.

    Returns
    -------
    unidist.core.backends.common.data_id.DataID
        An ID of the object in shared object storage.
    """
    store = ObjectStore.get_instance()
    return store.put(data)
def get(data_ids):
    """
    Fetch the object(s) behind *data_ids* from the shared object storage.

    Parameters
    ----------
    data_ids : unidist.core.backends.common.data_id.DataID or list
        An ID(s) to object(s) to get data from.

    Returns
    -------
    object
        A Python object.
    """
    store = ObjectStore.get_instance()
    return store.get(data_ids)
def wait(data_ids, num_returns=1):
    """
    Block until at least `num_returns` of *data_ids* have finished.

    Parameters
    ----------
    data_ids : unidist.core.backends.common.data_id.DataID or list
        ``DataID`` or list of ``DataID``-s to be waited.
    num_returns : int, default: 1
        The number of ``DataID``-s that should be returned as ready.

    Returns
    -------
    tuple
        Two lists: the ``DataID``-s whose computations completed, and the
        remaining ``DataID``-s (which may or may not be ready).
    """
    store = ObjectStore.get_instance()
    return store.wait(data_ids, num_returns=num_returns)
def submit(func, *args, num_returns=1, **kwargs):
    """
    Execute *func* in a worker process.

    Parameters
    ----------
    func : callable
        Function to be executed in the worker.
    *args : iterable
        Positional arguments to be passed in the `func`.
    num_returns : int, default: 1
        Number of results to be returned from `func`.
    **kwargs : dict
        Keyword arguments to be passed in the `func`.

    Returns
    -------
    unidist.core.backends.common.data_id.DataID, list or None
        * ``None`` when ``num_returns == 0``;
        * a list of ``DataID``-s when ``num_returns > 1``;
        * a single ``DataID`` otherwise.
    """
    store = ObjectStore.get_instance()
    # Pre-register placeholder ("delayed") slots for the future results.
    if num_returns == 0:
        data_ids = None
    elif num_returns > 1:
        data_ids = [store.put(Delayed()) for _ in range(num_returns)]
    else:
        data_ids = store.put(Delayed())
    task = Task(func, data_ids, store, *args, **kwargs)
    ProcessManager.get_instance().submit(pkl.dumps(task))
    return data_ids
| StarcoderdataPython |
1828770 | <filename>main.py
from flask import Flask
app = Flask(__name__)
# Enables Werkzeug's interactive debugger and auto-reload.
# NOTE(review): debug mode must never be enabled in production deployments.
app.config['DEBUG'] = True
@app.route('/')
def hello():
    """Serve the site root with a plain-text greeting."""
    greeting = 'Hello <NAME>!'
    return greeting
@app.errorhandler(404)
def page_not_found(e):
    """Custom 404 handler: a short apology body plus the status code."""
    body = 'no, it is not here'
    return body, 404
| StarcoderdataPython |
class PackageConstants(object):
    """Names of the configuration keys this package reads.

    NOTE(review): these look like environment-variable names — confirm where
    they are consumed.
    """
    DELIVERY_STREAM = "DELIVERY_STREAM"
    AWS_REGION = "AWS_REGION"
| StarcoderdataPython |
8118961 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# SPDX-License-Identifier: Apache-2.0
from bandit import bandit
if __name__ == '__main__':
    # Thin CLI shim: delegate straight to bandit's own entry point.
    bandit.main()
| StarcoderdataPython |
344755 | <reponame>allenai/real-toxicity-prompts
# Project-level constants, including API keys and directories
# Note: importing this file has the side effect of loading a configuration file
from pathlib import Path
import yaml
##############################
# Config
##############################
# API keys default to empty strings and are overwritten from config.yml below.
CONFIG_FILE = Path('config.yml')
PERSPECTIVE_API_KEY = ''
OPENAI_API_KEY = ''
try:
    with open(CONFIG_FILE) as f:
        # NOTE(review): yaml.safe_load would suffice here; also a missing
        # 'perspective'/'openai' key raises an uncaught KeyError.
        config = yaml.load(f, Loader=yaml.FullLoader)
        PERSPECTIVE_API_KEY = config['perspective']
        OPENAI_API_KEY = config['openai']
except FileNotFoundError:
    # Absent config is tolerated: the module still imports, keys stay empty.
    print('No config file found. API keys will not be loaded.')
##############################
# Paths
##############################
SHARED_DIR = Path('/data/language-model-toxicity')
DATA_DIR = SHARED_DIR / 'data'
OUTPUT_DIR = Path('output')
# SQLite databases holding scored corpora.
OPENWEBTEXT_DB = DATA_DIR / 'openwebtext-perspective.db'
WEBTEXT_DB = DATA_DIR / 'webtext.db'
# TODO: remove
TEXTS_DIR = DATA_DIR / 'openwebtext'
##############################
# Perspective API
##############################
# Maximum request payload size accepted by the API (bytes).
PERSPECTIVE_API_LEN_LIMIT = 20480
# All attributes can be found here:
# https://github.com/conversationai/perspectiveapi/blob/master/2-api/models.md
PERSPECTIVE_API_ATTRIBUTES = (
    'TOXICITY',
    'SEVERE_TOXICITY',
    'IDENTITY_ATTACK',
    'INSULT',
    'THREAT',
    'PROFANITY',
    'SEXUALLY_EXPLICIT',
    'FLIRTATION'
)
# Lowercase mirror, e.g. for use as dict keys / column names.
PERSPECTIVE_API_ATTRIBUTES_LOWER = tuple(a.lower() for a in PERSPECTIVE_API_ATTRIBUTES)
| StarcoderdataPython |
11377221 | #!/usr/bin/env python
import argparse,logging,glob
import numpy as np
logger = logging.getLogger(__name__)
def _configure_logging(args, logging_format, logging_datefmt):
    """Reset the root logger and reconfigure it according to the CLI flags.

    Exactly one of --debug/--error/--warning selects that level; any other
    combination (including none of them) falls back to INFO. This replaces
    four near-identical copies of the same handler-reset code.
    """
    if args.debug and not args.error and not args.warning:
        level = logging.DEBUG
    elif not args.debug and args.error and not args.warning:
        level = logging.ERROR
    elif not args.debug and not args.error and args.warning:
        level = logging.WARNING
    else:
        level = logging.INFO
    # remove existing root handlers and reconfigure (possibly into a file)
    for h in logging.root.handlers:
        logging.root.removeHandler(h)
    logging.basicConfig(level=level,
                        format=logging_format,
                        datefmt=logging_datefmt,
                        filename=args.logfilename)
    if level != logging.INFO:
        logger.setLevel(level)


def _parse_log_file(log_file):
    """Extract one summary dict (timings, metrics, run config) from a log file."""
    parts = log_file.split('.')
    jobid = parts[0]
    config = get_config(parts[1])
    with open(log_file) as f:  # the original leaked this file handle
        lines = f.readlines()
    singularity_image = False
    nranks = 1
    nepochs = 1
    import_times = []
    epoch_times = []
    test_loss = 0.
    test_acc = 0.
    for line in lines:
        try:
            if 'HOROVOD MNIST' in line and 'Singularity' in line:
                singularity_image = True
            elif line.startswith('mpi rank'):
                # "mpi rank <r> of <n>; ... <import_time>"
                words = line.split()
                nranks = int(words[4].replace(';', ''))
                import_times.append(float(words[-1]))
            elif line.startswith('Epoch'):
                # "Epoch <e>/<n>" -> total epoch count
                nepochs = int(line.split()[1].split('/')[1])
            elif line.find('===] - ') >= 0:
                # Keras progress bar tail: "===] - <seconds>s ..."
                start = line.find('===] - ') + len('===] - ')
                end = line.find('s', start)
                epoch_times.append(int(line[start:end]))
            elif line.startswith('Test loss:'):
                # truncated to 7 chars to drop trailing junk (original quirk kept)
                test_loss = float(line.split()[2][:7])
            elif line.startswith('Test accuracy:'):
                test_acc = float(line.split()[2])
        except Exception:
            logger.exception('received exception for line: %s', line)
    return {
        'import_times': get_mean_sigma(import_times),
        'epoch_times': get_mean_sigma(epoch_times),
        'test_loss': test_loss,
        'test_acc': test_acc,
        'nranks': nranks,
        'nepochs': nepochs,
        'jobid': jobid,
        'singularity': singularity_image,
        'config': config,
    }


def _format_row(output):
    """Render one summary dict as a fixed-width, tab-separated table row."""
    string = '%10s' % output['jobid'] + '\t'
    string += '%10i' % output['nranks'] + '\t'
    string += '%10i' % output['nepochs'] + '\t'
    string += '%10i' % int(output['singularity']) + '\t'
    if output['config'] is not None:
        string += '%10s' % output['config']['nodes'] + '\t'
        string += '%10s' % output['config']['lfssize'] + '\t'
        string += '%10s' % output['config']['lfscount'] + '\t'
    else:
        string += '\t\t\t'
    string += '%10.4f' % output['import_times']['mean'] + '\t'
    string += '%10.4f' % output['import_times']['sigma'] + '\t'
    string += '%10.4f' % output['epoch_times']['mean'] + '\t'
    string += '%10.4f' % output['epoch_times']['sigma'] + '\t'
    string += '%10.4f' % output['test_loss'] + '\t'
    string += '%10.4f' % output['test_acc']
    return string


def main():
    """Parse Horovod-MNIST job logs named on the command line and print a
    per-job summary table (timing statistics and final test metrics)."""
    logging_format = '%(asctime)s %(levelname)s:%(name)s:%(message)s'
    logging_datefmt = '%Y-%m-%d %H:%M:%S'
    logging.basicConfig(level=logging.INFO, format=logging_format, datefmt=logging_datefmt)

    parser = argparse.ArgumentParser(description='')
    parser.add_argument('-i', '--input', dest='input', type=str, help='input', required=True, nargs='*')
    parser.add_argument('--debug', dest='debug', default=False, action='store_true', help="Set Logger to DEBUG")
    parser.add_argument('--error', dest='error', default=False, action='store_true', help="Set Logger to ERROR")
    # help text fixed: it previously said "Set Logger to ERROR"
    parser.add_argument('--warning', dest='warning', default=False, action='store_true', help="Set Logger to WARNING")
    parser.add_argument('--logfilename', dest='logfilename', default=None, help='if set, logging information will go to file')
    args = parser.parse_args()

    _configure_logging(args, logging_format, logging_datefmt)

    # expand the (possibly glob-pattern) inputs into one sorted file list
    log_files = []
    for pattern in args.input:
        log_files += glob.glob(pattern)
    log_files = sorted(log_files)

    output_data = []
    for log_file in log_files:
        logger.info('processing %s', log_file)
        try:
            log_data = _parse_log_file(log_file)
            output_data.append(log_data)
            logger.info('data: %s', log_data)
        except Exception:
            logger.exception('exception processing file %s', log_file)

    print('jobid\tnranks\tnepochs\tsingularity\tnodes\tlfssize\tlfscount\timport_times\t\tepoch_times\ttest_loss\ttest_acc')
    for output in output_data:
        try:
            print(_format_row(output))
        except Exception:
            logger.exception('error printing %s', output)
            raise
def get_config(output_string):
    """Decode a run-configuration token such as '512k_50c_4n'.

    Suffixes: 'n' -> node count, 'c' -> LFS stripe count, 'k'/'m' -> LFS
    stripe size in KiB/MiB. Returns None when the token has no '_' separator.
    """
    if '_' not in output_string:
        return None
    nodes = None
    lfscount = None
    lfssize = None
    for token in output_string.split('_'):  # e.g. _512k_50c_4n
        suffix = token[-1]
        value = token[:-1]
        if suffix == 'n':
            nodes = int(value)
        elif suffix == 'c':
            lfscount = int(value)
        elif suffix == 'k':
            lfssize = int(value) * 1024
        elif suffix == 'm':
            lfssize = int(value) * 1024 * 1024
    return {'nodes': nodes, 'lfscount': lfscount, 'lfssize': lfssize}
def get_mean_sigma(data):
    """Return the mean and population standard deviation of *data*.

    Returns {'mean': 0., 'sigma': 0.} for empty input. Uses numpy's std,
    which (unlike the original sqrt(E[x^2] - mean^2) formula) cannot produce
    NaN from a slightly negative variance under floating-point rounding.
    """
    if not data:
        return {'mean': 0., 'sigma': 0.}
    arr = np.asarray(data, dtype=float)
    return {'mean': float(arr.mean()), 'sigma': float(arr.std())}
if __name__ == "__main__":
    # CLI entry point.
    main()
| StarcoderdataPython |
1941181 | from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, String, Integer
Base = declarative_base()
class Adult(Base):
    # ORM mapping for the UCI "Adult" census table.
    #
    # NOTE(review): this mapping looks copy-pasted from another model --
    # several attributes map to unrelated column names ("Created At",
    # "Product ID", "Tax", ...), six attributes all share the literal column
    # name "User ID", and no primary_key is declared (SQLAlchemy requires one
    # to map the class). Confirm against the real table schema before use.
    __tablename__ = 'adult'
    age = Column("age", Integer)  # primary_key=True
    workclass = Column("Created At", String)
    fnlwgt = Column("Discount", String)
    education = Column("Product ID", String)
    education_num = Column("Quantity", String)
    marital_status = Column("Subtotal", String)
    occupation = Column("Tax", String)
    relationship = Column("Total", String)
    race = Column("User ID", String)
    sex = Column("User ID", String)
    capital_gain = Column("User ID", String)
    capital_loss = Column("User ID", String)
    hours_per_week = Column("User ID", String)
    native_countr = Column("User ID", String)
    def __repr__(self):
        # Debug-friendly dump of every mapped attribute.
        return "<Adult(age='%s', workclass='%s', fnlwgt='%s', education='%s', education_num='%s', marital_status='%s', occupation='%s', relationship='%s', race='%s', sex='%s', capital_gain='%s', capital_loss='%s', hours_per_week='%s', native_countr='%s' )>" % (
            self.age, self.workclass, self.fnlwgt, self.education, self.education_num, self.marital_status, self.occupation, self.relationship, self.race, self.sex, self.capital_gain, self.capital_loss, self.hours_per_week, self.native_countr)
| StarcoderdataPython |
368010 | from django.forms import ModelForm
from django import forms
from orders.models import Staff, Product, Customer, Order, Engineering, Request
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Submit, Layout, Div, Fieldset, Field
from django.urls import reverse
class OrderForm(forms.ModelForm):
    # Crispy-forms ModelForm for creating/editing a Request record.
    def __init__(self, *args, **kwargs):
        super(OrderForm, self).__init__(*args, **kwargs)
        # The FormHelper drives rendered layout, styling and the submit button.
        self.helper = FormHelper()
        self.helper.form_id = 'id-order-form'
        # self.helper.form_method = 'post'
        # self.helper.form_action = reverse('order_create')
        self.helper.add_input(Submit('submit', 'Save', css_class='btn-success'))
        # One fieldset listing every editable field, rendered in black.
        self.helper.layout = Layout(
            Fieldset('',
                     Field('project_no', placeholder='Please input the project number!',),
                     'r_type',
                     'product',
                     'customer',
                     'order',
                     'eng',
                     'responsable',
                     'request_date',
                     'estimate',
                     'status',
                     'comments',
                     style="color:black;",
                     )
            # self.helper.label_class = 'col-md-2'
            # self.helper.layout = Layout(
            # Field('project_no', style="color: brown;",
            # placeholder='Please input the project number!',),
            # Field('r_type', style="color: brown;",),
            # Fieldset('', 'product', 'estimate', style="color: black;"),
            # HTML("""<h3>Create new customers account</h3>"""),
            # Row(Field('first_name',),),
            # Field('project_no', placeholder='Your first name', css_class="some-class")
            # Div('last_name', title="Your last name")
        )

    class Meta:
        model = Request
        # Human-readable labels overriding the model field names.
        labels = {
            'project_no': 'Project number',
            'r_type': 'Type',
        }
        fields = ('project_no',
                  'r_type',
                  'product',
                  'customer',
                  'order',
                  'eng',
                  'responsable',
                  'request_date',
                  'estimate',
                  'status',
                  'comments',
                  )
# widgets = {
# 'project_no':TextInput(attrs={'size':'70','cols': 10, 'rows': 20}),
# } | StarcoderdataPython |
5059953 | <filename>moler/util/__init__.py
# -*- coding: utf-8 -*-
__author__ = '<NAME>'
__copyright__ = 'Copyright (C) 2018, Nokia'
__email__ = '<EMAIL>'
| StarcoderdataPython |
6630171 | import os
from .BaseDataset import BaseDataset
from PIL import Image
class SingleFolderDataset(BaseDataset):
    """Dataset over one directory of frames, yielding sliding windows of
    `sequence_size` consecutive frame paths centered on `interval`."""

    def __init__(self, root, cfg, is_train=True):
        super(SingleFolderDataset, self).__init__(cfg, is_train)
        root = os.path.abspath(root)
        frame_paths = [os.path.join(root, name) for name in sorted(os.listdir(root))]
        self.interval = self.sequence_size // 2
        # one entry per sliding window of sequence_size consecutive frames
        num_windows = len(frame_paths) - self.sequence_size + 1
        self.file_list = [frame_paths[start:start + self.sequence_size]
                          for start in range(num_windows)]
        # video / image names are derived from each window's central frame path
        self.video_names = [window[self.interval].split('/')[-2] for window in self.file_list]
        self.image_names = [window[self.interval].split('/')[-1].split('.')[0]
                            for window in self.file_list]
        self.gt_name = None

    def load_image(self, data_list):
        """Open the central frame plus all side frames of one window as RGB."""
        central_stack = Image.open(data_list[self.interval]).convert('RGB')
        gt = None
        side_stack = [Image.open(path).convert('RGB')
                      for pos, path in enumerate(data_list) if pos != self.interval]
        return central_stack, gt, side_stack
| StarcoderdataPython |
11341465 | <reponame>stoneyangxu/python-kata
import unittest
def range_50_to_80() -> list:
    """Return the multiples of ten from 50 through 80, inclusive."""
    # list(range(...)) replaces the redundant identity comprehension.
    return list(range(50, 90, 10))
class MyTestCase(unittest.TestCase):
    """Pin down the expected contents of range_50_to_80()."""

    def test_something(self):
        expected = [50, 60, 70, 80]
        self.assertEqual(range_50_to_80(), expected)
if __name__ == '__main__':
    # Run the test case when executed directly.
    unittest.main()
| StarcoderdataPython |
6655106 | from pandas import DataFrame
from .utility import UtilityModel as UtilityModelBase
from churnmodels import conf
import pandas as pd
class UtilityModel(UtilityModelBase):
    """Utility model whose per-behavior coefficients come from a CSV table."""

    def __init__(self, name, df: DataFrame = None):
        self.name = name
        if df is None:
            # fall back to the packaged utility table for this model name
            df = conf.get_csv(name, "utility")
        indexed = df.set_index(df.columns[0])
        self.linear_utility = indexed['util']
        self.behave_names = indexed.index.values
| StarcoderdataPython |
1901551 | <reponame>moazzamwaheed2017/carparkapi<filename>CarParkArcGisApi/CarParkArcGisApi/env/Lib/site-packages/arcgis/features/_data/__init__.py
"""
This Sub Package is Deprecated.
""" | StarcoderdataPython |
9630235 | <filename>tests/test_arrays.py<gh_stars>1-10
import unittest, testcontract
class TestArrays(testcontract.TestContract):
    """Exercise array storage/return behaviour of contracts/arrays.se."""

    def test_set_stored_array(self):
        state = self.reset_state()
        contract = self.reset_contract(state, 0, self.k0)
        stored = [1, 2, 3, 4]
        contract.set_stored_array(stored)
        self.assertEqual(stored, contract.get_stored_array())
        # an empty array round-trips as well
        empty = []
        contract.set_stored_array(empty)
        self.assertEqual(empty, contract.get_stored_array())

    def test_hardcoded_array(self):
        state = self.reset_state()
        contract = self.reset_contract(state, 0, self.k0)
        self.assertEqual(contract.get_hardcoded_array_111(), [1, 1, 1])

    def test_get_stored_array_from_call(self):
        state = self.reset_state()
        contract = self.reset_contract(state, 0, self.k0)
        stored = [1, 2, 3, 4]
        contract.set_stored_array(stored)
        self.assertEqual(stored, contract.get_stored_array_from_call())
        # When outsz is one less than the array size, the returned value keeps
        # the original length but the tail is zero-padded.
        truncated = contract.get_stored_array_from_call_with_outsz_minus_one()
        self.assertEqual(stored[:-1], truncated[:-1])
        self.assertEqual(0, truncated[-1])
def suite():
    """Build the unittest suite bound to the arrays test contract."""
    contract_path = 'contracts/arrays.se'
    return testcontract.make_test_suite(TestArrays, contract_path)
if __name__ == '__main__':
    # Verbose runner for direct invocation.
    unittest.TextTestRunner(verbosity=2).run(suite())
| StarcoderdataPython |
3386167 | import os
import sys
import shutil
src_dir = sys.argv[1]
dst_dir = sys.argv[2]

# Mirror src_dir into dst_dir, skipping Unity-style .meta sidecar files.
for root, dirs, files in os.walk(src_dir):
    for file in files:
        if file.endswith('.meta'):
            continue
        src_file = os.path.join(root, file)
        # os.path.relpath replaces the old str.replace() hack, which broke
        # whenever src_dir's text occurred more than once inside the path and
        # needed a manual leading-'/' trim.
        rel_src_file = os.path.relpath(src_file, src_dir)
        dst_file = os.path.join(dst_dir, rel_src_file)
        # exist_ok avoids the check-then-create race of isdir() + makedirs()
        os.makedirs(os.path.dirname(dst_file), exist_ok=True)
        shutil.copyfile(src_file, dst_file)
    print('finished root:')
    print(root)
6550825 | <gh_stars>1-10
#!/usr/bin/env python3
import argparse
import random
from scapy.all import send, IP, TCP
DEFAULT_PACKETS = 99999999
MAX_PORTS = 65535
def random_IP():
    """Return a random dotted-quad IPv4 address as a string."""
    octets = (random.randint(0, 255) for _ in range(4))
    return ".".join(map(str, octets))
def get_args():
    # Parse CLI arguments: positional target IPv4 plus optional packet
    # count (-a) and destination port (-p); returns (target, port, amount).
    parser = argparse.ArgumentParser(description="Welcome to SYN-Flooder V1\n")
    parser.add_argument('t', help="Victims IPv4-Adress")
    parser.add_argument('-a', type=int,help="Amount of packets (default are infinity)", default=DEFAULT_PACKETS)
    parser.add_argument('-p', type=int,help="Destination Port (default is 80)", default=80)
    args = parser.parse_args()
    return args.t, args.p, args.a
def SYN_Flood(Target_IP, dPort, packets_to_send):
    # Crafts and sends `packets_to_send` TCP SYN packets to Target_IP:dPort
    # via scapy, randomizing source IP, source port, sequence number and
    # window on every packet.
    print("Sending packets...")
    for i in range(packets_to_send):
        seq_n = random.randint(0, MAX_PORTS)
        sPort = random.randint(0, MAX_PORTS)
        Window = random.randint(0, MAX_PORTS)
        src_IP = random_IP()
        packet = IP(dst=Target_IP, src=src_IP)/TCP(sport=sPort, dport=dPort, flags="S", seq=seq_n, window=Window)
        send(packet, verbose=0)
    print("Finished!")
def main():
    # Wire the parsed CLI arguments straight into the sender.
    Target_IP, dPort, packets_to_send = get_args()
    SYN_Flood(Target_IP, dPort, packets_to_send)
if __name__ == "__main__":
    # Script entry point.
    main()
| StarcoderdataPython |
6706343 | <reponame>kevin-de-granta/fleet<filename>lib/python/fleet/core/env_fleet.py
# -*- coding:utf-8 -*-
#
# File Name: env_fleet.py
# Function: Singleton for Env of Fleet Core.
# Created by: <NAME> (Kevin), <EMAIL>
# Created on: 2017/09/30
# Revised hist: revised by _____ on ____/__/__
#
import os
import threading # TODO: chagne FleetEnv into singleton
from fleet.core.env import Env
class FleetCoreEnv(Env):
    """Process-wide singleton Env for the Fleet core.

    Loads the shipped global config, then the user's own config file
    (user settings override global ones), then any table-backed config.
    """

    # singleton storage plus the lock that guards its lazy creation
    instance = None
    mutex = threading.Lock()

    # static configuration locations
    HOME_KEY = 'FLEET_HOME'
    CONF_FILE = 'conf/fleet.ini'

    def __init__(self):
        super(FleetCoreEnv, self).__init__(homeKey=FleetCoreEnv.HOME_KEY)
        self.load_config_files()
        self.load_config_table()

    def load_config_files(self):
        """Load the global config file, then the user-specific overrides."""
        # global config, resolved relative to FLEET_HOME
        self.load(file=FleetCoreEnv.CONF_FILE, relative=True)
        # user-specified config located under $HOME
        userConfFile = self.getConfItem(section='misc', option='user_conf')
        userHome = os.environ.get('HOME')
        userConfFile = userHome + '/' + userConfFile
        self.load(file=userConfFile, relative=False)
        # optional: file-system config
        #sizeMarkerFile = self.getConfItem(section='files', option='size_marker')
        #self.load(file=sizeMarkerFile, relative=True)

    def load_config_table(self):
        # TODO: config from db
        pass

    @staticmethod
    def GetInstance():
        """Double-checked-locking accessor for the singleton instance."""
        if FleetCoreEnv.instance is None:
            # `with` guarantees the lock is released even if __init__ raises;
            # the original acquire()/release() pair would have left the mutex
            # held forever (deadlocking every later caller) on any exception.
            with FleetCoreEnv.mutex:
                if FleetCoreEnv.instance is None:
                    FleetCoreEnv.instance = FleetCoreEnv()
        return FleetCoreEnv.instance
| StarcoderdataPython |
6589756 | import functools
from pathlib import Path
from deprecated import deprecated
from koapy.backend.kiwoom_open_api_plus.core.KiwoomOpenApiPlusTypeLibSpec import (
API_MODULE_PATH,
)
@deprecated
@functools.lru_cache()
def GetAPIModulePath() -> Path:
    # Deprecated shim kept for API compatibility: the path now lives in the
    # type-lib spec module. lru_cache memoizes the (constant) result.
    return API_MODULE_PATH
if __name__ == "__main__":
    # Ad-hoc manual check: print the resolved module path.
    print(GetAPIModulePath())
| StarcoderdataPython |
1660392 | <gh_stars>10-100
import os
import sys
import argparse
# CLI for pointing the batch anonymizer at a SiamMask checkpoint/config
# and a directory of input videos.
parser = argparse.ArgumentParser(description='Anonymize a batch of videos')
parser.add_argument('--resume', default='SiamMask_DAVIS.pth', type=str,
                    metavar='PATH',help='path to latest checkpoint (default: none)')
parser.add_argument('--config', dest='config', default='config_davis.json',
                    help='hyper-parameter of SiamMask in json format')
parser.add_argument('--base_dir', default='../../data/tennis', help='datasets')
args = parser.parse_args()
if __name__ == '__main__':
    # Build and run one video_cleaner.py invocation per .mp4 under base_dir.
    base_command = f"python ../../tools/video_cleaner.py --resume {args.resume} --config {args.config}"
    for subfile in os.listdir(args.base_dir):
        if "mp4" in subfile.lower():
            # NOTE(review): filenames are interpolated into a shell command;
            # names containing quotes/metacharacters will break the command
            # (subprocess.run with an argument list would be safer).
            command = f"{base_command} --base_path '{args.base_dir}/{subfile}' --target_path '../../{subfile[:-4]}/'"
            print(command)
            os.system(command)
| StarcoderdataPython |
6694830 | from allennlp_demo.atis_parser.api import AtisParserModelEndpoint
from allennlp_demo.common.testing import ModelEndpointTestCase
class TestAtisParserModelEndpoint(ModelEndpointTestCase):
    # Smoke-test fixture: the endpoint under test plus one canned predict payload.
    endpoint = AtisParserModelEndpoint()
    predict_input = {"utterance": "show me the flights from detroit to westchester county"}
| StarcoderdataPython |
11246892 | from pathlib import Path
from poetry_polylith_plugin.components import components
from poetry_polylith_plugin.components.bases.constants import dir_name
def get_bases_data(path: Path, ns: str):
    # Bases are just components living under the bases directory (dir_name),
    # so delegate to the generic component scanner.
    return components.get_components_data(path, ns, dir_name)
| StarcoderdataPython |
5131298 | <reponame>Sentimentron/Dracula
__author__ = 'rtownsend'
from nltk.corpus import treebank
# Python 2 script: dump the NLTK Penn Treebank sample as word<TAB>tag lines,
# one blank line between sentences (CoNLL-ish format).
for sentence in treebank.tagged_sents():
    for word, pos in sentence:
        # '-NONE-' marks empty/trace elements in the Treebank; skip them.
        if 'NONE' in pos:
            continue
        print '{}\t{}'.format(word, pos)
    print ''
print '' | StarcoderdataPython |
142902 | <reponame>toolness/usaspending-api<gh_stars>1-10
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2017-02-09 18:40
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated migration: drops the appropriation_account_balances FK
    # from FinancialAccountsByProgramActivityObjectClass.
    dependencies = [
        ('financial_activities', '0013_auto_20170209_1838'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='financialaccountsbyprogramactivityobjectclass',
            name='appropriation_account_balances',
        ),
    ]
| StarcoderdataPython |
11336709 | <reponame>antomuli/NeighborhoodApp
from django.test import TestCase
from mimesis import Generic
from ..models import User
class UserModelTestCase(TestCase):
    """Sanity checks for the User model using mimesis-generated data."""

    def setUp(self):
        self.gen = Generic()
        # Draw ONE full name and split it, so first and last name belong to
        # the same generated person; the original called full_name() twice
        # and combined halves of two unrelated names.
        name_parts = self.gen.person.full_name().split()
        self.new_user = User(
            first_name=name_parts[0],
            last_name=name_parts[1],
            email=self.gen.person.email(),
            raw_password=self.gen.person.password()
        )

    def test_user_instance(self):
        self.assertIsInstance(self.new_user, User)
3437704 | <gh_stars>100-1000
# Copyright 2018 NAVER Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .model.vocab import WordCharVocab
from .model import WordCNNEmbedding
from .trainer import SkipTrainer
from .utils.build_vocab import get_all_char
from .utils.sampler.move_sampler import MovingSampler
import torch
import torch.nn as nn
class Kor2Vec(nn.Module):
"""
Author by junseong.kim, Clova AI Intern
Written at 2018.09.01
Any questions, suggestion and feedback are welcomed anytime :)
<EMAIL> / <EMAIL>
"""
def __init__(self, embed_size=128, char_embed_size=64, word_seq_len=5,
filter_sizes=[2, 3, 4, 5], negative_sample_count=5):
super().__init__()
self.word_seq_len = word_seq_len
self.embed_size = embed_size
self.negative_sample_count = negative_sample_count
self.char_embed_size = char_embed_size
self.filter_sizes = filter_sizes
self.vocab = WordCharVocab(get_all_char())
self.model = WordCNNEmbedding(embed_size=self.embed_size,
char_embed_size=self.char_embed_size,
vocab_size=len(self.vocab),
char_seq_len=self.word_seq_len,
filter_sizes=self.filter_sizes)
def train(self, corpus_path=None, model_path=None, sample_path=None, sample_output_path=None,
window_size=5, negative_sample_count=5, positive_sample_count=4,
batch_size=1024, epochs=10, pre_sequence=False):
if sample_path is None:
sample_path = corpus_path + ".sampled" if sample_output_path is None else sample_output_path
MovingSampler(corpus_path, sample_path, window=window_size, negative_size=negative_sample_count)
# 3. Training Skip-gram with sampled words
print("Training kor2vec")
SkipTrainer(self, sample_path,
output_path=model_path, vocab=self.vocab,
word_seq_len=self.word_seq_len,
negative_sample_count=negative_sample_count,
positive_sample_count=positive_sample_count,
batch_size=batch_size, pre_sequence=pre_sequence).train(epochs)
def embedding(self, sentence, seq_len=None, numpy=False, with_len=False):
if isinstance(sentence, str):
x = self.to_seq(sentence, seq_len, with_len=with_len)
elif isinstance(sentence, list):
x = self.to_seqs(sentence, seq_len, with_len=with_len)
else:
x = None
if with_len:
x, x_seq_len = x
x = self.forward(x)
x = x if not numpy else x.detach().numpy()
return (x, x_seq_len) if with_len else x
def to_seq(self, sentence, seq_len=None, numpy=False, with_len=False):
x, x_seq_len = self.vocab.to_seq(sentence, seq_len=seq_len, word_seq_len=self.word_seq_len, with_len=True)
x = torch.tensor(x).to(self.get_device())
x = x if not numpy else x.numpy()
return (x, x_seq_len) if with_len else x
def to_seqs(self, sentences, seq_len, numpy=False, with_len=False):
sequences = [self.to_seq(sentence, seq_len, with_len=True) for sentence in sentences]
seqs = torch.stack([seq for seq, _seq_len in sequences], dim=0)
seqs = seqs if not numpy else seqs.numpy()
seq_lens = [_seq_len for seq, _seq_len in sequences]
return (seqs, seq_lens) if with_len else seqs
    def forward(self, seq):
        """Run the wrapped embedding model on an index tensor *seq*.

        Calls ``self.model.forward`` directly (not ``self.model(...)``),
        so module call hooks on the inner model are bypassed.
        """
        return self.model.forward(seq)
    def get_device(self):
        """Return the device of the first parameter (assumes all share one device)."""
        return next(self.parameters()).device
@staticmethod
def load(path):
return torch.load(path, map_location={'cuda:0': 'cpu'})
def save(self, path):
origin_device = self.get_device()
torch.save(self.to(torch.device("cpu")), path)
self.to(origin_device)
| StarcoderdataPython |
"""
author: <NAME>
Blender Cycles 2.79
Problem:
Blender cycles uses too much memory when baking multiple objects
Solution (workaround):
Bakes all objects in the selected group sequentially, one after another
to save memory script will restart blender after every bake
bake source in: object_bake_api.c
"""
import bpy
import sys
def bake_sequentially(file_path):
    """Bake the next pending selected mesh object and record progress.

    *file_path* is a temp file whose first line holds the index of the next
    object to bake (second line: total object count).  Each call bakes
    exactly one object so the controlling process can restart Blender
    between bakes to keep Cycles' memory usage down.

    Returns the index of the object baked by this call.
    """
    D = bpy.data
    C = bpy.context
    objects = [i for i in C.selected_objects if i.type == 'MESH']
    # Use temp file to store current index; first line is the next index.
    current_index = 0
    try:
        # Fix: the original opened the file outside any try block, so the
        # documented "first run" (file does not exist yet) raised an
        # uncaught FileNotFoundError; only ValueError was handled.
        with open(file_path, 'r') as f:
            current_index = int(f.readline())
    except (FileNotFoundError, ValueError):
        pass  # first run: file missing or empty -> start at object 0
    # Bake exactly one object: deselect everything, select the target.
    bpy.ops.object.select_all(action='DESELECT')
    objects[current_index].select = True
    # Bake target is the image on the active node of the active material.
    img = objects[current_index].active_material.node_tree.nodes.active.image
    # bake, save & record progress
    if bpy.ops.object.bake(type='DIFFUSE') == {'FINISHED'}:
        print("saving...")
        img.save()
        with open(file_path, 'w') as f:
            total_len = len(objects)
            f.write(str(current_index + 1) + '\n')
            f.write(str(total_len))
    else:
        # Truncate the file on failure: the controller's int() parse then
        # fails, which is how a bake failure is signalled to the main script.
        with open(file_path, 'w') as f:
            pass
    return current_index
if __name__ == "__main__":
    # Blender forwards script arguments only after a literal '--'.
    cli_args = sys.argv
    if "--" in cli_args:
        cli_args = cli_args[cli_args.index("--") + 1:]
    else:
        cli_args = []  # behave as if no args were passed
    # cli_args[0] is the path of the progress/temp file.
    index = bake_sequentially(cli_args[0])
    print("Baking single object done. Tmp file: " + cli_args[0]
          + " Index: " + str(index)
          + '\n\n\n\n')
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.