id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
3280828 | <gh_stars>1000+
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2021 Kyoto University (<NAME>)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
import argparse
import codecs
import re
import regex
parser = argparse.ArgumentParser()
parser.add_argument("text", type=str, help="text file")
args = parser.parse_args()
def main():
    """Print only those lines of the input file whose sentence part contains
    no characters from any excluded script (and is not the Dhivehi exception).

    Each input line is assumed to be "<utt-id> <word> <word> ...": the first
    whitespace-separated token is an identifier, the rest is the sentence.
    """
    # Unicode-property classes (\p{...}) require the third-party `regex`
    # module; plain codepoint ranges use the stdlib `re` module.
    excluded_scripts = [
        regex.compile(r".*\p{Script=Han}+.*"),                  # kanji / Han
        regex.compile(r".*\p{Block=Hiragana}+.*"),
        regex.compile(r".*\p{Block=Katakana}+.*"),
        re.compile(u".*[\u4e00-\u9fa5]+.*"),                    # CJK unified ideographs
        re.compile(u".*[\uac00-\ud7ff]+.*"),                    # Hangul syllables
        regex.compile(r".*\p{Block=Arabic}+.*"),
        regex.compile(r".*\p{Block=Cyrillic}+.*"),
        regex.compile(r".*\p{Block=Devanagari}+.*"),            # Sanskrit/Hindi
        regex.compile(r".*\p{Block=Egyptian_Hieroglyphs}+.*"),
        regex.compile(r".*\p{Block=Ethiopic}+.*"),
        regex.compile(r".*\p{Block=Hebrew}+.*"),
        regex.compile(r".*\p{Block=Armenian}+.*"),
        regex.compile(r".*\p{Block=Thai}+.*"),
        regex.compile(r".*\p{Block=Bengali}+.*"),
        regex.compile(r".*\p{Block=Myanmar}+.*"),
        regex.compile(r".*\p{Block=Georgian}+.*"),
        regex.compile(r".*\p{Block=Lao}+.*"),
    ]

    # exception: Dhivehi (Thaana script) is detected via a marker word,
    # since no block pattern is compiled for it above.
    def is_dhivehi(text):
        return "މާވަށް" in text

    with codecs.open(args.text, "r", encoding="utf-8") as f:
        for line in f:
            line = line.strip()
            sentence = " ".join(line.split(" ")[1:])
            # Keep the line only when no script pattern matches; equivalent
            # to the original chain of `pattern.match(...) is None` checks.
            if (not any(p.match(sentence) for p in excluded_scripts)
                    and not is_dhivehi(sentence)):
                print(line)


if __name__ == "__main__":
    main()
| StarcoderdataPython |
3482755 | from rest_framework import generics
from rest_framework.generics import get_object_or_404
from .models import Course, Evaluate
from .serializers import CourseSerializer, EvaluateSerializer
# ListApiView - Get
# ListCreateApiView - Get / Post
class CourseApiView(generics.ListCreateAPIView):
    """List all courses (GET) or create a new course (POST)."""
    queryset = Course.objects.all()
    serializer_class = CourseSerializer
class EvaluateApiView(generics.ListCreateAPIView):
    """List evaluations (GET) or create one (POST).

    When routed beneath a course (URL kwarg ``course_pk``), only that
    course's evaluations are returned.
    """
    queryset = Evaluate.objects.all()
    serializer_class = EvaluateSerializer

    def get_queryset(self):
        if self.kwargs.get('course_pk'):
            # Bug fix: the original filtered on self.queryset.get('course_id'),
            # which is not a URL-kwarg lookup (QuerySet.get takes keyword
            # lookups) — use the captured course_pk from the URL instead.
            return self.queryset.filter(course_id=self.kwargs.get('course_pk'))
        return self.queryset.all()
# RetrieveUpdateDestroyAPIView - Get/Put/Patch/Delete
# Update and Delete needs ID
class CourseApiManage(generics.RetrieveUpdateDestroyAPIView):
    """Retrieve (GET), update (PUT/PATCH) or delete (DELETE) one course by pk."""
    queryset = Course.objects.all()
    serializer_class = CourseSerializer
class EvaluateApiManage(generics.RetrieveUpdateDestroyAPIView):
    """Retrieve/update/delete a single evaluation.

    Reachable either nested under a course (``course_pk`` [+ ``evaluate_pk``])
    or directly via ``evaluate_pk``.
    """
    queryset = Evaluate.objects.all()
    serializer_class = EvaluateSerializer

    def get_object(self):
        # (removed leftover debug `print(self.kwargs)`)
        # Nested route: both the course and the evaluation pk were captured.
        if self.kwargs.get('course_pk') and self.kwargs.get('evaluate_pk'):
            return get_object_or_404(self.get_queryset(),
                                     course_id=self.kwargs.get('course_pk'),
                                     pk=self.kwargs.get('evaluate_pk'))
        # Only a course pk captured: it is used directly as the object's pk.
        elif self.kwargs.get('course_pk'):
            return get_object_or_404(self.get_queryset(), pk=self.kwargs.get('course_pk'))
        # Flat route: look up by the evaluation pk alone.
        return get_object_or_404(self.get_queryset(), pk=self.kwargs.get('evaluate_pk'))
| StarcoderdataPython |
5083656 | <gh_stars>0
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from novaclient import extension
from novaclient.tests import utils
from novaclient.tests.v1_1.contrib import fakes
from novaclient.v1_1.contrib import baremetal
# Register the baremetal contrib module as a novaclient extension,
# named after the final component of its module path ("baremetal").
extensions = [
    extension.Extension(baremetal.__name__.split(".")[-1], baremetal),
]
# Fake client that records HTTP calls so the tests below can assert on them.
cs = fakes.FakeClient(extensions=extensions)
class BaremetalExtensionTest(utils.TestCase):
    """Exercises the baremetal extension against the fake novaclient,
    asserting both the HTTP call made and the returned resource type."""

    def test_list_nodes(self):
        nodes = cs.baremetal.list()
        cs.assert_called('GET', '/os-baremetal-nodes')
        for node in nodes:
            self.assertIsInstance(node, baremetal.BareMetalNode)

    def test_get_node(self):
        node = cs.baremetal.get(1)
        cs.assert_called('GET', '/os-baremetal-nodes/1')
        self.assertIsInstance(node, baremetal.BareMetalNode)

    def test_create_node(self):
        node = cs.baremetal.create("service_host", 1, 1024, 2048,
                                   "aa:bb:cc:dd:ee:ff")
        cs.assert_called('POST', '/os-baremetal-nodes')
        self.assertIsInstance(node, baremetal.BareMetalNode)

    def test_delete_node(self):
        node = cs.baremetal.get(1)
        cs.baremetal.delete(node)
        cs.assert_called('DELETE', '/os-baremetal-nodes/1')

    def test_node_add_interface(self):
        iface = cs.baremetal.add_interface(1, "bb:cc:dd:ee:ff:aa", 1, 2)
        cs.assert_called('POST', '/os-baremetal-nodes/1/action')
        self.assertIsInstance(iface, baremetal.BareMetalNodeInterface)

    def test_node_remove_interface(self):
        cs.baremetal.remove_interface(1, "bb:cc:dd:ee:ff:aa")
        cs.assert_called('POST', '/os-baremetal-nodes/1/action')

    def test_node_list_interfaces(self):
        cs.baremetal.list_interfaces(1)
        cs.assert_called('GET', '/os-baremetal-nodes/1')
| StarcoderdataPython |
61520 | """
Class to crawl answers mail.ru
"""
import sqlite3
import requests
import re
from bs4 import BeautifulSoup as bs
class Crawler(object):
    """Crawler for questions and answers from otvet.mail.ru.

    Downloads category metadata and question pages and stores them in a
    local SQLite database described by a schema file.
    """

    def __init__(self, categories='all', timeline = 'all', verbose=True,
                 schema_name='schema.sql', db_name='q_database.sqlt',
                 bs_features='lxml'):
        """
        init method for Crawler
        :params:
        categories -- (list) -- categories that should be downloaded
                   -- default val:'all' -- downloads all questions
        timeline   -- (tuple of timestamp) -- download from timeline[0] to timeline[1]
                   -- default val:'all' -- downloads all questions
        verbose    -- (bool) -- if program should output progress
        schema_name-- (str) -- name of sql file that describes the
                               structure of the database
                   -- default val:'schema.sql'
        db_name    -- (str) -- name of database
                   -- default val:'q_database.sqlt'
        bs_features-- (str) -- BeautifulSoup engine used to parse html pages.
                   See https://www.crummy.com/software/BeautifulSoup/bs4/doc/
                   ("Installing a parser" section). In short, if something
                   goes wrong, change to 'html.parser'.
                   -- default val:'lxml'
        """
        self.categories = categories
        self.timeline = timeline
        self.verbose = verbose
        self.schema_name = schema_name
        self.db_name = db_name
        self.bs_features = bs_features
        self.__mail_page = 'https://otvet.mail.ru'
        # Categories that are never crawled.
        self.__exclude = ['Золотой фонд', 'О проектах Mail.Ru', 'Другое']
        # Extracts the numeric question id from a question URL.
        self.__reg_q_number = re.compile('[\d]+')

    def __get_cats2sql(self, cats):
        """Convert category <a> tags into (id, 'name', 'link') tuples,
        pre-quoted for direct interpolation into an INSERT statement.

        NOTE(review): values are quoted by hand and later pasted into the
        SQL text; parameterized queries would be safer.
        """
        if self.categories != 'all':
            return [(str(j),                      # id; autoincrement
                     '\'' + itm.text + '\'',      # name
                     '\'' + itm['href'] + '\'')   # link
                    for j, itm in enumerate(cats)
                    if itm.text in self.categories
                    and itm.text not in self.__exclude]
        else:
            return [(str(j),                      # id; autoincrement
                     '\'' + itm.text + '\'',      # name
                     '\'' + itm['href'] + '\'')   # link
                    for j, itm in enumerate(cats)
                    if itm.text not in self.__exclude]

    def __get_subcats2sql(self, cats, i, parent_name, start_id):
        """Convert sub-category <a> tags into (id, parent_id, 'name', 'link')
        tuples for SQL insertion.

        i -- id of the parent category
        start_id -- first id to assign (ids continue across parents)
        """
        if self.categories != 'all':
            return [(str(start_id + j),           # id; autoincrement
                     str(i),                      # parent_id
                     '\'' + itm.text + '\'',      # name
                     '\'' + itm['href'] + '\'')   # link
                    for j, itm in enumerate(cats)
                    if itm.text in self.categories
                    and itm.text not in self.__exclude
                    and parent_name not in self.__exclude
                    and itm.text not in self.parent_cats]
        else:
            return [(str(start_id + j),           # id; autoincrement
                     str(i),                      # parent_id
                     '\'' + itm.text + '\'',      # name
                     '\'' + itm['href'] + '\'')   # link
                    for j, itm in enumerate(cats)
                    if itm.text not in self.__exclude
                    and parent_name not in self.__exclude
                    and itm.text not in self.parent_cats]

    def __fetch_latest_question_id(self):
        """
        Loads the main page of `otvet.mail.ru` and gets the `id` of the
        latest question. Sets it on `self.latest_question` and returns it.
        """
        page = self.get_page(params=['/open/'])
        soup = bs(page, self.bs_features)
        latest_q = soup.find('a', 'blue item__text')
        self.latest_question = self.__reg_q_number.search(latest_q['href']).group(0)
        return self.latest_question

    def __is_valid_page(self, soup):
        """Checks that the page is not a 'Вопрос не найден' error page and
        that the question belongs to a category we want to crawl."""
        # TODO: add time constrains
        content = soup.find('div', 'b-page__content')
        if content:
            if content.text == 'Вопрос не найден..':
                return False
        else:
            category = soup.find('a', 'black list__title list__title').text.strip()
            if category not in self.__exclude:
                if self.categories == 'all' or category in self.categories:
                    return True
        return False

    def __select_id_from(self, table, like):
        """Return the `id` of the first row of *table* whose name matches
        *like* (whitespace-stripped).

        NOTE(review): the table name and value are interpolated into the SQL
        string; acceptable for trusted internal data, but parameterized
        queries would be safer.
        """
        like_s = like.strip()
        c = self.db.cursor()
        query = c.execute('SELECT `id` FROM {} \
            WHERE `name` LIKE \'{}\''.format(table, like_s))
        category_id = query.fetchone()[0]
        self.db.commit()
        return category_id

    def __get_selected_category_and_sub(self, soup):
        """Resolve (category_id, sub_category_id) for a question page;
        sub_category_id is None when no sub-category link is selected."""
        category = soup.find('a', 'black list__title list__title')
        sub_category = soup.find('a', 'medium item item_link selected')
        cat_id = self.__select_id_from('categories', category.text)
        if sub_category:
            sub_cat_id = self.__select_id_from('sub_categories', sub_category.text)
        else:
            sub_cat_id = None
        return cat_id, sub_cat_id

    def get_db(self):
        """Returns the database connection, creating it on first use."""
        if not hasattr(self, 'db'):
            self.db = sqlite3.connect(self.db_name)
            self.db.row_factory = sqlite3.Row
        return self.db

    def init_db(self):
        """Initializes the database from the schema sql file."""
        self.get_db()
        with open(self.schema_name, 'r') as f:
            self.db.executescript(f.read())
        self.db.commit()

    def close_db(self):
        """Closes the connection to the database, if one was opened."""
        if hasattr(self, 'db'):
            self.db.close()

    def get_page(self, params=None):
        """
        Gets the page at self.__mail_page + params.
        params usually would be ['/question/', '<question_id>/'].
        :returns: string of page or None on any non-200 status
        """
        if params:
            url = self.__mail_page + ''.join(params)
        else:
            url = self.__mail_page
        r = requests.get(url)
        if r.status_code == 200:
            return r.text
        else:
            return None

    def add_to_database(self, table, items):
        """Add tuples from *items* to *table*.

        Each item must already be a tuple of SQL-ready strings (see
        __get_cats2sql). NOTE(review): values are interpolated directly into
        the statement text; parameterized queries would avoid quoting and
        injection issues.
        """
        try:
            c = self.db.cursor()
            for item in items:
                item_for_db = ', '.join(item)
                if self.verbose:   # honor the verbose flag instead of always printing
                    print(item_for_db)
                c.execute('INSERT INTO {t} VALUES({i})'.format(t=table, i=item_for_db))
            self.db.commit()
        except sqlite3.Error as err:
            # Chain the original error instead of discarding it (the previous
            # bare `except:` also re-branded unrelated errors as sqlite errors).
            raise sqlite3.Error('Unable to insert items into {}'.format(table)) from err

    def get_categories(self, page=None):
        """
        Downloads category links.
        :param: page -- (list) -- either ['/name_of_category/'] to get
                subcategories, or None to get parent categories
                -- default val:None
        :returns: (list) -- list of <a>...</a> tags with category names and links
        """
        text_page = self.get_page(page)
        soup = bs(text_page, self.bs_features)
        categories = soup.find_all('a', 'medium item item_link')
        return categories

    def add_categories_to_db(self):
        """
        Downloads categories and subcategories and saves them to the database.
        """
        categories = self.get_categories()
        # itm looks like: <a class="medium item item_link" href="/autosport/" name="">Автоспорт</a>,
        # so text = Автоспорт and 'href' = /autosport/
        cats2sql = self.__get_cats2sql(categories)
        self.add_to_database(table='categories', items=cats2sql)
        self.parent_cats = [cat.text for cat in categories]
        sub2sql = []
        j = 0   # running sub-category id, continuous across parents
        for i, c in enumerate(categories):
            par_name = c.text
            href = c['href']
            sub_categories = self.get_categories(page=href)
            sub2sql.extend(self.__get_subcats2sql(sub_categories, i, par_name, j))
            j += len(sub_categories)
        self.add_to_database(table='sub_categories',
                             items=sub2sql)

    def get_latest_question_id(self):
        """Gets latest question id from the database; if the table is empty,
        fetches one from the web."""
        c = self.db.cursor()
        resp = c.execute('SELECT max(`id`) FROM questions')
        latest_q = resp.fetchone()
        self.db.commit()
        # An aggregate query always yields exactly one row; on an empty table
        # that row is (None,), so check the value, not just the row (the
        # original `if latest_q:` wrongly returned None in that case).
        if latest_q and latest_q[0] is not None:
            self.latest_question = latest_q[0]
            return latest_q[0]
        else:
            return self.__fetch_latest_question_id()

    def fetch_pages(self, from_id, to_id):
        """
        Generator for pages. Yields only pages that exist and are valid.
        :params:
        from_id -- (int) -- number of question to start from
        to_id   -- (int) -- number of last question (exclusive)
        :yields:
        (page_id, BeautifulSoup object) tuple
        """
        for p_id in range(from_id, to_id):
            page = self.get_page(['/question/', '{}/'.format(p_id)])
            # on error (e.g. 404), get_page returns None
            if page:
                # Skip pages that say "Вопрос не найден" or are filtered out.
                soup = bs(page, self.bs_features)
                if self.__is_valid_page(soup):
                    yield(p_id, soup)

    def retrieve_data(self, soup_page):
        """
        Gets the relevant fields from a BeautifulSoup-parsed question page.
        :params:
        soup_page -- BeautifulSoup-parsed page
        :returns:
        tuple of (title, category_id, sub_category_id, author_comments, answers)
        where author_comments and answers are None when absent
        """
        title = soup_page.find('h1', 'q--qtext').text
        cat_id, sub_cat_id = self.__get_selected_category_and_sub(soup_page)
        raw_comments = soup_page.find_all('div', 'q--qcomment medium')
        if raw_comments:
            comments = ' '.join([q.text for q in raw_comments])
        else:
            comments = None
        raw_answers = soup_page.find_all('div', 'a--atext atext')
        if raw_answers:
            answers = [a.text for a in raw_answers]
        else:
            answers = None
        return title, cat_id, sub_cat_id, comments, answers

    def download_all_questions(self):
        """Crawl a range of questions and store them with their answers.

        NOTE(review): the range is hard-coded to (0, 10) — presumably a
        development limit; confirm before production use.
        """
        for i, page in self.fetch_pages(0, 10):
            title, cat_id, sub_cat_id, text, answers = self.retrieve_data(page)
            c = self.db.cursor()
            q_4_db = (str(i), str(cat_id), str(sub_cat_id), str(title), str(text))
            c.execute('INSERT INTO questions VALUES(?, ?, ?, ?, ?)', q_4_db)
            # retrieve_data returns None (not []) for unanswered questions.
            for a in answers or []:
                a_4_db = (str(i), str(a))
                c.execute('INSERT INTO answers(`question_id`, `a_text`) VALUES(?, ?)', a_4_db)
        # Persist the whole batch (the original never committed here, so the
        # inserts were lost when the connection closed).
        self.db.commit()
| StarcoderdataPython |
9755943 | # Generated by Django 2.1.1 on 2019-07-16 00:45
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: drop user.first_name/last_name; add
    workflowexecution.username_pr (CharField, default 'NA')."""

    dependencies = [
        ('blackswan', '0008_auto_20190716_0044'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='user',
            name='first_name',
        ),
        migrations.RemoveField(
            model_name='user',
            name='last_name',
        ),
        migrations.AddField(
            model_name='workflowexecution',
            name='username_pr',
            field=models.CharField(default='NA', max_length=256),
        ),
    ]
| StarcoderdataPython |
1623332 | <filename>shift_detector/precalculations/binning_precalculation.py
import pandas as pd
from pandas import DataFrame
from shift_detector.precalculations.precalculation import Precalculation
from shift_detector.utils.column_management import ColumnType, is_categorical
class BinningPrecalculation(Precalculation):
    """Precalculation that bins numerical columns of both datasets with a
    shared binning, so the two halves are directly comparable."""

    def __init__(self, bins=50):
        self.bins = bins

    def __eq__(self, other):
        if isinstance(other, self.__class__):
            return self.bins == other.bins
        return False

    def __hash__(self):
        return hash((self.__class__, self.bins))

    def process(self, store):
        """
        Bin the numerical columns of the datasets that do not
        suffice the categorical criterion.
        :param store: Store
        :return: The binned version of numerical columns
        """
        df1, df2 = store[ColumnType.numerical]
        n_rows_df1 = len(df1)
        n_rows_df2 = len(df2)
        # Concatenate both datasets so pd.cut derives one shared bin grid.
        combined = pd.concat([df1, df2])
        binned = DataFrame()
        for name in store.column_names(ColumnType.numerical):
            series = combined[name]
            if is_categorical(series):
                # Already few enough distinct values — keep as-is.
                binned[name] = series
            else:
                binned["{}_binned".format(name)] = pd.cut(series, self.bins)
        # Split the combined frame back into the two original datasets.
        return binned.head(n_rows_df1), binned.tail(n_rows_df2)
| StarcoderdataPython |
4838535 | <reponame>0x8b/HackerRank
#!/usr/bin/env python
import fileinput
import re
from collections import Counter, defaultdict
from operator import itemgetter
# Pattern for one node line: name, weight, then any child names.
c = re.compile(r"(\w+)")
values = {}                    # node name -> its own weight
children = defaultdict(list)   # node name -> list of child names
lines = [line for line in fileinput.input()]
# The root's name is the least common lowercase token in the whole input
# (every other node name also appears in some child list).
# Appears to be an Advent of Code 2017 day 7 solution — confirm.
head = Counter(re.compile(r"([a-z]+)").findall("\n".join(lines))).most_common()[-1][0]
assert head == "cqmvs"   # hard-coded expected root for this particular input
def parse(line):
    # First token is the node name, second its weight, the rest children.
    name, weight, *ch = c.findall(line)
    values[name] = int(weight)
    children[name] = ch
# NOTE: comprehension used purely for side effects; `data` is a list of Nones.
data = [parse(line) for line in lines]
ans = None
def t(name):
    # Returns the total weight of the tower rooted at *name*; as a side
    # effect records in the global `ans` the corrected weight of the single
    # unbalanced node. NOTE(review): `if not ans` would also re-trigger if
    # the correct answer were 0 — presumably impossible for this input.
    global ans
    if children[name]:
        v, s = zip(*[(values[n], t(n)) for n in children[name]])
        if len(set(s)) > 1:
            # One child subtree weighs differently from its siblings:
            # the least common subtree weight is the odd one out.
            mc = list(map(itemgetter(0), Counter(s).most_common()))
            diff = mc[-1] - mc[0]
            if not ans:
                ans = v[s.index(mc[-1])] - diff
        return values[name] + sum(s)
    else:
        return values[name]
t(head)
assert ans == 2310   # hard-coded expected answer for this particular input
| StarcoderdataPython |
5199441 | <reponame>cfpb/regulations-parser
# vim: set encoding=utf-8
from regparser.tree.appendix.tree import trees_from as appendix_trees
from regparser.tree.interpretation import build as build_interp_tree
from regparser.tree.reg_text import build_reg_text_tree
from regparser.tree.supplement import find_supplement_start
import re
def find_cfr_part(text):
    """Figure out what CFR part this text is referring to.

    Returns the part number as an int, or None when no heading matches.
    """
    # Python 2 unicode-raw literal (ur"..."). Without re.MULTILINE the ^
    # anchor only matches at the very start of *text* — presumably the
    # "PART <n>" heading opens the document; confirm with callers.
    for match in re.finditer(ur"^PART (\d+)[-—\w]", text):
        return int(match.group(1))
def build_whole_regtree(text):
    """Combine the output of numerous functions to get to a whole regulation
    tree: the regulation body, its appendices, and (when present) the
    interpretations supplement."""
    part = find_cfr_part(text)
    reg_tree = build_reg_text_tree(text, part)
    appendices = appendix_trees(text, part, reg_tree.label)
    reg_tree.children.extend(appendices)
    # The interpretations supplement is optional; find_supplement_start
    # returns None when it is absent.
    supplement_start = find_supplement_start(text)
    if supplement_start is not None:
        interps = build_interp_tree(text[supplement_start:], part)
        reg_tree.children.append(interps)
    return reg_tree
| StarcoderdataPython |
3561412 | <filename>nara/parse_diff.txt.py<gh_stars>0
import os
from shutil import copyfile
# Each relevant line of new_csv.txt looks like "Only in new_csvs: <file>"
# (presumably `diff -rq` output — confirm). Copy every such file from the
# new_csvs directory into csvs_not_in_dlcs.
with open("new_csv.txt") as f:
    for l in f.readlines():
        filename = os.path.join(
            "/Users/matt.mcgrattan/Documents/Github/ida-treaty-explorer-data/nara/new_csvs",
            l.replace("Only in new_csvs: ", "").strip(),
        )
        copyfile(filename,
                 f"/Users/matt.mcgrattan/Documents/Github/ida-treaty-explorer-data/nara/csvs_not_in_dlcs/{l.replace('Only in new_csvs: ', '').strip()}")
| StarcoderdataPython |
6629750 | <gh_stars>1000+
#
# This file is part of pretix (Community Edition).
#
# Copyright (C) 2014-2020 <NAME> and contributors
# Copyright (C) 2020-2021 rami.io GmbH and contributors
#
# This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General
# Public License as published by the Free Software Foundation in version 3 of the License.
#
# ADDITIONAL TERMS APPLY: Pursuant to Section 7 of the GNU Affero General Public License, additional terms are
# applicable granting you additional permissions and placing additional restrictions on your usage of this software.
# Please refer to the pretix LICENSE file to obtain the full terms applicable to this work. If you did not receive
# this file, see <https://pretix.eu/about/en/license>.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License along with this program. If not, see
# <https://www.gnu.org/licenses/>.
#
from django.db import transaction
from rest_framework import status, viewsets
from rest_framework.decorators import action
from rest_framework.exceptions import ValidationError
from rest_framework.filters import OrderingFilter
from rest_framework.mixins import CreateModelMixin, DestroyModelMixin
from rest_framework.response import Response
from rest_framework.settings import api_settings
from pretix.api.serializers.cart import (
CartPositionCreateSerializer, CartPositionSerializer,
)
from pretix.base.models import CartPosition
from pretix.base.services.locking import NoLockManager
class CartPositionViewSet(CreateModelMixin, DestroyModelMixin, viewsets.ReadOnlyModelViewSet):
    """REST endpoint for cart positions created through the API.

    Only positions whose cart_id ends in "@api" are exposed, i.e. carts
    created by API clients rather than web-shop sessions.
    """
    serializer_class = CartPositionSerializer
    queryset = CartPosition.objects.none()  # real queryset is built in get_queryset()
    filter_backends = (OrderingFilter,)
    ordering = ('datetime',)
    ordering_fields = ('datetime', 'cart_id')
    lookup_field = 'id'
    permission = 'can_view_orders'
    write_permission = 'can_change_orders'

    def get_queryset(self):
        # Restrict to API-created carts of the current event.
        return CartPosition.objects.filter(
            event=self.request.event,
            cart_id__endswith="@api"
        ).select_related('seat').prefetch_related('answers')

    def get_serializer_context(self):
        ctx = super().get_serializer_context()
        ctx['event'] = self.request.event
        # Shared per-request cache for quota lookups.
        ctx['quota_cache'] = {}
        return ctx

    def create(self, request, *args, **kwargs):
        """Create a single cart position inside an event-level lock."""
        serializer = CartPositionCreateSerializer(data=request.data, context=self.get_serializer_context())
        serializer.is_valid(raise_exception=True)
        # Lock the event so concurrent requests cannot oversell quota.
        with transaction.atomic(), self.request.event.lock():
            self.perform_create(serializer)
            cp = serializer.instance
        # Re-serialize with the read serializer for the response body.
        serializer = CartPositionSerializer(cp, context=serializer.context)
        headers = self.get_success_headers(serializer.data)
        return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)

    @action(detail=False, methods=['POST'])
    def bulk_create(self, request, *args, **kwargs):
        """Create many cart positions; the response reports per-item
        success/data/errors in input order."""
        if not isinstance(request.data, list):  # noqa
            return Response({"error": "Please supply a list"}, status=status.HTTP_400_BAD_REQUEST)
        ctx = self.get_serializer_context()
        with transaction.atomic():
            serializers = [
                CartPositionCreateSerializer(data=d, context=ctx)
                for d in request.data
            ]
            lockfn = self.request.event.lock
            # If no item validates, skip acquiring the event lock entirely.
            if not any(s.is_valid(raise_exception=False) for s in serializers):
                lockfn = NoLockManager
            results = []
            with lockfn():
                for s in serializers:
                    if s.is_valid(raise_exception=False):
                        try:
                            cp = s.save()
                        except ValidationError as e:
                            # save() may still fail (e.g. quota exhausted
                            # by an earlier item in this same batch).
                            results.append({
                                'success': False,
                                'data': None,
                                'errors': {api_settings.NON_FIELD_ERRORS_KEY: e.detail},
                            })
                        else:
                            results.append({
                                'success': True,
                                'data': CartPositionSerializer(cp, context=ctx).data,
                                'errors': None,
                            })
                    else:
                        results.append({
                            'success': False,
                            'data': None,
                            'errors': s.errors,
                        })
        return Response({'results': results}, status=status.HTTP_200_OK)

    def perform_create(self, serializer):
        serializer.save()
| StarcoderdataPython |
# Interactive script: read a list length, then that many integers (one per
# prompt), and report the list, its sum, and its product.
n = int(input('Input the length of the list: '))
numbers = [int(input(f'Input {i} index number: ')) for i in range(n)]
print("Our manual entered list %s " % numbers)
print("The sum of numbers %d" % sum(numbers))
# Running product of all entered numbers (stays 1 for an empty list).
total = 1
for i in numbers:
    total *= i
print("The mult of numbers %d" % total)
| StarcoderdataPython |
6468085 | <gh_stars>0
from typing import Optional
from tortoise.contrib.pydantic import PydanticModel
from db.base.schemas import GetFile
from db.project.models import Project
class CreateProject(PydanticModel):
    """Payload schema for creating a Project; fields mirror the
    db.project.models.Project tortoise model."""
    name: str
    city: str
    address: str
    title: str
    description: str
    type: str
    square: float       # presumably floor area — confirm against the model
    count_levels: int
    year: int

    class Config:
        # Tortoise ORM model this pydantic schema is derived from.
        orig_model = Project
class GetProject(CreateProject):
    """Read schema: all creation fields plus the id and optional files."""
    id: int
    preview: Optional[GetFile]
    image: Optional[GetFile]
| StarcoderdataPython |
3458012 | <gh_stars>1-10
"""
(c) ΔQ Programming LLP, 2021
This program is free software; you may redistribute and/or modify
it under the terms of the Apache License v2.0.
"""
import os
import pathlib
import requests
from django.core.management.base import BaseCommand
class Command(BaseCommand):
    """
    A management command that fetches and installs the latest ROR support
    """
    help = "Installs ROR functionality into Thoth components"

    def handle(self, *args, **options):
        # Latest ROR data-dump records on Zenodo, newest first.
        url = 'https://zenodo.org/api/records/' \
              '?communities=ror-data&sort=mostrecent'
        meta_response = requests.get(url)
        # NOTE(review): the response is only printed, never parsed or stored;
        # the success message below does not reflect any actually-installed
        # fixtures — confirm whether this command is still a stub.
        print(meta_response)
        print("ROR fixtures installed. At next Thoth sync, ROR functionality "
              "will be enabled.")
| StarcoderdataPython |
5153523 | <filename>ressources/migrations/0008_auto_20200914_0007.py<gh_stars>0
# Generated by Django 3.0.3 on 2020-09-13 23:07
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: order faculties by name; add Department.level;
    alter Course.semester choices/default."""

    dependencies = [
        ('ressources', '0007_auto_20200909_1811'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='faculty',
            options={'ordering': ('name',)},
        ),
        migrations.AddField(
            model_name='department',
            name='level',
            field=models.FloatField(null=True, verbose_name='Niveau'),
        ),
        migrations.AlterField(
            model_name='course',
            name='semester',
            field=models.CharField(choices=[('Semestre 1', 'Semestre 1'), ('Semestre 2', 'Semestre 2')], default='Semestre 1', max_length=200, null=True, verbose_name='Semestre'),
        ),
    ]
| StarcoderdataPython |
5046551 | import os
import unittest
import random
from src.hash import Hasher
from src.enums import CryptoSrcEnum
class TestEncryption(unittest.TestCase):
    """Round-trip tests for Hasher with the scrypt (CRYPTO) and NaCl
    backends; each case runs 10 rounds with fresh random inputs."""

    def test_valid_scrypt_hash(self):
        # Hashing a message and validating the same message must succeed.
        for _ in range(10):
            salt = os.urandom(32)
            message = os.urandom(32)
            hasher = Hasher(CryptoSrcEnum.CRYPTO, salt)
            # Only the first len(salt) bytes of the digest are kept —
            # presumably validate() expects that truncated form; confirm.
            cryptoblock = hasher.hash(message)[:len(salt)]
            self.assertTrue(hasher.validate(message, cryptoblock))

    def test_invalid_scrypt_hash(self):
        # Corrupting one byte of the message must make validation fail.
        for _ in range(10):
            salt = os.urandom(32)
            message = os.urandom(32)
            hasher = Hasher(CryptoSrcEnum.CRYPTO, salt)
            cryptoblock = hasher.hash(message)[:len(salt)]
            message = list(message)
            # NOTE(review): if the chosen byte already equals 11 the message
            # is unchanged and this round fails spuriously (~1/256 chance
            # per round) — consider XOR-flipping a bit instead.
            message[random.randrange(0, len(message))] = 11
            message = bytes(message)
            self.assertFalse(hasher.validate(message, cryptoblock))

    def test_invalid_scrypt_salt(self):
        # Validating the original message under a different salt must fail.
        for _ in range(10):
            salt = os.urandom(32)
            message = os.urandom(32)
            hasher = Hasher(CryptoSrcEnum.CRYPTO, salt)
            cryptoblock = hasher.hash(message)[:len(salt)]
            salt = os.urandom(32)
            hasher = Hasher(CryptoSrcEnum.CRYPTO, salt)
            self.assertFalse(hasher.validate(message, cryptoblock))

    def test_valid_nacl_hash(self):
        # The NaCl backend takes no explicit salt.
        for _ in range(10):
            message = os.urandom(32)
            hasher = Hasher(CryptoSrcEnum.NACL)
            cryptoblock = hasher.hash(message)
            self.assertTrue(hasher.validate(message, cryptoblock))

    def test_invalid_nacl_hash(self):
        # Corrupting one byte must make NaCl validation fail.
        for _ in range(10):
            message = os.urandom(32)
            hasher = Hasher(CryptoSrcEnum.NACL)
            cryptoblock = hasher.hash(message)
            message = list(message)
            # Same ~1/256-per-round caveat as the scrypt variant above.
            message[random.randrange(0, len(message))] = 11
            message = bytes(message)
            self.assertFalse(hasher.validate(message, cryptoblock))
| StarcoderdataPython |
1816989 | <reponame>darrencheng0817/AlgorithmLearning
'''
Created on 2016年2月16日
@author: Darren
'''
'''
Given a binary tree, find the largest subtree which is a Binary Search Tree (BST), where largest means subtree with largest number of nodes in it.
Note:
A subtree must include all of its descendants.
Here's an example:
10
/ \
5 15
/ \ \
1 8 7
The Largest BST Subtree in this case is the highlighted one.
The return value is the subtree's size, which is 3.
'''
class Solution(object):
    def largestBSTSubtree(self, root):
        """
        :type root: TreeNode
        :rtype: int

        Post-order walk: each call reports, for its subtree,
        (best, size, lo, hi) where `best` is the largest BST found so far,
        `size` is the subtree's own node count when it is a BST (else -inf),
        and lo/hi bound the values seen in it.
        """
        POS = float('inf')
        NEG = float('-inf')

        def walk(node):
            if node is None:
                # Empty subtree: a trivially valid BST of size 0 whose
                # bounds never constrain the parent.
                return 0, 0, POS, NEG
            best_l, size_l, lo_l, hi_l = walk(node.left)
            best_r, size_r, lo_r, hi_r = walk(node.right)
            if hi_l < node.val < lo_r:
                size = size_l + size_r + 1
            else:
                # Not a BST here; -inf poisons any ancestor's size too.
                size = NEG
            return (max(best_l, best_r, size), size,
                    min(lo_l, node.val), max(node.val, hi_r))

        return walk(root)[0]
5078669 | import requests
import pandas as pd
import util
# Maps each GitHub repository name to its cryptocurrency ticker symbol.
currency_symbols = {
    'bitcoin': 'BTC',
    'SuperNET': 'KMD',
    'eos': 'EOS',
    'cardano-sl': 'ADA',
    'lisk': 'LSK',
    '0x.js': 'ZRX',
    'skycoin': 'SKY'
}
def parse_contributors(data):
    """Count, for each of the last 52 weeks, how many contributors made
    at least one commit.

    :param data: list of GitHub contributor-stats dicts, each with a
                 'weeks' list of {'c': commit_count, ...} entries
    :return: list of 52 per-week contributor counts
    """
    weekly_counts = [0] * 52
    for contributor in data:
        recent_weeks = contributor['weeks'][-52:]
        for idx, week in enumerate(recent_weeks):
            if week['c'] != 0:
                weekly_counts[idx] += 1
    return weekly_counts
def parse_activities(data):
    """Extract the weekly commit totals from GitHub's commit_activity payload.

    :param data: list of {'total': int, ...} dicts, one per week
    :return: list of weekly commit counts, in the same order
    """
    return [week['total'] for week in data]
def parse_frequencies(data):
    """Split the last 52 [timestamp, additions, deletions] triples into
    three parallel lists; deletions are returned as absolute values.

    :param data: list of [week_timestamp, additions, deletions] triples
    :return: [timestamps, additions, deletions]
    """
    timestamps, additions, deletions = [], [], []
    for timestamp, added, deleted in data[-52:]:
        timestamps.append(timestamp)
        additions.append(added)
        deletions.append(abs(deleted))
    return [timestamps, additions, deletions]
def parse_historical_prices(data, currency):
    """Return [BTC_price, EUR_price] for *currency* from a cryptocompare
    pricehistorical payload, keyed by the currency's ticker symbol."""
    symbol = currency_symbols[currency]
    entry = data[symbol]
    return [entry['BTC'], entry['EUR']]
def request_contributors(url, user, repo, token):
    """GET /repos/<user>/<repo>/stats/contributors and return parsed JSON."""
    request_url = url + user + '/' + repo + '/' + 'stats/contributors'
    # NOTE(review): GitHub documents 'Authorization: token <token>'; the
    # 'access_token <token>' scheme used here may be ignored — verify.
    r = requests.get(request_url, headers={'Authorization': 'access_token ' + token}).json()
    return r
def request_activity(url, user, repo, token):
    """GET /repos/<user>/<repo>/stats/commit_activity and return parsed JSON."""
    request_url = url + user + '/' + repo + '/' + 'stats/commit_activity'
    # NOTE(review): same questionable 'access_token' auth scheme as
    # request_contributors — verify against GitHub's documented header.
    r = requests.get(request_url, headers={'Authorization': 'access_token ' + token}).json()
    return r
def request_frequency(url, user, repo, token):
    """GET /repos/<user>/<repo>/stats/code_frequency and return parsed JSON."""
    request_url = url + user + '/' + repo + '/' + 'stats/code_frequency'
    # NOTE(review): same questionable 'access_token' auth scheme as
    # request_contributors — verify against GitHub's documented header.
    r = requests.get(request_url, headers={'Authorization': 'access_token ' + token}).json()
    return r
def request_historical_price(currency, timestamp):
    """Fetch BTC/EUR prices of *currency* at *timestamp* (unix seconds)
    from the cryptocompare pricehistorical endpoint; returns parsed JSON."""
    currency_symbol = currency_symbols[currency]
    request_url = 'https://min-api.cryptocompare.com/data/pricehistorical?' \
                  'fsym=' + currency_symbol + '&tsyms=BTC,EUR&ts='+str(timestamp)
    r = requests.get(request_url).json()
    return r
def get_currency_data(user, repo):
    """Build a 52-week DataFrame combining GitHub activity statistics for
    *user*/*repo* with historical BTC/EUR prices of the matching currency.

    Columns: timestamp, contributors_<SYM>, commits_<SYM>, additions_<SYM>,
    deletions_<SYM>, BTC_price_<SYM>, EUR_price_<SYM>.
    (Removed dead `user = user` / `repo = repo` self-assignments and the
    leftover debug `print(columns)`.)
    """
    url = 'https://api.github.com/repos/'
    token = util.get_auth_token()
    contributors_list = parse_contributors(request_contributors(url, user, repo, token))
    activity_list = parse_activities(request_activity(url, user, repo, token))
    frequencies_list = parse_frequencies(request_frequency(url, user, repo, token))
    symbol = currency_symbols[repo]
    columns = ['timestamp', 'contributors_' + symbol, 'commits_' + symbol,
               'additions_' + symbol, 'deletions_' + symbol,
               'BTC_price_' + symbol, 'EUR_price_' + symbol]
    df = pd.DataFrame(columns=columns)
    # NOTE(review): assumes GitHub returned a full 52 weeks of frequency
    # data; a shorter history would raise IndexError here — confirm.
    for i in range(0, 52):
        json_historical_prices = request_historical_price(repo, frequencies_list[0][i])
        historical_prices = parse_historical_prices(json_historical_prices, repo)
        row = [frequencies_list[0][i], contributors_list[i], activity_list[i],
               frequencies_list[1][i], frequencies_list[2][i],
               historical_prices[0], historical_prices[1]]
        df.loc[i] = row
    return df
| StarcoderdataPython |
3425233 | #!/usr/bin/env python3
""" Testing suite to validate the analysis """
import unittest
import os
import numpy as np
from sauron import saruman
class TestCellFinder(unittest.TestCase):
""" Class to test the analysis of the image """
    def setUp(self):
        """Load the two-channel (640 nm / 488 nm) test stack and crop out
        one reference cell used by all the tests below."""
        data_dir = os.path.abspath(os.path.join(__file__, '../data'))
        red_file = os.path.join(data_dir, 'NS_10_10_PEG50_Z_ave-8_640.tif')
        green_file = os.path.join(data_dir, 'NS_10_10_PEG50_Z_ave-8_488.tif')
        self.stack = saruman.read_stack(red_file, green_file)
        # z-slice index and row/column crop window of the reference cell —
        # values chosen for this specific data set.
        self.iz = 23
        self.row_min = 95
        self.row_max = 164
        self.col_min = 405
        self.col_max = 470
        # Copy so tests cannot mutate the shared stack.
        self.cell = np.copy(
            self.stack[self.iz][
                self.row_min:self.row_max,
                self.col_min:self.col_max, :])
    def test_get_rgb(self):
        """ Tests the 'get_rgb' function """
        # Sample a 4x4 grid of sub-pixel coordinates inside the cropped cell.
        x_vals = np.linspace(10, 12, 4)
        y_vals = np.linspace(10, 12, 4)
        # Reference interpolated (r, g, b) values for each grid point,
        # indexed as expected_rgb_vals[x_idx][y_idx]; blue is always 0
        # (only red/green channels are loaded).
        expected_rgb_vals = [[[] for iy in range(4)] for ix in range(4)]
        expected_rgb_vals[0][0] = [0.21829, 0.22799, 0.00000]
        expected_rgb_vals[0][1] = [0.22468, 0.26831, 0.00000]
        expected_rgb_vals[0][2] = [0.25098, 0.30971, 0.00000]
        expected_rgb_vals[0][3] = [0.29720, 0.35218, 0.00000]
        expected_rgb_vals[1][0] = [0.23304, 0.27499, 0.00000]
        expected_rgb_vals[1][1] = [0.25582, 0.31500, 0.00000]
        expected_rgb_vals[1][2] = [0.28638, 0.35878, 0.00000]
        expected_rgb_vals[1][3] = [0.32473, 0.40635, 0.00000]
        expected_rgb_vals[2][0] = [0.27778, 0.32260, 0.00000]
        expected_rgb_vals[2][1] = [0.29777, 0.36356, 0.00000]
        expected_rgb_vals[2][2] = [0.32612, 0.40647, 0.00000]
        expected_rgb_vals[2][3] = [0.36283, 0.45132, 0.00000]
        expected_rgb_vals[3][0] = [0.35251, 0.37079, 0.00000]
        expected_rgb_vals[3][1] = [0.35054, 0.41398, 0.00000]
        expected_rgb_vals[3][2] = [0.37021, 0.45276, 0.00000]
        expected_rgb_vals[3][3] = [0.41150, 0.48712, 0.00000]
        for (x_idx, x_val) in enumerate(x_vals):
            for (y_idx, y_val) in enumerate(y_vals):
                rgb_vals = saruman.get_rgb(x_val, y_val, self.cell)
                # Each channel must match the reference to within 1e-3.
                for i in range(3):
                    rgb_diff = rgb_vals[i] - expected_rgb_vals[x_idx][y_idx][i]
                    self.assertTrue(np.abs(rgb_diff) < 1e-3)
def test_find_center(self):
""" Tests the 'find_center' function """
local_xy = saruman.find_center(self.cell)
self.assertEqual(local_xy, [33, 35])
def test_get_radii(self):
""" Tests the 'get_radii' function """
local_xy = saruman.find_center(self.cell)
self.assertEqual(local_xy, [33, 35])
# When np.pi is used for 'thetas' (1.1.0.1)
'''
expected_radii = [
28.89, 28.98, 29.14, 30.03, 30.13, 30.41, 29.86, 29.99,
29.76, 30.16, 30.32, 30.07, 30.29, 30.38, 30.60, 30.47,
30.63, 30.56, 30.56, 30.35, 30.57, 31.00, 31.05, 30.51,
30.79
]
'''
# When 2.0*np.pi is used for 'thetas' (1.1.0.2)
expected_radii = [
28.89, 29.14, 30.13, 29.86, 29.76, 30.32, 30.29, 30.60,
30.63, 30.56, 30.57, 31.05, 30.79, 31.13, 32.35, 33.82,
34.69, 33.05, 31.80, 30.88, 30.20, 29.56, 28.67, 28.67,
28.89
]
radii = saruman.get_radii(local_xy, self.cell, 25)
for (expected_radius, radius) in zip(expected_radii, radii):
self.assertTrue(np.abs(radius - expected_radius) < 1e-7)
def test_get_half_max(self):
""" Tests the 'get_half_max' function """
x_vals = np.linspace(0.0, 1.0, 1000)
y_vals = 1.0 - x_vals*x_vals
half = saruman.get_half_max(x_vals, y_vals)
self.assertTrue(np.abs(half - np.sqrt(2.0)/2.0) < 1e-3)
x_vals = np.linspace(0.0, 1.0, 1000)
y_vals = 1.0 - x_vals*x_vals*x_vals*x_vals
half = saruman.get_half_max(x_vals, y_vals)
self.assertTrue(np.abs(half - (1.0/2.0)**(1/4)) < 1e-3)
def test_find_cell_borders(self):
""" Tests the 'find_cell_borders' function """
zslice = self.stack[self.iz]
(row_min, row_max, col_min, col_max) = saruman.find_cell_borders(zslice)
self.assertEqual(row_min, 0)
self.assertEqual(row_max, 21)
self.assertEqual(col_min, 0)
self.assertEqual(col_max, 37)
def test_get_cell(self):
""" Tests the 'get_cell' function """
zslice = self.stack[self.iz]
(l_coords, g_coords, cell, _, zslice) = saruman.get_cell(zslice, 25)
expected_local_coords = [33, 35]
expected_global_coords = [438, 130]
self.assertEqual(l_coords, expected_local_coords)
self.assertEqual(g_coords, expected_global_coords)
self.assertFalse((cell - self.cell).any())
def test_grima(self):
""" Tests the 'grima' function """
pass
def test_normalize(self):
""" Tests the 'normalize' function """
pass
def test_get_shell_width(self):
""" Test 'get_shell_width' functionality """
thetas = np.linspace(0.0, 4.0*np.pi, 10000)
xvals = np.cos(thetas)
yvals = np.sin(thetas)
(th1, th2, intg) = saruman.get_shell_width(thetas, xvals, yvals)
self.assertEqual(np.round(np.pi/4, decimals=3), np.round(th1, decimals=3))
self.assertEqual(np.round(np.pi + np.pi/4, decimals=3), np.round(th2, decimals=3))
self.assertEqual(np.round(2.0*np.sqrt(2), decimals=3), np.round(intg, decimals=3))
def test_get_cells(self):
""" Tests the 'get_cells' function """
zslice = self.stack[self.iz]
(l_coords, g_coords, _, _) = saruman.get_cells(zslice, 2)
expected_local_coords = [
[33, 35],
#[29, 30] #When using np.pi for 'thetas' (1.1.0.1)
[32, 31] #When using 2.0*np.pi for 'thetas' (1.1.0.2)
]
expected_global_coords = [
[438, 130],
#[295, 457] # When using np.pi
[91, 102] # When using 2.0*np.pi
]
for (l_xy, expected_l_xy) in zip(l_coords, expected_local_coords):
self.assertEqual(l_xy, expected_l_xy)
for (g_xy, expected_g_xy) in zip(g_coords, expected_global_coords):
self.assertEqual(g_xy, expected_g_xy)
| StarcoderdataPython |
12802831 | <reponame>jpegbert/pycorrector
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import os
from pathlib import Path
from tqdm import tqdm
log = logging.getLogger(__name__)
def _fetch_from_remote(url, force_download=False, cached_dir='~/.paddle-ernie-cache'):
    """Download and extract a model tarball from *url*, cached by URL hash.

    Args:
        url: remote location of a ``.tgz`` model archive.
        force_download: re-download even when a cached copy exists.
        cached_dir: cache root; the archive is extracted into a
            subdirectory named after the md5 hex digest of the URL.

    Returns:
        ``pathlib.Path`` of the directory containing the extracted model.
    """
    import hashlib, requests, tarfile
    sig = hashlib.md5(url.encode('utf8')).hexdigest()
    cached_dir = Path(cached_dir).expanduser()
    # exist_ok replaces the original racy try/except OSError around mkdir().
    cached_dir.mkdir(parents=True, exist_ok=True)
    cached_dir_model = cached_dir / sig
    if force_download or not cached_dir_model.exists():
        # exist_ok: a previously interrupted download leaves this directory
        # behind; the original plain mkdir() then raised forever after.
        cached_dir_model.mkdir(parents=True, exist_ok=True)
        tmpfile = cached_dir_model / 'tmp'
        with tmpfile.open('wb') as f:
            # url = 'https://ernie.bj.bcebos.com/ERNIE_stable.tgz'
            r = requests.get(url, stream=True)
            # Fail fast on HTTP errors instead of caching an error page.
            r.raise_for_status()
            # content-length may be absent (e.g. chunked transfer encoding);
            # the original int(None) raised TypeError in that case.
            total_len = int(r.headers.get('content-length', 0))
            for chunk in tqdm(r.iter_content(chunk_size=1024),
                              total=total_len // 1024,
                              desc='downloading %s' % url,
                              unit='KB'):
                if chunk:
                    f.write(chunk)
                    f.flush()
        log.debug('extacting... to %s' % tmpfile)
        with tarfile.open(tmpfile.as_posix()) as tf:
            tf.extractall(path=cached_dir_model.as_posix())
        os.remove(tmpfile.as_posix())
    log.debug('%s cached in %s' % (url, cached_dir))
    return cached_dir_model
def add_docstring(doc):
    """Decorator factory that appends *doc* to a function's docstring.

    Used to propagate documentation from an overridden superclass method
    onto the subclass override.

    Args:
        doc: extra documentation text to append.

    Returns:
        A decorator that mutates ``f.__doc__`` in place and returns ``f``.
    """
    def func(f):
        # ``__doc__`` is None for undocumented functions; the original
        # ``f.__doc__ += ...`` raised TypeError in that case.
        f.__doc__ = (f.__doc__ or '') + \
            ('\n======other docs from supper class ======\n%s' % doc)
        return f
    return func
| StarcoderdataPython |
6643056 | <reponame>zhangzhenqiqi/ActionCLIP
import torch
from ActionNet.action import Action
import pickle
import pandas as pd
def test():
    """Smoke-test the Action temporal module wrapped around a 1x1 Conv2d.

    Feeds a random (N*T, C, H, W) batch through a plain conv and through
    the Action-wrapped conv, printing the output shapes.
    """
    # The original began with a bare ``torch.randint()`` call, which raises
    # TypeError (randint requires ``high`` and ``size`` arguments) before
    # any of the real work ran; it served no purpose and is removed.
    input = torch.randn(8 * 8, 64, 10, 10)  # (N*T, C_in, H_in, W_in)
    print(input.size())
    net = torch.nn.Conv2d(64, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
    output1 = net(input)
    print(output1.size())
    # Wrap the conv with the Action module (8 temporal segments).
    net = Action(net, n_segment=8, shift_div=8)
    output2 = net(input)
    print(net)
    print(output2.size())
def pkl(path='../data/sthv2_annotation/train.pkl'):
    """Load a pickled annotation file and print a short preview.

    Args:
        path: pickle file to inspect. Defaults to the sthv2 train split,
            preserving the original hard-coded behavior.
    """
    # Context manager guarantees the handle is closed; the original opened
    # the file and never closed it.
    with open(path, 'rb') as pkl_file:
        data = pickle.load(pkl_file)
    # Widen pandas display limits so full rows are visible when the pickle
    # holds a DataFrame-like structure.
    pd.set_option('display.max_columns', None)
    pd.set_option('display.max_rows', None)
    pd.set_option('max_colwidth', 1000000)
    print(type(data))
    print('---')
    print(data[0:3])
    print('---')
if __name__ == '__main__':
    # Script entry point: preview the pickled annotation file.
    pkl()
| StarcoderdataPython |
4997696 | from flask_login import UserMixin
from harperdb.exceptions import HarperDBError
from db import db
from db import schema
table = "accounts"
class Accounts:
    """Data-access helpers for the HarperDB ``accounts`` table."""

    @staticmethod
    def create(username: str, password: str, superuser: bool):
        """Insert a new account record.

        Returns:
            The HarperDB insert result.
        """
        payload = dict(
            username=username,
            password=password,
            superuser=superuser,
        )
        return db.insert(schema, table, [payload])

    @staticmethod
    def get(username):
        """Return the record(s) matching *username*, or ``None`` if absent.

        HarperDB returns an empty list for no match and raises
        ``HarperDBError`` on lookup failure; both cases normalize to None.
        """
        try:
            user = db.search_by_value(schema, table, "username", username)
        except HarperDBError:
            return None
        # ``user or None`` collapses the original two-step
        # ``if user == []: user = None`` normalization.
        return user or None
class User(UserMixin):
    """Flask-Login user wrapper; the username doubles as the login id."""

    def __init__(self, username: str) -> None:
        super().__init__()
        # Flask-Login reads ``id`` via UserMixin.get_id().
        self.id = username
| StarcoderdataPython |
296450 | from __future__ import absolute_import
import numpy as np
from .Node import Op, NAME_RULE, PROFILING_MODE
from .. import profiler
from .._base import get_array_memory
class Conv2dOp(Op):
    """2-D convolution op; inputs are (data, filter) in NCHW / OIHW layout.

    NOTE(review): shape arithmetic uses ``/`` (true division on Python 3);
    the commented-out ``print "..."`` statements suggest this was written
    for Python 2, where ``/`` on ints is integer division — confirm before
    running under Python 3.
    """
    # nodeA : x nodeB : filter
    def __call__(self, node_A, node_B, padding=0, padding2 = None, stride=1, For_ResNet = False):
        """Build the graph node; padding2 defaults to padding (square pad)."""
        new_node = Op.__call__(self)
        new_node.inputs = [node_A, node_B]
        self.padding = padding
        self.padding2 = padding2
        self.For_ResNet = For_ResNet
        if padding2 is None:
            self.padding2 = self.padding
        self.stride = stride
        new_node.profiler = None
        if PROFILING_MODE == 1:
            new_node.profiler = profiler.CreateProfiler()
        # print "init padding = ", padding
        if NAME_RULE == 0:
            new_node.name = "Conv2d(%s, %s)" % (node_A.name, node_B.name)
        elif NAME_RULE == 1:
            new_node.name = "Conv2d"
        else:
            new_node.name = "conv2d"+str(new_node.id)
        new_node.desc = new_node.name + \
            "(%s, %s)" % (node_A.name, node_B.name)
        return new_node

    def im2col(self, X, filter_H, filter_W, padding, stride):
        """Unfold X (N,C,H,W) into (N, C*fH*fW, outH*outW) patch columns.

        Out-of-bounds positions (zero padding) are written as 0.
        """
        N, C, H, W = X.shape
        assert (H + 2 * padding - filter_H) % stride == 0
        assert (W + 2 * padding - filter_W) % stride == 0
        out_H = (H + 2 * padding - filter_H) / stride + 1
        out_W = (W + 2 * padding - filter_W) / stride + 1
        y_row_size = C * filter_H * filter_W
        y_col_size = out_H * out_W
        y_shape = (N, y_row_size, y_col_size)
        Y = np.empty(y_shape, dtype=X.dtype)
        for batch_index in range(N):
            for col_index in range(y_col_size):
                # Map the flat output index back to 2-D output coordinates.
                out_y = col_index / out_W
                out_x = col_index % out_W
                in_y = out_y * stride - padding
                in_x = out_x * stride - padding
                row_idx = 0
                for c in range(0, C):
                    for y in range(in_y, in_y + filter_H):
                        for x in range(in_x, in_x + filter_W):
                            if (x < 0 or x >= W or y < 0 or y >= H):
                                Y[batch_index, row_idx, col_index] = 0
                            else:
                                Y[batch_index, row_idx,
                                  col_index] = X[batch_index, c, y, x]
                            row_idx += 1
        return Y

    def np_conv2d(self, X, Filter, padding=0, stride=1):
        """Implement a conv2d as a matrix multiply after im2col."""
        filter_outChannel, filter_inChannel, filter_H, filter_W = Filter.shape
        N, C, H, W = X.shape
        assert (H + 2 * padding - filter_H) % stride == 0
        assert (W + 2 * padding - filter_W) % stride == 0
        out_H = (H + 2 * padding - filter_H) / stride + 1
        out_W = (W + 2 * padding - filter_W) / stride + 1
        im2col_matrix = self.im2col(X, filter_H, filter_W, padding, stride)
        filter_matrix = Filter.reshape(filter_outChannel, -1)
        return np.matmul(filter_matrix, im2col_matrix).reshape(N, filter_outChannel, out_H, out_W)

    def profile(self, node, input_vals, output_val, is_static = True):
        """Record memory/time estimates (static) or run the GPU kernel (dynamic)."""
        assert len(input_vals) == 2
        if is_static:
            # input memory
            node.profiler.input_memory = get_array_memory(input_vals[0].shape) + \
                get_array_memory(input_vals[1].shape)
            # output memory
            node.profiler.output_memory = get_array_memory(output_val.shape)
            # TODO
            # no workspace
            node.profiler.workspace_memory = 0
            # execute time
            node.profiler.time = node.profiler.output_memory / 4 * profiler.FLOPS_PER_SECOND
        else:
            # import time
            # start = time.time()
            from ..gpu_links import CuDNN_conv2d
            CuDNN_conv2d(input_vals[0], input_vals[1],
                         output_val, self.padding, self.padding2, self.stride, None, node.profiler)
            # print("time.time: {} ms".format((time.time() - start) * 1000))
            # node.profiler.time = time.time() - start

    def compute(self, node, input_vals, output_val, use_numpy=True, stream_handle=None):
        """Run the convolution on CPU (DNNL or numpy fallback) or GPU (cuDNN)."""
        assert len(input_vals) == 2
        if use_numpy:
            from .._base import DNNL_LIB
            if DNNL_LIB['DnnlConv2d']:
                from ..cpu_links import conv2d as cpu_conv2d
                from ..ndarray import numpyasdlarrayhandle
                input_x = numpyasdlarrayhandle(input_vals[0])
                input_f = numpyasdlarrayhandle(input_vals[1])
                output = numpyasdlarrayhandle(output_val)
                cpu_conv2d(input_x, input_f, output, self.padding, self.stride)
            else:
                output_val[:] = self.np_conv2d(
                    input_vals[0], input_vals[1], self.padding, self.stride)
        else:
            from ..gpu_links import CuDNN_conv2d
            CuDNN_conv2d(input_vals[0], input_vals[1],
                         output_val, self.padding, self.padding2, self.stride, stream_handle, None)

    def gradient(self, node, output_grad):
        """Return [d(loss)/d(data), d(loss)/d(filter)] gradient nodes."""
        return [conv2d_gradient_of_data_op(node.inputs[1], output_grad, self.padding, self.padding2, self.stride, self.For_ResNet),\
                conv2d_gradient_of_filter_op(node.inputs[0], output_grad, self.padding, self.padding2, self.stride)]

    def infer_shape(self, node, input_shapes):
        """Infer (N, out_channels, out_H, out_W) from data and filter shapes."""
        assert len(input_shapes) == 2
        # print "infer padding = ",self.padding
        N, _, H, W = input_shapes[0]
        f_O, _, f_H, f_W = input_shapes[1]
        padding = self.padding
        padding2 = self.padding2
        stride = self.stride
        filter_H = input_shapes[1][2]
        filter_W = input_shapes[1][3]
        out_H = (H + 2 * padding - filter_H) / stride + 1
        out_W = (W + 2 * padding2 - filter_W) / stride + 1
        # print "conv2d_shape"
        # print(N, f_O, out_H, out_W)
        return (N, f_O, out_H, out_W)
class Conv2d_Gradient_of_DataOp(Op):
    """Gradient of conv2d w.r.t. the input data; inputs are (filter, dY).

    NOTE(review): unlike Conv2dOp, ``padding2`` is NOT defaulted to
    ``padding`` when None here — confirm callers always pass it.
    """
    # nodeA : filter nodeB : Y_gradient
    def __call__(self, node_A, node_B, padding=0, padding2 = None, stride=1, For_ResNet = False):
        new_node = Op.__call__(self)
        new_node.inputs = [node_A, node_B]
        self.padding = padding
        self.padding2 = padding2
        self.stride = stride
        self.For_ResNet = For_ResNet
        new_node.profiler = None
        if PROFILING_MODE == 1:
            new_node.profiler = profiler.CreateProfiler()
        if NAME_RULE == 0:
            new_node.name = "Conv2d_Gradient_of_DataOp(%s, %s)" % (
                node_A.name, node_B.name)
        elif NAME_RULE == 1:
            new_node.name = "Conv2d_Gradient_of_DataOp"
        else:
            new_node.name = "Conv2d_Gradient_of_DataOp"+str(new_node.id)
        new_node.desc = new_node.name + \
            "(%s, %s)" % (node_A.name, node_B.name)
        return new_node

    def im2col_transpose(self, N, C, H, W, filter_H, filter_W, Y, padding, stride):
        """Scatter-add im2col columns Y back into a (N,C,H,W) data gradient.

        Inverse operation of im2col: each column element accumulates into
        the input position it was sampled from; padded positions are dropped.
        """
        assert (H + 2 * padding - filter_H) % stride == 0
        assert (W + 2 * padding - filter_W) % stride == 0
        out_H = (H + 2 * padding - filter_H) / stride + 1
        out_W = (W + 2 * padding - filter_W) / stride + 1
        _, y_row_size, y_col_size = Y.shape
        der_X_shape = (N, C, H, W)
        der_X = np.zeros(der_X_shape, dtype=Y.dtype)
        # print "batch_size", N
        for batch_index in range(N):
            for col_index in range(y_col_size):
                out_y = col_index / out_W
                out_x = col_index % out_W
                in_y = out_y * stride - padding
                in_x = out_x * stride - padding
                row_idx = 0
                for c in range(0, C):
                    for y in range(in_y, in_y + filter_H):
                        for x in range(in_x, in_x + filter_W):
                            if (x < 0 or x >= W or y < 0 or y >= H):
                                Y[batch_index, row_idx, col_index] = 0
                            else:
                                der_X[batch_index, c, y,
                                      x] += Y[batch_index, row_idx, col_index]
                            row_idx += 1
        return der_X

    def np_Conv2dGradient_data(self, X_N, X_C, X_H, X_W, Filter, Y, padding=0, stride=1):
        """CPU reference: dX = im2col_transpose(Filter^T @ dY)."""
        filter_outChannel, filter_inChannel, filter_H, filter_W = Filter.shape
        Y_N, Y_C, Y_H, Y_W = Y.shape
        YY = Y.reshape((Y_N, Y_C, Y_H * Y_W))  # transformed to im2col Y
        F_filter = Filter.reshape((filter_outChannel, -1))
        gradient_im2col_XX = np.matmul(F_filter.T, YY)
        gradient_X = self.im2col_transpose(
            X_N, X_C, X_H, X_W, filter_H, filter_W, gradient_im2col_XX, padding, stride)  # gradient of x
        return gradient_X

    def profile(self, node, input_vals, output_val, is_static = True):
        """Record memory/time estimates (static) or run the GPU kernel (dynamic)."""
        assert len(input_vals) == 2
        if is_static:
            # input memory
            node.profiler.input_memory = get_array_memory(input_vals[0].shape) + \
                get_array_memory(input_vals[1].shape)
            # output memory
            node.profiler.output_memory = get_array_memory(output_val.shape)
            # no workspace
            node.profiler.workspace_memory = 0
            # execute time
            node.profiler.time = node.profiler.output_memory / 4 * profiler.FLOPS_PER_SECOND
        else:
            # import time
            # start = time.time()
            from ..gpu_links import CuDNN_conv2d_gradient_of_data
            CuDNN_conv2d_gradient_of_data(
                input_vals[0], input_vals[1], output_val, padding=self.padding, padding2=self.padding2, stride=self.stride, stream = None, profiler = node.profiler)
            # node.profiler.time = time.time() - start

    def compute(self, node, input_vals, output_val, use_numpy=True, stream_handle=None):
        """Compute dX on CPU (DNNL or numpy fallback) or GPU (cuDNN)."""
        assert len(input_vals) == 2
        # Recover the original input spatial size from dY and the filter.
        N = input_vals[1].shape[0]
        C = input_vals[0].shape[1]
        H = (input_vals[1].shape[2] - 1) * self.stride + \
            input_vals[0].shape[2] - 2 * self.padding
        W = (input_vals[1].shape[3] - 1) * self.stride + \
            input_vals[0].shape[3] - 2 * self.padding
        if use_numpy:
            from .._base import DNNL_LIB
            if DNNL_LIB['DnnlConv2d_Gradient_of_Data']:
                from ..cpu_links import conv2d_gradient_of_data as cpu_conv2d_gradient_of_data
                from ..ndarray import numpyasdlarrayhandle
                input_f = numpyasdlarrayhandle(input_vals[0])
                gradient_y = numpyasdlarrayhandle(input_vals[1])
                gradient_x = numpyasdlarrayhandle(output_val)
                cpu_conv2d_gradient_of_data(input_f, gradient_y, gradient_x, self.padding, self.stride)
            else:
                output_val[:] = self.np_Conv2dGradient_data(
                    N, C, H, W, input_vals[0], input_vals[1], padding=self.padding, stride=self.stride)
        else:
            from ..gpu_links import CuDNN_conv2d_gradient_of_data
            CuDNN_conv2d_gradient_of_data(
                input_vals[0], input_vals[1], output_val, padding=self.padding, padding2 = self.padding2, stride=self.stride, stream = stream_handle, profiler = None)

    def gradient(self, node, output_grad):
        # Second-order gradients are not supported.
        raise NotImplementedError

    def infer_shape(self, node, input_shapes):
        """Infer the data-gradient shape; For_ResNet adds 1 px for stride-2 convs."""
        # print self.For_ResNet
        assert len(input_shapes) == 2
        N = input_shapes[1][0]
        C = input_shapes[0][1]
        H = (input_shapes[1][2] - 1) * self.stride + \
            input_shapes[0][2] - 2 * self.padding + (1 if self.For_ResNet and self.stride == 2 else 0)
        W = (input_shapes[1][3] - 1) * self.stride + \
            input_shapes[0][3] - 2 * self.padding2 + (1 if self.For_ResNet and self.stride == 2 else 0)
        return (N, C, H, W)
class Conv2d_Gradient_of_FilterOp(Op):
    """Gradient of conv2d w.r.t. the filter; inputs are (input_X, dY)."""
    # nodeA : input_x nodeB : gradient_Y
    def __call__(self, input_X, gradient_Y, padding=0, padding2=None, stride=1):
        new_node = Op.__call__(self)
        new_node.inputs = [input_X, gradient_Y]
        self.padding = padding
        if padding2 is None:
            self.padding2 = self.padding
        else:
            self.padding2 = padding2
        self.stride = stride
        new_node.profiler = None
        if PROFILING_MODE == 1:
            new_node.profiler = profiler.CreateProfiler()
        if NAME_RULE == 0:
            new_node.name = "Conv2d_Gradient_of_FilterOp(%s, %s)" % (
                input_X.name, gradient_Y.name)
        elif NAME_RULE == 1:
            new_node.name = "Conv2d_Gradient_of_FilterOp"
        else:
            new_node.name = "Conv2d_Gradient_of_FilterOp"+str(new_node.id)
        new_node.desc = new_node.name + \
            "(%s, %s)" % (input_X.name, gradient_Y.name)
        return new_node

    def im2col(self, X, filter_H, filter_W, padding, stride):
        """Unfold X (N,C,H,W) into patch columns (duplicate of Conv2dOp.im2col)."""
        N, C, H, W = X.shape
        assert (H + 2 * padding - filter_H) % stride == 0
        assert (W + 2 * padding - filter_W) % stride == 0
        out_H = (H + 2 * padding - filter_H) / stride + 1
        out_W = (W + 2 * padding - filter_W) / stride + 1
        y_row_size = C * filter_H * filter_W
        y_col_size = out_H * out_W
        y_shape = (N, y_row_size, y_col_size)
        Y = np.empty(y_shape, dtype=X.dtype)
        for batch_index in range(N):
            for col_index in range(y_col_size):
                out_y = col_index / out_W
                out_x = col_index % out_W
                in_y = out_y * stride - padding
                in_x = out_x * stride - padding
                row_idx = 0
                for c in range(0, C):
                    for y in range(in_y, in_y + filter_H):
                        for x in range(in_x, in_x + filter_W):
                            if (x < 0 or x >= W or y < 0 or y >= H):
                                Y[batch_index, row_idx, col_index] = 0
                            else:
                                Y[batch_index, row_idx,
                                  col_index] = X[batch_index, c, y, x]
                            row_idx += 1
        return Y

    def np_Conv2dGradient_Filter(self, filter_outChannel, filter_inChannel, filter_H, filter_W, X, Y, padding=0, stride=1):
        """Implement a conv2d_transpose as a matrix multiply after im2col."""
        X_N, X_C, X_H, X_W = X.shape
        Y_N, Y_C, Y_H, Y_W = Y.shape
        YY = Y.reshape((Y_N, Y_C, Y_H * Y_W))  # transformed to im2col Y
        # XX = X.reshape((X_N, X_C, X_W * X_H)) # transformed to im2col X
        im2col_XX = self.im2col(X, filter_H, filter_W, padding, stride)
        gradient_filter = np.zeros(shape=(
            filter_outChannel, filter_inChannel * filter_H * filter_W), dtype=Y.dtype)
        # Accumulate dW over the batch: dW += dY_i @ im2col(X_i)^T.
        for i in range(X_N):
            gradient_filter += np.matmul(YY[i], im2col_XX[i].T)
        gradient_filter = gradient_filter.reshape(
            (filter_outChannel, filter_inChannel, filter_H, filter_W))
        return gradient_filter
        # out_H = (H + 2 * padding - filter_H) / stride + 1
        # out_W = (W + 2 * padding - filter_W) / stride + 1

    def profile(self, node, input_vals, output_val, is_static = True):
        """Record memory/time estimates (static) or run the GPU kernel (dynamic)."""
        assert len(input_vals) == 2
        if is_static:
            # input memory
            node.profiler.input_memory = get_array_memory(input_vals[0].shape)
            # get_array_memory(input_vals[1].shape)
            # output memory
            node.profiler.output_memory = get_array_memory(output_val.shape)
            # no workspace
            node.profiler.workspace_memory = 0
            # execute time
            node.profiler.time = node.profiler.output_memory / 4 * profiler.FLOPS_PER_SECOND
        else:
            # import time
            # start = time.time()
            from ..gpu_links import CuDNN_conv2d_gradient_of_filter
            CuDNN_conv2d_gradient_of_filter(
                input_vals[0], input_vals[1], output_val, padding=self.padding, padding2=self.padding2, stride=self.stride, stream = None, profiler = node.profiler)
            # node.profiler.time = time.time() - start

    def compute(self, node, input_vals, output_val, use_numpy=True, stream_handle=None):
        """Compute dW on CPU (DNNL or numpy fallback) or GPU (cuDNN)."""
        assert len(input_vals) == 2
        # Recover the filter spatial size from input and dY shapes.
        f_N = input_vals[1].shape[1]
        f_C = input_vals[0].shape[1]
        f_H = input_vals[1].shape[2] + 2 * self.padding - \
            (input_vals[1].shape[2] - 1) * self.stride
        f_W = input_vals[1].shape[3] + 2 * self.padding - \
            (input_vals[1].shape[3] - 1) * self.stride
        if use_numpy:
            from .._base import DNNL_LIB
            if DNNL_LIB['DnnlConv2d_Gradient_of_Filter']:
                from ..cpu_links import conv2d_gradient_of_filter as cpu_conv2d_gradient_of_filter
                from ..ndarray import numpyasdlarrayhandle
                input_x = numpyasdlarrayhandle(input_vals[0])
                gradient_y = numpyasdlarrayhandle(input_vals[1])
                gradient_f = numpyasdlarrayhandle(output_val)
                cpu_conv2d_gradient_of_filter(input_x, gradient_y, gradient_f, self.padding, self.stride)
            else:
                output_val[:] = self.np_Conv2dGradient_Filter(
                    f_N, f_C, f_H, f_W, input_vals[0], input_vals[1], padding=self.padding, stride=self.stride)
        else:
            from ..gpu_links import CuDNN_conv2d_gradient_of_filter
            CuDNN_conv2d_gradient_of_filter(
                input_vals[0], input_vals[1], output_val, padding=self.padding, padding2=self.padding2, stride=self.stride, stream = stream_handle, profiler = None)

    def gradient(self, node, output_grad):
        # Second-order gradients are not supported.
        raise NotImplementedError

    def infer_shape(self, node, input_shapes):
        """Infer the filter-gradient shape (out_ch, in_ch, f_H, f_W)."""
        assert len(input_shapes) == 2
        f_N = input_shapes[1][1]
        f_C = input_shapes[0][1]
        f_H = input_shapes[0][2] + 2 * self.padding - \
            (input_shapes[1][2] - 1) * self.stride
        f_W = input_shapes[0][3] + 2 * self.padding2 - \
            (input_shapes[1][3] - 1) * self.stride
        return (f_N, f_C, f_H, f_W)
def conv2d_op(node_A, node_B, padding=0, padding2=None, stride=1, For_ResNet = False):
    """Conv2d node.
    Parameters:
    ----
    node_A : Node
        Input data node.
    node_B : Node
        Input filter node.
    padding :
        Padding size (height; padding2 is the width padding, defaulting
        to padding when None).
    stride :
        Stride size.
    For_ResNet :
        When True, gradient shape inference adds 1 px for stride-2 convs.
    Returns:
    ----
    A new Node instance created by Op.
    """
    return Conv2dOp()(node_A, node_B, padding, padding2, stride, For_ResNet)
def conv2d_gradient_of_data_op(node_A, node_B, padding=0, padding2 = None,stride=1, For_ResNet = False):
    """Gradient node of data of conv2d.
    Parameters:
    ----
    node_A : Node
        Filter node.
    node_B : Node
        Previous gradient node.
    padding :
        Padding size (height; padding2 is the width padding).
    stride :
        Stride size.
    For_ResNet :
        When True, shape inference adds 1 px for stride-2 convs.
    Returns:
    ----
    A new Node instance created by Op.
    """
    return Conv2d_Gradient_of_DataOp()(node_A, node_B, padding, padding2, stride, For_ResNet)
def conv2d_gradient_of_filter_op(input_X, gradient_Y, padding=0, padding2=None, stride=1):
    """Gradient node of filters of conv2d.
    Parameters:
    ----
    input_X :
        Input data of conv2d.
    gradient_Y :
        Gradient array.
    padding :
        Padding size (height; padding2 is the width padding, defaulting
        to padding when None).
    stride :
        Stride size.
    Returns:
    ----
    A new Node instance created by Op.
    """
    return Conv2d_Gradient_of_FilterOp()(input_X, gradient_Y, padding, padding2, stride)
| StarcoderdataPython |
6485091 | #!/usr/bin/env python3
"""Noma [node management]
Usage: noma start
noma stop
noma check
noma logs
noma info
noma lnd create
noma lnd backup
noma lnd autounlock
noma lnd autoconnect [<path>]
noma lnd savepeers
noma lnd connectapp
noma lnd connectstring
noma (-h|--help)
noma --version
Options:
-h --help Show this screen.
--version Show version.
"""
import os
from docopt import docopt
from noma import lnd
from noma import node
def lnd_fn(args):
    """
    lnd related functionality

    Dispatch the single truthy `noma lnd` subcommand flag in the docopt
    *args* mapping to its handler; flags are mutually exclusive, so at
    most one handler runs.
    """
    dispatch = (
        ("create", lambda: lnd.check_wallet()),
        ("autounlock", lambda: lnd.autounlock()),
        ("backup", lambda: lnd.backup()),
        ("autoconnect", lambda: lnd.autoconnect(args["<path>"])),
        ("savepeers", lambda: lnd.savepeers()),
        ("connectstring", lambda: lnd.connectstring()),
    )
    # Same check order as the original if/elif chain; stop at first match.
    for flag, run_handler in dispatch:
        if args[flag]:
            run_handler()
            break
def node_fn(args):
    """
    node related functionality

    Dispatch the single truthy node-level subcommand flag in the docopt
    *args* mapping to its handler; flags are mutually exclusive.
    """
    dispatch = (
        ("info", node.info),
        ("start", node.start),
        ("stop", node.stop),
        ("logs", node.logs),
        ("check", node.check),
    )
    # Same check order as the original if/elif chain; stop at first match.
    for flag, handler in dispatch:
        if args[flag]:
            handler()
            break
def main():
    """
    main noma entrypoint function

    Parses the CLI via docopt and dispatches to lnd or node handling;
    refuses to run for non-root users.
    """
    parsed = docopt(__doc__, version="Noma v0.5.1")
    # Guard clause: everything below requires root privileges.
    if os.geteuid() != 0:
        print("Sorry! You must be root")
        return
    if parsed["lnd"]:
        lnd_fn(parsed)
    else:
        node_fn(parsed)
if __name__ == "__main__":
    # CLI entry point when executed directly (also exposed via setup entry points).
    main()
| StarcoderdataPython |
1948608 | """
Module: 'ntptime' on esp32 3.0.0
"""
# MCU: (sysname='esp32', nodename='esp32', release='3.0.0', version='v3.0.0 on 2020-01-29', machine='ESP32 module with ESP32')
# Stubber: 1.3.2
NTP_DELTA = 3155673600
host = 'pool.ntp.org'
def settime():
    """Stub generated by micropython-stubber; the real firmware
    implementation syncs the RTC from an NTP server (no-op here)."""
    pass
socket = None
struct = None
def time():
    """Stub generated by micropython-stubber; returns nothing here
    (firmware version returns the NTP epoch seconds)."""
    pass
| StarcoderdataPython |
3261657 | <reponame>TomMalkin/Sqema
# -*- coding: utf-8 -*-
"""Defines the Sqema class."""
import pathlib
import simqle
import pandas as pd
class DatabaseRoot:
    """Walks one ``<con_name>.database`` directory tree and ensures the
    tables/views it describes exist on the matching connection."""

    def __init__(self, database_directory: str, con_name: str):
        """
        :type con_name: str
        :type sqema_directory: str
        """
        self.con_name = con_name
        self.path = pathlib.Path(database_directory)

    def find_object(self, cm, search_path=None, schema=None):
        # Recursively scan for *.table, *.view and *.schema directories,
        # carrying the schema name down into *.schema subtrees.
        for path in (search_path or self.path).iterdir():
            if path.is_dir():
                if str(path).endswith(".table"):  # table found
                    self.ensure_table(cm, path, schema)
                elif str(path).endswith(".view"):  # table found
                    self.ensure_view(cm, path)
                else:
                    # NOTE(review): reconstructed nesting assumes *.schema
                    # entries are directories (they must be, since we
                    # iterdir() into them below).
                    if str(path).endswith(".schema"):
                        self.find_object(cm, path, schema=path.stem)

    @staticmethod
    def get_definition(path):
        # Returns the SQL text of <path>/definition.sql, or None (implicit)
        # when the file is missing.
        definition_path = pathlib.Path(path, "definition.sql")
        if definition_path.exists():
            with open(definition_path, "r") as f:
                return f.read()

    def ensure_view(self, cm, path):
        sql = self.get_definition(path)
        if not sql:
            # TODO: add warning
            return
        cm.execute_sql(sql=sql, con_name=self.con_name)

    def ensure_table(self, cm, path, schema):
        sql = self.get_definition(path)
        if not sql:
            # TODO: add warning
            return
        cm.execute_sql(sql=sql, con_name=self.con_name)
        # check for example data
        data_path = pathlib.Path(path, "data.csv")
        if data_path.exists():
            table_name = path.stem
            with open(data_path, "r") as f:
                data_df = pd.read_csv(f)
            con = cm.get_engine(con_name=self.con_name)
            kwargs = {
                "name": table_name,
                "con": con,
                "if_exists": "append",
                "index": False
            }
            if schema:
                kwargs["schema"] = schema
            data_df.to_sql(**kwargs)
class Sqema:
    """The Sqema class."""

    def __init__(self, cm: simqle.ConnectionManager,
                 sqema_directory="./sqema.sqema"):
        """
        Initialise a Sqema class.
        args:
            - cm: a simqle.ConnectionManager object
            - sqema_directory: the directory that describes the SQL schema
        """
        self.cm = cm
        self.sqema_directory = sqema_directory
        # Populated lazily by ensure_sql_environment().
        self.database_roots = []

    def ensure_sql_environment(self):
        """Ensure a SQL environment."""
        root_path = pathlib.Path(self.sqema_directory)
        # look for possible database paths: one "<name>.database" directory
        # per configured connection.
        for connection in self.cm.config["connections"]:
            con_name = connection["name"]
            database_path = pathlib.Path(root_path, con_name + ".database")
            if database_path.exists():
                self.database_roots.append(DatabaseRoot(database_path,
                                                        con_name))
        for database_root in self.database_roots:
            database_root.find_object(self.cm)
| StarcoderdataPython |
9623224 | import numpy as np
import glob
import torch.utils.data
class SLPWeightDataset(torch.utils.data.Dataset):
    """SLP bed point-cloud dataset for body-weight regression.

    Loads per-frame point clouds (15 frames per subject) and the subjects'
    physique data; serves either a subsampled point cloud or a voxelized
    occupancy grid, plus the (optionally normalized) weight target.
    """

    def __init__(self, cfg, phase):
        position = cfg.SLP_DATASET.POSITION
        cover_cond = cfg.SLP_DATASET.COVER_CONDITION
        val_split = cfg.SLP_DATASET.VAL_SPLIT
        if position == 'all':
            all_positions = ['supine', 'left', 'right']
        elif position == 'lateral':
            all_positions = ['left', 'right']
        else:
            all_positions = [position]
        self.data_list = []
        self.physics = []
        # Per-subject physique rows; column 2 is body weight (see __getitem__).
        danalab_physics = np.load('../dataset/SLP/danaLab/physiqueData.npy')
        simlab_physics = np.load('../dataset/SLP/simLab/physiqueData.npy')
        for position in all_positions:
            if cover_cond == 'cover12':
                cloud_list_1 = sorted(
                    glob.glob('../dataset/SLP/3d_data_{}_cover1/*bed_pcd.npy'.format(position)))
                cloud_list_2 = sorted(
                    glob.glob('../dataset/SLP/3d_data_{}_cover2/*bed_pcd.npy'.format(position)))
            elif cover_cond in ['uncover', 'cover1', 'cover2']:
                #TODO: in pre-processing script, use correct naming for destination folders (3d_data_xxx_UNCOVER)
                cloud_list = sorted(glob.glob('../dataset/SLP/3d_data_{}_{}/*bed_pcd.npy'.format(position, cover_cond)))
            else:
                raise ValueError
            # 15 frames per subject: indices [:900] = subjects 1-60,
            # [900:1530] = subjects 61-102 (danaLab), [1530:] = simLab.
            if cover_cond == 'cover12':
                if phase == 'train':
                    # use first 60 subjects for training
                    data_list = cloud_list_1[:900] + cloud_list_2[:900]
                    physics = np.concatenate((danalab_physics[:60, :], danalab_physics[:60, :]), axis=0)
                elif phase == 'val':
                    if val_split == 'dana':
                        # use subjects 61-102 for evaluation on danaLab data
                        data_list = cloud_list_1[900:1530] + cloud_list_2[900:1530]
                        physics = np.concatenate((danalab_physics[60:, :], danalab_physics[60:, :]), axis=0)
                    elif val_split == 'sim':
                        data_list = cloud_list_1[1530:] + cloud_list_2[1530:]
                        # for cover2 of simLab, depth data is not available for subjects 3 and 4
                        physics = np.concatenate(
                            (simlab_physics, simlab_physics[:2, :], simlab_physics[4:, :]), axis=0)
                    else:
                        raise ValueError
            else:
                if phase == 'train':
                    data_list = cloud_list[:900]
                    physics = danalab_physics[:60, :]
                else:
                    if val_split == 'dana':
                        data_list = cloud_list[900:1530]
                        physics = danalab_physics[60:, :]
                    elif val_split == 'sim':
                        data_list = cloud_list[1530:]
                        if cover_cond == 'cover2':
                            # for cover2 of simLab, depth data is not available for subjects 3 and 4
                            physics = np.concatenate((simlab_physics[:2, :], simlab_physics[4:, :]), axis=0)
                        else:
                            physics = simlab_physics
                    else:
                        raise ValueError
            self.data_list.extend(data_list)
            if len(self.physics) == 0:
                self.physics = physics
            else:
                self.physics = np.concatenate((self.physics, physics), axis=0)
        self.num_points = cfg.INPUT.NUM_POINTS
        self.normalize_output = cfg.INPUT.NORMALIZE_OUTPUT
        # Normalization bounds always come from the danaLab training split.
        self.min_weight_train = np.min(danalab_physics[:60, 2])
        self.max_weight_train = np.max(danalab_physics[:60, 2])
        self.is_train = True if phase == 'train' else False
        self.rot_degree = cfg.INPUT.ROTATION_DEGREE
        self.voxelize = cfg.INPUT.VOXELIZE
        self.grid_shape = np.array(cfg.INPUT.VOXEL_GRID_SHAPE)
        self.grid_size = np.array(cfg.INPUT.VOXEL_GRID_SIZE)
        self.min_cloud_values = np.array(cfg.INPUT.MIN_CLOUD_VALUES)

    def __getitem__(self, idx):
        # Load the cloud and center it at the origin.
        pcd = np.float32(np.load(self.data_list[idx]))
        pcd -= np.mean(pcd, axis=0)
        # rotatio around the z-axis
        if self.is_train and self.rot_degree > 0.:
            theta = np.deg2rad(np.random.uniform(-self.rot_degree, self.rot_degree))
            rotation_matrix = np.float32(np.array([[np.cos(theta), -np.sin(theta), 0],
                                                   [np.sin(theta), np.cos(theta), 0],
                                                   [0., 0., 1.], ]))
            pcd = np.dot(pcd, rotation_matrix)
        if not self.voxelize:
            pcd = torch.from_numpy(np.transpose(pcd))
            # subsample input points
            if self.num_points > 0:
                pts_size = pcd.size(1)
                if pts_size >= self.num_points:
                    # Downsample: random permutation at train time, fixed
                    # seed at eval time for reproducibility.
                    if self.is_train:
                        permutation = torch.randperm(pcd.size(1))
                    else:
                        permutation = torch.from_numpy(np.random.default_rng(12345).permutation(pcd.size(1)))
                    pcd = pcd[:, permutation]
                    pcd = pcd[:, :self.num_points]
                else:
                    # Upsample by sampling with replacement.
                    if self.is_train:
                        pts_idx = torch.from_numpy(np.random.choice(pts_size, self.num_points, replace=True))
                    else:
                        pts_idx = torch.from_numpy(
                            np.random.default_rng(12345).choice(pts_size, self.num_points, replace=True))
                    pcd = pcd[:, pts_idx]
            input = pcd
        else:
            input = self.cloud2vol(pcd)
        # 15 frames per subject -> physics row index; column 2 is weight.
        weight = self.physics[int(idx / 15), 2]
        # normalize target weight to [0, 1]
        if self.normalize_output:
            weight = (weight - self.min_weight_train) / (self.max_weight_train - self.min_weight_train)
        target = torch.from_numpy(np.float32([weight]))
        return input, target, idx

    def __len__(self):
        return len(self.data_list)

    def cloud2vol(self, cloud):
        # Convert a point cloud into a binary occupancy voxel grid.
        # NOTE(review): ``cloud -= ...`` mutates the caller's array in place;
        # harmless for the __getitem__ call site, which no longer uses pcd.
        cloud -= self.min_cloud_values
        cloud = cloud / (self.grid_size / self.grid_shape)
        cloud = np.floor(cloud).astype(int)
        # Clamp indices so out-of-range points land on the grid boundary.
        for i in range(3):
            cloud[:, i] = np.clip(cloud[:, i], 0, self.grid_shape[i] - 1)
        vol = np.zeros([1, *self.grid_shape])
        vol[0, cloud[:, 0], cloud[:, 1], cloud[:, 2]] = 1.
        vol = np.float32(vol)
        return vol
| StarcoderdataPython |
6505730 | __version__ = '0.0.1'
import pyperclip
def get_clipboard_data():
    """Return the current system clipboard contents as text."""
    return pyperclip.paste()
def set_clipboard_data(data):
    """Copy *data* to the system clipboard (pyperclip.copy returns None)."""
    return pyperclip.copy(data)
| StarcoderdataPython |
106539 | <gh_stars>1-10
# -*- coding: utf-8 -*-
"""Flask-Script subclasses."""
from flask import g, json, _request_ctx_stack, current_app, request
from flask_principal import identity_changed, Identity
from flask_script import (Command as BaseCommand, Manager as BaseManager,
Shell)
from werkzeug.datastructures import Headers
from .utils import load_config
from ..accounts import find_user_id, get_user, clear_get_user_cache
from ..core import sns, principals, cache, parse
from ..services import high_rq, low_rq
from .. import accounts, contacts, yos
from .. import models
def login(user_id):
    """Authenticates the current thread with a specific identity"""
    # Prepare the fake request.
    current_app.preprocess_request()
    # Create an identity.
    identity = Identity(user_id)
    # Set this identity on the thread.
    principals.set_identity(identity)
    # Tell listeners that the identity has changed.
    identity_changed.send(current_app, identity=identity)
    # Python 2 print statement (this module targets Python 2).
    print "Now impersonating %s: ObjectId('%s')" % (identity.user.username,
                                                    identity.id)
class Command(BaseCommand):
    """Base management command with a test client and local JWT config.

    On invocation it attaches a Flask test client, loads the local config
    and remembers the configured JWT token for authenticated requests.
    """
    def __call__(self, app=None, *args, **kwargs):
        # The test client lets commands call the app without a running server.
        self.client = app.test_client()
        self.config = load_config() or {}
        self.jwt_token = self.config.get('jwt_token')
        super(Command, self).__call__(app=app, *args, **kwargs)
    def jsonpost(self, *args, **kwargs):
        """Convenience method for making JSON POST requests.

        A 'data' kwarg is JSON-encoded; a 'jwt_token' kwarg is turned into
        a Bearer Authorization header (unless headers were already given).
        """
        kwargs.setdefault('content_type', 'application/json')
        if 'data' in kwargs:
            kwargs['data'] = json.dumps(kwargs['data'])
        if 'jwt_token' in kwargs:
            token = kwargs.pop('jwt_token')
            headers = Headers()
            headers.add('Authorization', 'Bearer ' + token)
            kwargs.setdefault('headers', headers)
        return self.client.post(*args, **kwargs)
class YoShell(Shell):
    """A normal script shell with better context"""
    # NOTE(review): this __init__ only delegates and could be removed.
    def __init__(self, *args, **kwargs):
        super(YoShell, self).__init__(*args, **kwargs)
    def get_context(self):
        """Return the names made available inside the interactive shell.

        Note: ``me=g.identity.user`` requires an authenticated request
        context (see LoggedInYoShell / login()).
        """
        return dict(accounts=accounts,
                    app=_request_ctx_stack.top.app,
                    cache=cache,
                    clear_get_user_cache=clear_get_user_cache,
                    contacts=contacts,
                    get_user=get_user,
                    high_rq=high_rq,
                    low_rq=low_rq,
                    me=g.identity.user,
                    models=models,
                    request=request,
                    sns=sns,
                    yos=yos)
class LoggedInYoShell(YoShell):
    """A pre-authenticated shell with a useful context"""
    # NOTE(review): this __init__ only delegates and could be removed.
    def __init__(self, *args, **kwargs):
        super(LoggedInYoShell, self).__init__(*args, **kwargs)
    def run(self, *args, **kwargs):
        """Impersonate ``app.impersonate`` (when set) before starting the shell."""
        if hasattr(_request_ctx_stack.top.app, 'impersonate'):
            username = getattr(_request_ctx_stack.top.app, 'impersonate')
            user_id = find_user_id(username=username)
            login(user_id)
        super(LoggedInYoShell, self).run(*args, **kwargs)
class Manager(BaseManager):
    """Manager subclass kept as a project-level extension point.

    The previous __init__/run overrides only delegated to the base class
    (and the zero-argument run() override hid the base method's
    parameters), so they are removed; behavior is unchanged for existing
    zero-argument callers.
    """
    pass
| StarcoderdataPython |
6589615 | <filename>recipes/Python/475113_HTML_Help/recipe-475113.py
================================================================================
html_help.py
================================================================================
import time
import Zam
class html_table:
    """A rows x columns grid of text cells rendered as an HTML <table>.

    indent: base indentation level of the emitted markup
    style: the string repeated per indentation level (e.g. spaces or a tab)
    Mutator methods return self so calls can be chained.
    """
    def __init__(self, rows, columns, indent, style):
        self.__matrix = Zam.matrix(rows, columns, '')
        self.__indent = indent
        self.__style = style
        # Raw attribute strings inserted into <table>, <tr> and <td> tags.
        self.__table_option = ''
        self.__row_option = ''
        self.__column_option = ''
    def mutate(self, row, column, text):
        """Replace the text stored in one cell; returns self."""
        assert type(text) is str
        self.__matrix[row][column] = text
        return self
    def access(self, row, column):
        """Return the text stored in one cell."""
        return self.__matrix[row][column]
    def table_option(self, string):
        """Set the attribute string for the <table> tag; returns self."""
        assert type(string) is str
        self.__table_option = string
        return self
    def row_option(self, string):
        """Set the attribute string for every <tr> tag; returns self."""
        assert type(string) is str
        self.__row_option = string
        return self
    def column_option(self, string):
        """Set the attribute string for every <td> tag; returns self."""
        assert type(string) is str
        self.__column_option = string
        return self
    def html(self):
        """Render the table as an indented HTML string (no trailing newline)."""
        html = self.__style * self.__indent + '<table'
        if self.__table_option:
            html += ' ' + self.__table_option
        html += '>\n'
        for row in self.__matrix:
            html += self.__style * (self.__indent + 1) + '<tr'
            if self.__row_option:
                html += ' ' + self.__row_option
            html += '>\n'
            for item in row:
                html += self.__style * (self.__indent + 2) + '<td'
                if self.__column_option:
                    html += ' ' + self.__column_option
                html += '>\n'
                # Each line of the cell text is re-indented one level deeper.
                html += ''.join([self.__style * (self.__indent + 3) + line + '\n' for line in item.splitlines()])
                html += self.__style * (self.__indent + 2) + '</td>\n'
            html += self.__style * (self.__indent + 1) + '</tr>\n'
        return html + self.__style * self.__indent + '</table>'
class html_month:
    """Render one calendar month as an HTML table.

    The first table row holds the weekday names; every day cell starts
    with a fixed 15-character prefix ('<b>' + zero-padded day + '</b>',
    a newline, '<hr>' and a newline). Text attached via mutate() is
    stored after that prefix.
    """
    def __init__(self, year, month, indent, style):
        self.matrix = matrix = self.__make_matrix(year, month)
        self.__table = html_table(len(matrix) + 1, 7, indent, style)
        # Header row with the weekday names.
        for index, item in enumerate(('Sunday', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday')):
            self.__table.mutate(0, index, '<b>' + item + '</b>')
        # Day cells: bold zero-padded day number followed by a rule.
        for row in range(len(matrix)):
            for column in range(7):
                if matrix[row][column]:
                    self.__table.mutate(row + 1, column, '<b>' + str(matrix[row][column]).zfill(2) + '</b>\n<hr>\n')
    def __make_matrix(self, year, month):
        """Build one Zam.array of 7 slots per week; empty cells hold 0."""
        rows = [Zam.array(7, 0)]
        row = 0
        # Start at the first day of the month (+4h guards against DST edges).
        now = time.localtime(time.mktime(time.strptime(str(year).zfill(2) + ' ' + str(month).zfill(2) + ' 01', '%y %m %d')) + 14400)
        self.__first_day = (now.tm_wday + 1) % 7
        once = False
        while now.tm_mon == month:
            if once:
                if now.tm_wday == 6:
                    # A new week starts on Sunday.
                    rows.append(Zam.array(7, 0))
                    row += 1
            else:
                once = True
            rows[row][(now.tm_wday + 1) % 7] = now.tm_mday
            self.__days_in_month = now.tm_mday
            now = time.localtime(time.mktime(now) + 86400)
        return rows
    def mutate(self, day, text):
        """Replace the text shown below the day number of *day*; returns self."""
        row, column = self.__get_pos(day)
        # Keep the 15-character '<b>DD</b>' + rule prefix, replace the rest.
        self.__table.mutate(row, column, self.__table.access(row, column)[:15] + text)
        return self
    def access(self, day):
        """Return the text currently stored below the day number of *day*."""
        row, column = self.__get_pos(day)
        return self.__table.access(row, column)[15:]
    def __get_pos(self, day):
        """Map a day-of-month to its (row, column) in the table."""
        assert 1 <= day <= self.__days_in_month
        pos = self.__first_day - 1 + day
        # Floor division: plain '/' on ints only floors on Python 2, so
        # '//' keeps the row index an int on Python 3 as well.
        return pos // 7 + 1, pos % 7
    def table_option(self, string):
        """Set the <table> tag attributes (delegates to html_table); returns self."""
        self.__table.table_option(string)
        return self
    def row_option(self, string):
        """Set the <tr> tag attributes (delegates to html_table); returns self."""
        self.__table.row_option(string)
        return self
    def column_option(self, string):
        """Set the <td> tag attributes (delegates to html_table); returns self."""
        self.__table.column_option(string)
        return self
    def html(self):
        """Render the month as an HTML string."""
        return self.__table.html()
================================================================================
Zam.py
================================================================================
# Name & Description
# ==================
'''Support module for array and matrix use.
This module provides two classes that emulate one and two
dimentional lists with fixed sizes but mutable internals.'''
# Data & Imports
# ==============
__all__ = ['array', 'matrix']
__version__ = '1.1'
import sys
# Public Names
# ============
class array(object):
    '''array(length) -> new array
    array(length, value) -> initialized from value

    A fixed-length sequence: items can be read and replaced, and "del"
    resets a slot to None instead of shrinking the array.'''
    def __init__(self, length, value=None):
        '''x.__init__(...) initializes x'''
        # Build the backing list directly. The original assigned into the
        # result of range(length), which is a list only on Python 2; on
        # Python 3 a range object does not support item assignment.
        self.__data = [value] * length
    def __repr__(self):
        '''x.__repr__() <==> repr(x)'''
        return repr(self.__data)
    def __len__(self):
        '''x.__len__() <==> len(x)'''
        return len(self.__data)
    def __getitem__(self, key):
        '''x.__getitem__(y) <==> x[y]'''
        return self.__data[key]
    def __setitem__(self, key, value):
        '''x.__setitem__(i, y) <==> x[i]=y'''
        self.__data[key] = value
    def __delitem__(self, key):
        '''x.__delitem__(y) <==> del x[y]
        Resets the slot to None; the length never changes.'''
        self.__data[key] = None
    def __iter__(self):
        '''x.__iter__() <==> iter(x)'''
        return iter(self.__data)
    def __contains__(self, value):
        '''x.__contains__(y) <==> y in x'''
        return value in self.__data
class matrix(object):
    '''matrix(rows, columns) -> new matrix
    matrix(rows, columns, value) -> initialized from value

    A fixed-size two dimensional grid backed by an array of arrays.'''
    def __init__(self, rows, columns, value=None):
        '''x.__init__(...) initializes x'''
        self.__data = array(rows)
        for row_index in range(rows):
            self.__data[row_index] = array(columns, value)
    def __repr__(self):
        '''x.__repr__() <==> repr(x)'''
        return repr(self.__data)
    def __len__(self):
        '''x.__len__() <==> len(x)
        Returns the number of rows.'''
        return len(self.__data)
    def __getitem__(self, key):
        '''x.__getitem__(y) <==> x[y]
        Returns a whole row (an array).'''
        return self.__data[key]
    def __setitem__(self, key, value):
        '''x.__setitem__(i, y) <==> x[i]=y
        Replaces a row with a fresh array filled with *value*.'''
        self.__data[key] = array(len(self.__data[key]), value)
    def __delitem__(self, key):
        '''x.__delitem__(y) <==> del x[y]
        Resets a row to an array of None slots.'''
        self.__data[key] = array(len(self.__data[key]))
    def __iter__(self):
        '''x.__iter__() <==> iter(x)
        Iterates over the rows.'''
        return iter(self.__data)
    def __contains__(self, value):
        '''x.__contains__(y) <==> y in x
        True when any row contains *value*.'''
        return any(value in row for row in self.__data)
# Private Names
# =============
def main():
    """CGI entry point: print this module's own source as plain text.

    NOTE: Python 2 only (print statement and the file() builtin).
    """
    print 'Content-Type: text/plain'
    print
    print file(sys.argv[0]).read()
# Execute Main
# ============
if __name__ == '__main__':
    main()
| StarcoderdataPython |
12808643 | from django import forms
from .models import Resource
class AddNewResourceForm(forms.ModelForm):
    """
    ModelForm for creating a Resource; only the `type` and `link`
    fields are exposed (the commented-out fields were intentionally
    disabled).
    """
    class Meta:
        model = Resource
        fields = ("type", "link")
        # fields = ("name", "type", "description", "link")
    def __init__(self, *args, **kwargs):
        """
        Attach CSS classes and placeholder text to the rendered widgets.
        """
        super(AddNewResourceForm, self).__init__(*args, **kwargs)
        # self.fields['name'].widget.attrs.update({'class': 'new-res-name', 'placeholder': 'max 150 characters'})
        self.fields['type'].widget.attrs.update({'class': 'new-res-type', 'placeholder': 'max 50 characters'})
        # self.fields['description'].widget.attrs.update({'class': 'new-res-description',
        #                                                 'placeholder': 'max 250 characters'})
        self.fields['link'].widget.attrs.update({'class': 'new-res-link'})
| StarcoderdataPython |
311353 | <gh_stars>10-100
import pathlib
import requests
import zipfile
URL = "https://web.archive.org/web/20200318000445/http://archive.ics.uci.edu/ml/machine-learning-databases/00388/data.csv"
def download(from_path, to_path):
    """Download the file at URL *from_path* to pathlib.Path *to_path*.

    Skips the download when *to_path* already exists. Network and HTTP
    errors are reported but not raised (best effort, as before).
    """
    if to_path.exists():
        return
    try:
        r = requests.get(url=from_path)
        # Surface HTTP error statuses instead of silently writing the
        # error page body to disk.
        r.raise_for_status()
        to_path.write_bytes(r.content)
    except requests.RequestException as exc:
        # Narrowed from a bare "except:", which also swallowed
        # KeyboardInterrupt and hid the actual failure reason.
        print("error downloading {}: {}".format(str(from_path), exc))
if __name__ == "__main__":
print("downloading eeg epilepsy data")
download(URL, to_path=pathlib.Path("dataset.csv"))
| StarcoderdataPython |
6564497 | from __future__ import print_function
import numpy as np
import os
import glob
import six
from .image import *
class NpzGeneratorDataset(object):
    '''
    Dataset backed by a folder of .npz archives, one sample per file.
    Splits the files into train/test sets and loads archives on demand
    (or all up front when preload=True).
    '''
    def __init__(self, name, split=0.1, preload=False):
        '''
        Set name of directory to load files from

        Parameters:
        -----------
        name: the directory (used as a glob pattern)
        split: portion of the data files reserved for testing/validation
        preload: load all files into memory when starting up
        '''
        self.name = name
        self.split = split
        self.train = []
        self.test = []
        self.preload = preload
        self.preload_cache = {}
        # list of keys which contain lists of jpeg files
        self.load_jpeg = []
        # list of keys which contain lists of png files
        self.load_png = []
        self.file_extension = 'npz'
        self.file = None

    def write(self, *args, **kwargs):
        """This dataset is read-only."""
        raise NotImplementedError('this dataset does not save things')

    def load(self, success_only=False, verbose=0, max_img_size=224):
        '''
        Scan the folder, keep acceptable files, extract the dataset
        structure from the first acceptable file, and split the files
        into train and test sets.
        '''
        files = glob.glob(os.path.expanduser(self.name))
        files.sort()
        sample = {}
        acceptable_files = []
        structure_extracted = False
        for filename in files:
            if self.file_extension not in filename or filename[0] == '.':
                continue
            # Don't load error failures -- they're bad files
            if 'error.failure' in filename:
                continue
            if success_only and 'success' not in filename:
                continue
            if not structure_extracted:
                # Extract the structure from the FIRST acceptable file.
                # (Bug fix: the old code tested the raw enumerate() index
                # and incremented a variable that enumerate immediately
                # overwrote, so if the first globbed file was filtered out
                # the structure was never read at all.)
                print('Extracting dataset structure from file: ' + str(filename))
                with self._load(filename) as fsample:
                    for key, value in six.iteritems(fsample):
                        # Hack. shouldn't be duplicated here
                        if key in self.load_jpeg or key in self.load_png:
                            value = ConvertImageListToNumpy(value)
                        # Hack. Replace oversized image stacks with a
                        # fixed-size placeholder.
                        shp = value.shape
                        if len(shp) == 4 and shp[-1] == 3 and \
                                (shp[1] > max_img_size or shp[2] > max_img_size):
                            value = np.zeros((shp[0], 224, 224, 3), dtype=np.float32)
                        if value.shape[0] == 0:
                            print('key ' + str(key) + ' had 0 entries, skipping sample')
                            continue
                        if key not in sample:
                            print('adding key to sample: ' + str(key))
                            sample[key] = np.array(value)
                        else:
                            # Should never be reached: keys are unique
                            # within a single npz archive.
                            sample[key] = np.concatenate([sample[key], value], axis=0)
                structure_extracted = True
            acceptable_files.append(filename)
        if verbose > 0:
            print('files that will be used in dataset: \n' + str(acceptable_files))
        idx = np.array(range(len(acceptable_files)))
        length = max(1, int(self.split * len(acceptable_files)))
        print("---------------------------------------------")
        print("Loaded data.")
        print("# Total examples:", len(acceptable_files))
        print("# Validation examples:", length)
        print("---------------------------------------------")
        self.test = [acceptable_files[i] for i in idx[:length]]
        self.train = [acceptable_files[i] for i in idx[length:]]
        # Sanity check: the two splits must be disjoint.
        for i, filename in enumerate(self.test):
            if filename in self.train:
                raise RuntimeError('error with test/train setup! ' +
                                   filename + ' in training!')
        np.random.shuffle(self.test)
        np.random.shuffle(self.train)
        if self.preload:
            print("Preloading all files...")
            for f in self.test + self.train:
                nm = os.path.join(self.name, f)
                self.preload_cache[nm] = self._load(nm)
        return sample  # structure example (key -> numpy array)

    def sampleTrainFilename(self):
        """Return the path of a random training file."""
        return os.path.join(self.name,
                            self.train[np.random.randint(len(self.train))])

    def sampleTestFilename(self):
        """Return the path of a random test file."""
        return os.path.join(self.name,
                            self.test[np.random.randint(len(self.test))])

    def testFiles(self):
        """List of test file names."""
        return self.test

    def trainFiles(self):
        """List of training file names."""
        return self.train

    def numTest(self):
        """Number of test files."""
        return len(self.test)

    def loadTest(self, i):
        """Load test sample *i*; returns (data, success_flag)."""
        if i > len(self.test):
            raise RuntimeError('index %d greater than number of files' % i)
        filename = self.test[i]
        success = 'success' in filename
        nm = os.path.join(self.name, filename)
        if nm in self.preload_cache:
            return self.preload_cache[nm], success
        return self._load(nm), success

    def sampleTrain(self):
        """Load a random training sample; returns (data, filename)."""
        filename = self.sampleTrainFilename()
        if filename in self.preload_cache:
            return self.preload_cache[filename], filename
        try:
            sample = self._load(filename)
        except Exception as e:
            raise RuntimeError("Could not load file " + filename +
                               ": " + str(e))
        return sample, filename

    def sampleTest(self):
        """Load a random test sample; returns (data, filename)."""
        filename = self.sampleTestFilename()
        if filename in self.preload_cache:
            return self.preload_cache[filename], filename
        try:
            sample = self._load(filename)
        except Exception as e:
            raise RuntimeError("Could not load file " + filename + ": " +
                               str(e))
        return sample, filename

    def _load(self, filename):
        """Remember *filename*; the archive itself is opened lazily by `with`."""
        self.file = filename
        return self

    # Interface for `with`: opening the remembered file on entry.
    def __enter__(self):
        return np.load(self.file)

    def __exit__(self, *args):
        self.file = None

    def loadFile(self, filename):
        """Load *filename* relative to the dataset directory."""
        full_filename = os.path.join(self.name, filename)
        return self._load(full_filename)
| StarcoderdataPython |
11305423 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# flake8: noqa
from qiniu import Auth, put_file, etag, urlsafe_base64_encode
access_key = '<KEY>'
secret_key = '<KEY>'
# Initialize the Auth object with the account credentials.
q = Auth(access_key, secret_key)
# The bucket to test against; this key must already exist in the bucket.
bucket_name = 'crawler-pub'
key = 'mov/crawler.mov'
# Name of the transcoding queue (pipeline) to use; empty = default queue.
pipeline = ''
# Transcoding parameters (video transcoding example).
fops = 'avthumb/mp4/ab/128k/ar/22050/acodec/libfaac/r/30/vb/300k/vcodec/libx264/s/320x240/autoscale/1/stripmeta/0'
# Append the '|saveas' parameter to store the processed file under the given
# bucket:key; without it the result is saved in the current bucket.
saveas_key = urlsafe_base64_encode('crawler-pub:crawler-after.mov')
fops = fops+'|saveas/'+saveas_key
# Specify the fops and pipeline in the upload policy.
policy = {
    'persistentOps': fops,
    'persistentPipeline': pipeline
}
token = q.upload_token(bucket_name, key, 3600, policy)
localfile = './python_video.flv'
ret, info = put_file(token, key, localfile)
print(info)
assert ret['key'] == key
assert ret['hash'] == etag(localfile)
| StarcoderdataPython |
3416658 | <filename>opentnsim/core.py
"""Main module."""
# package(s) related to time, space and id
import json
import logging
import uuid
# you need these dependencies (you can get these from anaconda)
# package(s) related to the simulation
import simpy
import random
import networkx as nx
import numpy as np
import math
import pandas as pd
# spatial libraries
import pyproj
import shapely.geometry
# additional packages
import datetime, time
logger = logging.getLogger(__name__)
class SimpyObject:
    """General object which can be extended by any class requiring a simpy environment

    env: a simpy Environment
    """
    def __init__(self, env, *args, **kwargs):
        """Store the simulation environment; cooperates with other mixins via super()."""
        super().__init__(*args, **kwargs)
        self.env = env
class HasResource(SimpyObject):
    """Something that has a resource limitation, a resource request must be granted before the object can be used.

    nr_resources: nr of requests that can be handled simultaneously
    priority: when True, requests are served in priority order
    """
    def __init__(self, nr_resources=1, priority=False, *args, **kwargs):
        super().__init__(*args, **kwargs)
        """Initialization"""
        # Priority queueing is opt-in; both variants share the same capacity.
        self.resource = (
            simpy.PriorityResource(self.env, capacity=nr_resources)
            if priority
            else simpy.Resource(self.env, capacity=nr_resources)
        )
class Identifiable:
    """Mixin class: gives an object a human-readable name and a unique id.

    name: a name
    id: a unique id generated with uuid when not supplied
    """
    def __init__(self, name, id=None, *args, **kwargs):
        """Store the name and either the supplied id or a fresh uuid1 string."""
        super().__init__(*args, **kwargs)
        self.name = name
        self.id = id or str(uuid.uuid1())
class Locatable:
    """Mixin class: attaches a (geojson) geometry and a graph-node slot.

    geometry: can be a point as well as a polygon
    """
    def __init__(self, geometry, *args, **kwargs):
        """Record the geometry; the graph node is filled in later."""
        super().__init__(*args, **kwargs)
        self.geometry = geometry
        self.node = None
class Neighbours:
    """Can be added to a locatable object (list)

    travel_to: list of locatables to which can be travelled
    """
    def __init__(self, travel_to=None, *args, **kwargs):
        """Store the reachable neighbours.

        Bug fix: this method was previously named ``___init`` (a typo for
        ``__init__``), so the attribute was never set on construction.
        ``travel_to`` defaults to None to stay compatible with zero-arg
        construction.
        """
        super().__init__(*args, **kwargs)
        self.neighbours = travel_to
class HasLength(SimpyObject):
    """Mixin class: Something with a length capacity tracked by simpy containers.

    length: total length the object can hold
    remaining_length: initial level of both containers
    total_requested: a counter that helps to prevent over requesting
    """
    def __init__(self, length, remaining_length=0, total_requested=0, *args, **kwargs):
        super().__init__(*args, **kwargs)
        """Initialization"""
        # NOTE(review): total_requested is accepted but never stored, and
        # both containers are initialised from the same remaining_length --
        # confirm this is intended.
        self.length = simpy.Container(self.env, capacity = length, init=remaining_length)
        self.pos_length = simpy.Container(self.env, capacity = length, init=remaining_length)
class HasContainer(SimpyObject):
    """Mixin class: adds cargo storage to an object.

    capacity: amount the container can hold
    level: amount the container holds initially
    container: a simpy container holding the cargo
    total_requested: a counter that helps to prevent over requesting
    """
    def __init__(self, capacity, level=0, total_requested=0, *args, **kwargs):
        """Create the simpy container and the request counter."""
        super().__init__(*args, **kwargs)
        self.total_requested = total_requested
        self.container = simpy.Container(self.env, capacity, init=level)

    @property
    def is_loaded(self):
        """True when any cargo is present."""
        return self.container.level > 0

    @property
    def filling_degree(self):
        """Fraction of the capacity currently in use (0..1)."""
        return self.container.level / self.container.capacity
class Log(SimpyObject):
    """Mixin class: event logging for simulation objects.

    log: log message [format: 'start activity' or 'stop activity']
    t: timestamp (epoch seconds)
    value: a value can be logged as well
    geometry: value from locatable (lat, lon)
    """
    def __init__(self, *args, **kwargs):
        """Start with an empty log."""
        super().__init__(*args, **kwargs)
        self.log = {"Message": [], "Timestamp": [], "Value": [], "Geometry": []}

    def log_entry(self, log, t, value, geometry_log):
        """Append one entry; the epoch timestamp is stored as a datetime."""
        self.log["Message"].append(log)
        self.log["Timestamp"].append(datetime.datetime.fromtimestamp(t))
        self.log["Value"].append(value)
        self.log["Geometry"].append(geometry_log)

    def get_log_as_json(self):
        """Return the log as a list of dicts, one per logged entry."""
        entries = zip(
            self.log["Message"],
            self.log["Timestamp"],
            self.log["Value"],
            self.log["Geometry"],
        )
        # Build the list in one pass (also avoids shadowing the json module).
        return [
            dict(message=msg, time=t, value=value, geometry_log=geometry_log)
            for msg, t, value, geometry_log in entries
        ]
class VesselProperties:
    """Mixin class: Something that has vessel properties
    This mixin is updated to better accommodate the ConsumesEnergy mixin

    type: can contain info on vessel type (avv class, cemt_class or other)
    B: vessel width
    L: vessel length
    H_e: vessel height unloaded
    H_f: vessel height loaded
    T_e: draught unloaded
    T_f: draught loaded

    Add information on possible restrictions to the vessels, i.e. height, width, etc.
    """
    def __init__(
        self,
        type,
        B,
        L,
        H_e,
        H_f,
        T_e,
        T_f,
        *args,
        **kwargs
    ):
        super().__init__(*args, **kwargs)
        """Initialization"""
        self.type = type
        self.B = B
        self.L = L
        self.H_e = H_e
        # Bug fix: this was previously assigned H_e, which made the H
        # property constant regardless of filling degree.
        self.H_f = H_f
        self.T_e = T_e
        self.T_f = T_f

    @property
    def H(self):
        """Calculate current height based on filling degree."""
        return (
            self.filling_degree * (self.H_f - self.H_e)
            + self.H_e
        )

    @property
    def T(self):
        """Calculate current draught based on filling degree.

        Here we should implement the rules from Van Dorsser et al
        https://www.researchgate.net/publication/344340126_The_effect_of_low_water_on_loading_capacity_of_inland_ships
        """
        return (
            self.filling_degree * (self.T_f - self.T_e)
            + self.T_e
        )

    def get_route(
        self,
        origin,
        destination,
        graph=None,
        minWidth=None,
        minHeight=None,
        minDepth=None,
        randomSeed=4,
    ):
        """Calculate a path honouring the vessel restrictions.

        When the graph's edges carry Width/Height/Depth attributes, only
        edges that satisfy the minimum dimensions are considered;
        otherwise the plain shortest path is returned.
        """
        graph = graph if graph else self.env.FG
        minWidth = minWidth if minWidth else 1.1 * self.B
        # Bug fix: minHeight and minDepth previously defaulted from
        # minWidth instead of from the vessel's height/draught.
        minHeight = minHeight if minHeight else 1.1 * self.H
        minDepth = minDepth if minDepth else 1.1 * self.T

        # Check if information on restrictions is added to the edges by
        # sampling one (seeded, hence reproducible) edge.
        random.seed(randomSeed)
        edge = random.choice(list(graph.edges(data=True)))
        edge_attrs = list(edge[2].keys())

        # IMPROVE THIS TO CHECK ALL EDGES AND COMBINATIONS OF RESTRICTIONS
        if all(item in edge_attrs for item in ["Width", "Height", "Depth"]):
            edges = []
            nodes = []

            for edge in graph.edges(data=True):
                if (
                    edge[2]["Width"] >= minWidth
                    and edge[2]["Height"] >= minHeight
                    and edge[2]["Depth"] >= minDepth
                ):
                    edges.append(edge)
                    nodes.append(graph.nodes[edge[0]])
                    nodes.append(graph.nodes[edge[1]])

            subGraph = graph.__class__()

            for node in nodes:
                subGraph.add_node(
                    node["name"],
                    name=node["name"],
                    geometry=node["geometry"],
                    position=(node["geometry"].x, node["geometry"].y),
                )

            for edge in edges:
                subGraph.add_edge(edge[0], edge[1], attr_dict=edge[2])

            try:
                return nx.dijkstra_path(subGraph, origin, destination)
            except nx.NetworkXException:
                # Narrowed from a bare "except:": only path-finding
                # failures should become the ValueError below.
                raise ValueError(
                    "No path was found with the given boundary conditions."
                )

        # If not, return shortest path
        else:
            return nx.dijkstra_path(graph, origin, destination)
class ConsumesEnergy:
"""Mixin class: Something that consumes energy.
P_installed: installed engine power [kW]
L_w: weight class of the ship (depending on carrying capacity) (classes: L1 (=1), L2 (=2), L3 (=3))
C_b: block coefficient ('fullness') [-]
nu: kinematic viscosity [m^2/s]
rho: density of the surrounding water [kg/m^3]
g: gravitational accelleration [m/s^2]
x: number of propellors [-]
eta_0: open water efficiency of propellor [-]
eta_r: relative rotative efficiency [-]
eta_t: transmission efficiency [-]
eta_g: gearing efficiency [-]
c_stern: determines shape of the afterbody [-]
one_k2: appendage resistance factor [-]
c_year: construction year of the engine [y]
"""
    def __init__(
        self,
        P_installed,
        L_w,
        C_b,
        nu=1 * 10 ** (-6),  # kinematic viscosity
        rho=1000,
        g=9.81,
        x=2,  # number of propellors
        eta_0=0.6,
        eta_r=1.00,
        eta_t=0.98,
        eta_g=0.96,
        c_stern=0,
        one_k2=2.5,
        *args,
        **kwargs
    ):
        """Store the engine/hull parameters (see class docstring for units)."""
        super().__init__(*args, **kwargs)
        """Initialization"""
        self.P_installed = P_installed
        self.L_w = L_w
        self.C_b = C_b
        self.nu = nu
        self.rho = rho
        self.g = g
        self.x = x
        self.eta_0 = eta_0
        self.eta_r = eta_r
        self.eta_t = eta_t
        self.eta_g = eta_g
        self.c_stern = c_stern
        self.one_k2 = one_k2
        # The construction year of the engine is generated once (randomly,
        # via a Weibull-distributed engine age), instead of per time step.
        self.c_year = self.calculate_engine_age()
# The engine age and construction year of the engine is computed with the function below.
# The construction year of the engine is used in the emission functions (1) emission_factors_general and (2) correction_factors
    def calculate_engine_age(self):
        """Calculating the construction year of the engine, dependend on a Weibull function with
        shape factor 'k', and scale factor 'lmb', which are determined by the weight class L_w.

        Note: the result is random (np.random.weibull), so repeated calls
        give different construction years unless numpy's RNG is seeded.
        """
        # Determining which shape and scale factor to use, based on the weight class L_w = L1, L2 or L3
        if self.L_w == 1:  # Weight class L1
            self.k = 1.3
            self.lmb = 20.5
        if self.L_w == 2:  # Weight class L2
            self.k = 1.12
            self.lmb = 18.5
        if self.L_w == 3:  # Weight class L3
            self.k = 1.26
            self.lmb = 18.6
        # The age of the engine
        self.age = int(np.random.weibull(self.k) * self.lmb)
        # Current year (TO DO: fix hardcoded year)
        # NOTE(review): hardcoded reference year; ages will drift as real
        # time passes. Consider datetime.date.today().year.
        # self.year = datetime.date.year
        self.year = 2021
        # Construction year of the engine
        self.c_year = self.year - self.age
        print('The construction year of the engine is', self.c_year)
        return self.c_year
    def calculate_properties(self):
        """Calculate a number of basic vessel properties.

        Uses B, L, T and C_b from the mixed-in VesselProperties; must run
        before the resistance calculations, which read these attributes.
        """
        self.C_M = 1.006 - 0.0056 * self.C_b ** (-3.56)  # Midship section coefficient
        self.C_wp = (1 + 2 * self.C_b) / 3  # Waterplane coefficient
        self.C_p = self.C_b / self.C_M  # Prismatic coefficient
        self.delta = self.C_b * self.L * self.B * self.T  # Water displacement
        self.lcb = -13.5 + 19.4 * self.C_p  # longitudinal center of buoyancy
        self.L_R = self.L * (1 - self.C_p + (0.06 * self.C_p * self.lcb) / (
                4 * self.C_p - 1))  # parameter reflecting the length of the run
        self.A_T = 0.2 * self.B * self.T  # transverse area of the transom
        # Total wet area
        self.S_T = self.L * (2 * self.T + self.B) * np.sqrt(self.C_M) * (
                0.453 + 0.4425 * self.C_b - 0.2862 * self.C_M - 0.003467 * (
                self.B / self.T) + 0.3696 * self.C_wp)  # + 2.38 * (self.A_BT / self.C_b)
        self.S_APP = 0.05 * self.S_T  # Wet area of appendages
        self.S_B = self.L * self.B  # Area of flat bottom
        self.D_s = 0.7 * self.T  # Diameter of the screw
    def calculate_frictional_resistance(self, V_0, h):
        """1) Frictional resistance

        - 1st resistance component defined by Holtrop and Mennen (1982)
        - A modification to the original friction line is applied, based on literature of Zeng (2018), to account for shallow water effects

        V_0: vessel speed -- presumably in m/s; confirm against callers
        h: water depth -- presumably in m; confirm against callers
        Requires calculate_properties() to have set S_B and S_T.
        """
        self.R_e = V_0 * self.L / self.nu  # Reynolds number
        self.D = h - self.T  # distance from bottom ship to the bottom of the fairway
        # Friction coefficient in deep water
        self.Cf_0 = 0.075 / ((np.log10(self.R_e) - 2) ** 2)
        # Friction coefficient proposed, taking into account shallow water effects
        self.Cf_proposed = (0.08169 / ((np.log10(self.R_e) - 1.717) ** 2)) * (
                1 + (0.003998 / (np.log10(self.R_e) - 4.393)) * (self.D / self.L) ** (-1.083))
        # 'a' is the coefficient needed to calculate the Katsui friction coefficient
        self.a = 0.042612 * np.log10(self.R_e) + 0.56725
        self.Cf_katsui = 0.0066577 / ((np.log10(self.R_e) - 4.3762) ** self.a)
        # The average velocity underneath the ship, taking into account the shallow water effect
        if h / self.T <= 4:
            self.V_B = 0.4277 * V_0 * np.exp((h / self.T) ** (-0.07625))
        else:
            self.V_B = V_0
        # cf_proposed cannot be applied directly, since a vessel also has non-horizontal wet surfaces that have to be taken
        # into account. Therefore, the following formula for the final friction coefficient 'C_f' is defined:
        self.C_f = self.Cf_0 + (self.Cf_proposed - self.Cf_katsui) * (self.S_B / self.S_T) * (self.V_B / V_0) ** 2
        # The total frictional resistance R_f [kN]:
        self.R_f = (self.C_f * 0.5 * self.rho * (V_0 ** 2) * self.S_T) / 1000
    def calculate_viscous_resistance(self):
        """2) Viscous resistance

        - 2nd resistance component defined by Holtrop and Mennen (1982)
        - Form factor (1 + k1) has to be multiplied by the frictional resistance R_f, to account for the effect of viscosity

        Requires calculate_properties() to have set L_R, delta and C_p.
        """
        # c_14 accounts for the specific shape of the afterbody
        self.c_14 = 1 + 0.0011 * self.c_stern
        # the form factor (1+k1) describes the viscous resistance
        self.one_k1 = 0.93 + 0.487 * self.c_14 * ((self.B / self.L) ** 1.068) * ((self.T / self.L) ** 0.461) * (
                (self.L / self.L_R) ** 0.122) * (((self.L ** 3) / self.delta) ** 0.365) * (
                              (1 - self.C_p) ** (-0.604))
    def calculate_appendage_resistance(self, V_0):
        """3) Appendage resistance

        - 3rd resistance component defined by Holtrop and Mennen (1982)
        - Appendages (like a rudder, shafts, skeg) result in additional frictional resistance

        Requires calculate_frictional_resistance() to have set C_f and
        calculate_properties() to have set S_APP.
        """
        # Frictional resistance resulting from wetted area of appendages: R_APP [kN]
        self.R_APP = (0.5 * self.rho * (V_0 ** 2) * self.S_APP * self.one_k2 * self.C_f) / 1000
    def karpov(self, V_0, h):
        """Intermediate calculation: Karpov

        - The Karpov method computes a velocity correction that accounts for limited water depth (corrected velocity V2)
        - V2 has to be implemented in the wave resistance and the residual resistance terms

        Sets self.F_nh, self.alpha_xx and self.V_2 (= V_0 / alpha_xx).
        """
        # The Froude number used in the Karpov method is the depth related froude number F_nh
        # The different alpha** curves are determined with a sixth power polynomial approximation in Excel
        # A distinction is made between different ranges of Froude numbers, because this resulted in a better approximation of the curve
        self.F_nh = V_0 / np.sqrt(self.g * h)
        if self.F_nh <= 0.4:
            # Low-Froude regime: one curve per h/T band.
            if 0 <= h / self.T < 1.75:
                self.alpha_xx = (-4 * 10 ** (
                    -12)) * self.F_nh ** 3 - 0.2143 * self.F_nh ** 2 - 0.0643 * self.F_nh + 0.9997
            if 1.75 <= h / self.T < 2.25:
                self.alpha_xx = -0.8333 * self.F_nh ** 3 + 0.25 * self.F_nh ** 2 - 0.0167 * self.F_nh + 1
            if 2.25 <= h / self.T < 2.75:
                self.alpha_xx = -1.25 * self.F_nh ** 4 + 0.5833 * self.F_nh ** 3 - 0.0375 * self.F_nh ** 2 - 0.0108 * self.F_nh + 1
            if h / self.T >= 2.75:
                self.alpha_xx = 1
        if self.F_nh > 0.4:
            # High-Froude regime: sixth-order polynomial fits per h/T band.
            if 0 <= h / self.T < 1.75:
                self.alpha_xx = -0.9274 * self.F_nh ** 6 + 9.5953 * self.F_nh ** 5 - 37.197 * self.F_nh ** 4 + 69.666 * self.F_nh ** 3 - 65.391 * self.F_nh ** 2 + 28.025 * self.F_nh - 3.4143
            if 1.75 <= h / self.T < 2.25:
                self.alpha_xx = 2.2152 * self.F_nh ** 6 - 11.852 * self.F_nh ** 5 + 21.499 * self.F_nh ** 4 - 12.174 * self.F_nh ** 3 - 4.7873 * self.F_nh ** 2 + 5.8662 * self.F_nh - 0.2652
            if 2.25 <= h / self.T < 2.75:
                self.alpha_xx = 1.2205 * self.F_nh ** 6 - 5.4999 * self.F_nh ** 5 + 5.7966 * self.F_nh ** 4 + 6.6491 * self.F_nh ** 3 - 16.123 * self.F_nh ** 2 + 9.2016 * self.F_nh - 0.6342
            if 2.75 <= h / self.T < 3.25:
                self.alpha_xx = -0.4085 * self.F_nh ** 6 + 4.534 * self.F_nh ** 5 - 18.443 * self.F_nh ** 4 + 35.744 * self.F_nh ** 3 - 34.381 * self.F_nh ** 2 + 15.042 * self.F_nh - 1.3807
            if 3.25 <= h / self.T < 3.75:
                self.alpha_xx = 0.4078 * self.F_nh ** 6 - 0.919 * self.F_nh ** 5 - 3.8292 * self.F_nh ** 4 + 15.738 * self.F_nh ** 3 - 19.766 * self.F_nh ** 2 + 9.7466 * self.F_nh - 0.6409
            if 3.75 <= h / self.T < 4.5:
                self.alpha_xx = 0.3067 * self.F_nh ** 6 - 0.3404 * self.F_nh ** 5 - 5.0511 * self.F_nh ** 4 + 16.892 * self.F_nh ** 3 - 20.265 * self.F_nh ** 2 + 9.9002 * self.F_nh - 0.6712
            if 4.5 <= h / self.T < 5.5:
                self.alpha_xx = 0.3212 * self.F_nh ** 6 - 0.3559 * self.F_nh ** 5 - 5.1056 * self.F_nh ** 4 + 16.926 * self.F_nh ** 3 - 20.253 * self.F_nh ** 2 + 10.013 * self.F_nh - 0.7196
            if 5.5 <= h / self.T < 6.5:
                self.alpha_xx = 0.9252 * self.F_nh ** 6 - 4.2574 * self.F_nh ** 5 + 5.0363 * self.F_nh ** 4 + 3.3282 * self.F_nh ** 3 - 10.367 * self.F_nh ** 2 + 6.3993 * self.F_nh - 0.2074
            if 6.5 <= h / self.T < 7.5:
                self.alpha_xx = 0.8442 * self.F_nh ** 6 - 4.0261 * self.F_nh ** 5 + 5.313 * self.F_nh ** 4 + 1.6442 * self.F_nh ** 3 - 8.1848 * self.F_nh ** 2 + 5.3209 * self.F_nh - 0.0267
            if 7.5 <= h / self.T < 8.5:
                self.alpha_xx = 0.1211 * self.F_nh ** 6 + 0.628 * self.F_nh ** 5 - 6.5106 * self.F_nh ** 4 + 16.7 * self.F_nh ** 3 - 18.267 * self.F_nh ** 2 + 8.7077 * self.F_nh - 0.4745
            if 8.5 <= h / self.T < 9.5:
                if self.F_nh < 0.6:
                    self.alpha_xx = 1
                if self.F_nh >= 0.6:
                    self.alpha_xx = -6.4069 * self.F_nh ** 6 + 47.308 * self.F_nh ** 5 - 141.93 * self.F_nh ** 4 + 220.23 * self.F_nh ** 3 - 185.05 * self.F_nh ** 2 + 79.25 * self.F_nh - 12.484
            if h / self.T >= 9.5:
                if self.F_nh < 0.6:
                    self.alpha_xx = 1
                if self.F_nh >= 0.6:
                    self.alpha_xx = -6.0727 * self.F_nh ** 6 + 44.97 * self.F_nh ** 5 - 135.21 * self.F_nh ** 4 + 210.13 * self.F_nh ** 3 - 176.72 * self.F_nh ** 2 + 75.728 * self.F_nh - 11.893
        self.V_2 = V_0 / self.alpha_xx
def calculate_wave_resistance(self, V_0, h):
    """4) Wave resistance, after Holtrop and Mennen (1982).

    - When the speed or the vessel size increases, the wave-making resistance increases.
    - In shallow water the wave resistance behaves asymptotically near the critical speed.

    Sets ``self.R_W`` [kN] and all intermediate coefficients
    (c_1, c_2, c_5, c_7, c_15, c_16, c_17, m_1, m_3, m_4, lmbda, i_E, F_n) on ``self``.

    Parameters:
        V_0: vessel speed [m/s] (corrected for shallow water via ``karpov``)
        h: water depth [m]
    """
    self.karpov(V_0, h)  # sets self.V_2, the shallow-water-corrected speed
    self.F_n = self.V_2 / np.sqrt(self.g * self.L)  # Froude number
    # Parameter c_7 is determined by the B/L ratio (three regimes).
    # Bug fix: the original used separate if-statements, so the trailing
    # else overwrote the B/L < 0.11 result with B/L.
    if self.B / self.L < 0.11:
        self.c_7 = 0.229577 * (self.B / self.L) ** 0.33333
    elif self.B / self.L > 0.25:
        self.c_7 = 0.5 - 0.0625 * (self.L / self.B)
    else:
        self.c_7 = self.B / self.L
    # Half angle of entrance in degrees
    self.i_E = 1 + 89 * np.exp(-((self.L / self.B) ** 0.80856) * ((1 - self.C_wp) ** 0.30484) * (
        (1 - self.C_p - 0.0225 * self.lcb) ** 0.6367) * ((self.L_R / self.B) ** 0.34574) * (
        (100 * self.delta / (self.L ** 3)) ** 0.16302))
    self.c_1 = 2223105 * (self.c_7 ** 3.78613) * ((self.T / self.B) ** 1.07961) * (90 - self.i_E) ** (-1.37165)
    self.c_2 = 1  # accounts for the effect of a bulbous bow, which inland ships lack
    self.c_5 = 1 - (0.8 * self.A_T) / (
        self.B * self.T * self.C_M)  # influence of the transom stern on the wave resistance
    # Parameter c_15 depends on the ratio L^3 / delta.
    # Bug fix: same overwrite problem as c_7 — now a proper if/elif/else.
    if (self.L ** 3) / self.delta < 512:
        self.c_15 = -1.69385
    elif (self.L ** 3) / self.delta > 1727:
        self.c_15 = 0
    else:
        self.c_15 = -1.69385 + (self.L / (self.delta ** (1 / 3)) - 8) / 2.36
    # Parameter c_16 depends on C_p.
    if self.C_p < 0.8:
        self.c_16 = 8.07981 * self.C_p - 13.8673 * (self.C_p ** 2) + 6.984388 * (self.C_p ** 3)
    else:
        # Bug fix: Holtrop & Mennen (1982) give c_16 = 1.73014 - 0.7067 * C_p;
        # the original dropped the C_p factor.
        self.c_16 = 1.73014 - 0.7067 * self.C_p
    self.m_1 = 0.0140407 * (self.L / self.T) - 1.75254 * ((self.delta) ** (1 / 3) / self.L) - 4.79323 * (
        self.B / self.L) - self.c_16
    self.m_4 = 0.4 * self.c_15 * np.exp(-0.034 * (self.F_n ** (-3.29)))
    if self.L / self.B < 12:
        self.lmbda = 1.446 * self.C_p - 0.03 * (self.L / self.B)
    else:
        # NOTE(review): Holtrop & Mennen (1982) publish 1.446*C_p - 0.36 for
        # L/B >= 12 — verify whether 0.036 here is intentional.
        self.lmbda = 1.446 * self.C_p - 0.036
    # Parameters needed for RW_2
    self.c_17 = 6919.3 * (self.C_M ** (-1.3346)) * ((self.delta / (self.L ** 3)) ** 2.00977) * (
        (self.L / self.B - 2) ** 1.40692)
    self.m_3 = -7.2035 * ((self.B / self.L) ** 0.326869) * ((self.T / self.B) ** 0.605375)
    # Wave resistance for Fn < 0.4
    self.RW_1 = self.c_1 * self.c_2 * self.c_5 * self.delta * self.rho * self.g * np.exp(
        self.m_1 * (self.F_n ** (-0.9)) + self.m_4 * np.cos(self.lmbda * (self.F_n ** (-2))))
    # Wave resistance for Fn > 0.5
    self.RW_2 = self.c_17 * self.c_2 * self.c_5 * self.delta * self.rho * self.g * np.exp(
        self.m_3 * (self.F_n ** (-0.9)) + self.m_4 * np.cos(self.lmbda * (self.F_n ** (-2))))
    # Bug fix: the original's trailing else overwrote the F_n < 0.4 result
    # with the interpolated value — now a proper if/elif/else.
    if self.F_n < 0.4:
        self.R_W = self.RW_1 / 1000  # kN
    elif self.F_n > 0.55:
        self.R_W = self.RW_2 / 1000  # kN
    else:
        # Linear interpolation between RW_1 and RW_2 for 0.4 <= F_n <= 0.55
        self.R_W = (self.RW_1 + ((10 * self.F_n - 4) * (self.RW_2 - self.RW_1)) / 1.5) / 1000  # kN
def calculate_residual_resistance(self, V_0, h):
    """5) Residual resistance terms, after Holtrop and Mennen (1982).

    Of the three published residual terms only two apply here:
    - resistance due to the immersed transom (``R_TR``), and
    - model-ship correlation resistance (``R_A``);
    the bulbous-bow term is skipped since inland ships generally have no bulb.

    Parameters:
        V_0: vessel speed [m/s] (corrected for shallow water via ``karpov``)
        h: water depth [m]
    """
    self.karpov(V_0, h)  # sets self.V_2, the shallow-water-corrected speed
    # --- Resistance due to the immersed transom: R_TR [kN] ---
    # Froude number based on transom immersion
    self.F_nt = self.V_2 / np.sqrt(2 * self.g * self.A_T / (self.B + self.B * self.C_wp))
    # Coefficient c_6, valid under the assumption F_nt < 5
    self.c_6 = 0.2 * (1 - 0.2 * self.F_nt)
    self.R_TR = 0.5 * self.rho * self.V_2 ** 2 * self.A_T * self.c_6 / 1000
    # --- Model-ship correlation resistance: R_A [kN] ---
    # c_4 is T/L capped at 0.04
    self.c_4 = min(self.T / self.L, 0.04)
    self.c_2 = 1
    self.C_A = (0.006 * (self.L + 100) ** (-0.16) - 0.00205
                + 0.003 * np.sqrt(self.L / 7.5) * self.C_b ** 4 * self.c_2 * (0.04 - self.c_4))
    # Holtrop and Mennen as presented in Sarris (2003)
    self.R_A = 0.5 * self.rho * self.V_2 ** 2 * self.S_T * self.C_A / 1000
def calculate_total_resistance(self, V_0, h):
    """Total resistance [kN]: the sum of all resistance components
    (Holtrop and Mennen, 1982):

        R_tot = R_f * (1 + k1) + R_APP + R_W + R_TR + R_A

    Runs the full component chain first, so each term is up to date.
    """
    # Recompute every component for the requested speed / depth.
    self.calculate_properties()
    self.calculate_frictional_resistance(V_0, h)
    self.calculate_viscous_resistance()
    self.calculate_appendage_resistance(V_0)
    self.calculate_wave_resistance(V_0, h)
    self.calculate_residual_resistance(V_0, h)
    # Frictional term carries the form factor (1 + k1); the rest add linearly.
    viscous_term = self.R_f * self.one_k1
    self.R_tot = viscous_term + sum((self.R_APP, self.R_W, self.R_TR, self.R_A))
def calculate_total_power_required(self):
    """Total required power [kW]:

    - Sum of the power for systems on board (P_hotel) and the power required
      for propulsion (P_BHP).
    - P_BHP follows from the previously calculated total resistance, so
      ``calculate_total_resistance`` must have run first (this method reads
      ``self.R_tot`` and ``self.F_n``).

    Also sets ``self.P_partial``, the partial engine load (fraction of
    installed power, capped at 1), needed by the emission calculations.
    """
    # ---- Required power for systems on board (hotel load): 5% of installed power
    self.P_hotel = 0.05 * self.P_installed
    # ---- Required power for propulsion
    # Effective Horse Power (EHP) from resistance at sailing speed
    self.P_EHP = self.V_B * self.R_tot
    # Velocity correction coefficient dw for the wake-fraction formula
    # (removed two unused locals from the original: an np.zeros(101) buffer
    # and a counter that were never read).
    if self.F_n < 0.2:
        self.dw = 0
    else:
        self.dw = 0.1
    # Wake fraction 'w'
    self.w = 0.11 * (0.16 / self.x) * self.C_b * np.sqrt(
        (self.delta ** (1 / 3)) / self.D_s) - self.dw
    # Thrust deduction factor 't' — coefficient set depends on self.x
    # (presumably the number of propellers — TODO confirm).
    if self.x == 1:
        self.t = 0.6 * self.w * (1 + 0.67 * self.w)
    else:
        self.t = 0.8 * self.w * (1 + 0.25 * self.w)
    self.eta_h = (1 - self.t) / (1 - self.w)  # hull efficiency eta_h
    # Delivered Horse Power (DHP)
    self.P_DHP = self.P_EHP / (self.eta_0 * self.eta_r * self.eta_h)
    # Brake Horse Power (BHP)
    self.P_BHP = self.P_DHP / (self.eta_t * self.eta_g)
    self.P_tot = self.P_hotel + self.P_BHP
    # Partial engine load (P_partial): needed in the 'Emission calculations'
    if self.P_tot > self.P_installed:
        self.P_partial = 1
    else:
        self.P_partial = self.P_tot / self.P_installed
    print('The total power required is', self.P_tot, 'kW')
    print('The partial load is', self.P_partial, 'kW')
def emission_factors_general(self):
    """General emission factors based on the engine's construction year.

    Sets ``self.EM_CO2``, ``self.EM_PM10`` and ``self.EM_NOX`` [g/kWh],
    based on TNO (2019). A partial-load correction factor must still be
    applied afterwards to obtain the total emission factors
    (see ``correction_factors``).

    For engines built after 2019 (Stage V), the factors additionally
    depend on the weight class ``self.L_w`` (1 = light class, otherwise
    heavier classes).
    """
    # Bug fix: the original first branch used `< 1974`, so c_year == 1974
    # (and any value falling in a gap between brackets) left the EM_*
    # attributes unset. The elif chain below covers the full range.
    if self.c_year <= 1974:
        self.EM_CO2 = 756
        self.EM_PM10 = 0.6
        self.EM_NOX = 10.8
    elif self.c_year <= 1979:
        self.EM_CO2 = 730
        self.EM_PM10 = 0.6
        self.EM_NOX = 10.6
    elif self.c_year <= 1984:
        self.EM_CO2 = 714
        self.EM_PM10 = 0.6
        self.EM_NOX = 10.4
    elif self.c_year <= 1989:
        self.EM_CO2 = 698
        self.EM_PM10 = 0.5
        self.EM_NOX = 10.1
    elif self.c_year <= 1994:
        self.EM_CO2 = 698
        self.EM_PM10 = 0.4
        self.EM_NOX = 10.1
    elif self.c_year <= 2002:
        self.EM_CO2 = 650
        self.EM_PM10 = 0.3
        self.EM_NOX = 9.4
    elif self.c_year <= 2007:
        self.EM_CO2 = 635
        self.EM_PM10 = 0.3
        self.EM_NOX = 9.2
    elif self.c_year <= 2019:
        self.EM_CO2 = 635
        self.EM_PM10 = 0.2
        self.EM_NOX = 7
    else:
        # Stage V engines (built after 2019): factors depend on weight class
        if self.L_w == 1:
            self.EM_CO2 = 650
            self.EM_PM10 = 0.1
            self.EM_NOX = 2.9
        else:
            self.EM_CO2 = 603
            self.EM_PM10 = 0.015
            self.EM_NOX = 2.4
    print('The general emission factor of CO2 is', self.EM_CO2, 'g/kWh')
    print('The general emission factor of PM10 is', self.EM_PM10, 'g/kWh')
    # Bug fix: label said "CO2" while printing the NOX factor.
    print('The general emission factor of NOX is', self.EM_NOX, 'g/kWh')
def correction_factors(self):
    """Correction factors:
    - The correction factors have to be multiplied by the general emission factors,
      to get the total emission factors
    - The correction factor takes into account the effect of the partial engine load
    - When the partial engine load is low, the correction factors are higher
      (engine is less efficient)
    - Based on literature TNO (2019)

    Sets ``self.corf_CO2``, ``self.corf_PM10`` and ``self.corf_NOX`` by
    table lookup / linear interpolation against the partial engine load.
    Table column layout (presumably: col 0 = partial load, cols 1-4 = NOX
    by engine class, col 5 = CO2, col 6 = PM10 — TODO confirm against the
    spreadsheet).
    """
    self.calculate_total_power_required()  # You need the P_partial values
    # Import the correction factors table.
    # NOTE(review): reads from the current working directory; the sheet is
    # assumed to have at least 21 data rows (iloc[i + 1, 0] is evaluated for
    # i up to 19 in the elif below) — confirm, else this can raise IndexError.
    self.corf = pd.read_excel(r'correctionfactors.xlsx')
    for i in range(20):
        # If the partial engine load is smaller or equal to 5%, the correction
        # factors corresponding to P_partial = 5% are assigned.
        # NOTE(review): this branch re-assigns the same values on every loop
        # iteration — harmless, but the loop could break early.
        if self.P_partial <= self.corf.iloc[0, 0]:
            self.corf_CO2 = self.corf.iloc[0, 5]
            self.corf_PM10 = self.corf.iloc[0, 6]
            # The NOX correction factors are dependend on the construction year
            # of the engine and the weight class
            if self.c_year < 2008:
                self.corf_NOX = self.corf.iloc[0, 1]  # <= CCR-1 class
            if 2008 <= self.c_year <= 2019:
                self.corf_NOX = self.corf.iloc[0, 2]  # CCR-2 / Stage IIIa
            if self.c_year > 2019:
                if self.L_w == 1:
                    # Stage V: IWP/IWA-v/c-3 class (vessels with P < 300 kW:
                    # assumed to be weight class L1)
                    self.corf_NOX = self.corf.iloc[0, 3]
                else:
                    # Stage V: IWP/IWA-v/c-4 class (vessels with P > 300 kW:
                    # assumed to be weight class L2-L3)
                    self.corf_NOX = self.corf.iloc[0, 4]
        # If the partial engine load is greater than 5%:
        # It is determined inbetween which two percentages in the table the
        # partial engine load lies, and the correction factor is determined
        # by means of linear interpolation between rows i and i + 1.
        elif self.corf.iloc[i, 0] < self.P_partial <= self.corf.iloc[i + 1, 0]:
            self.corf_CO2 = ((self.P_partial - self.corf.iloc[i, 0]) * (
                self.corf.iloc[i + 1, 5] - self.corf.iloc[i, 5])) / (
                self.corf.iloc[i + 1, 0] - self.corf.iloc[i, 0]) + self.corf.iloc[i, 5]
            self.corf_PM10 = ((self.P_partial - self.corf.iloc[i, 0]) * (
                self.corf.iloc[i + 1, 6] - self.corf.iloc[i, 6])) / (
                self.corf.iloc[i + 1, 0] - self.corf.iloc[i, 0]) + self.corf.iloc[i, 6]
            if self.c_year < 2008:
                self.corf_NOX = ((self.P_partial - self.corf.iloc[i, 0]) * (
                    self.corf.iloc[i + 1, 1] - self.corf.iloc[i, 1])) / (
                    self.corf.iloc[i + 1, 0] - self.corf.iloc[i, 0]) + self.corf.iloc[i, 1]
            if 2008 <= self.c_year <= 2019:
                self.corf_NOX = ((self.P_partial - self.corf.iloc[i, 0]) * (
                    self.corf.iloc[i + 1, 2] - self.corf.iloc[i, 2])) / (
                    self.corf.iloc[i + 1, 0] - self.corf.iloc[i, 0]) + self.corf.iloc[i, 2]
            if self.c_year > 2019:
                if self.L_w == 1:
                    self.corf_NOX = ((self.P_partial - self.corf.iloc[i, 0]) * (
                        self.corf.iloc[i + 1, 3] - self.corf.iloc[i, 3])) / (
                        self.corf.iloc[i + 1, 0] - self.corf.iloc[i, 0]) + self.corf.iloc[
                        i, 3]
                else:
                    self.corf_NOX = ((self.P_partial - self.corf.iloc[i, 0]) * (
                        self.corf.iloc[i + 1, 4] - self.corf.iloc[i, 4])) / (
                        self.corf.iloc[i + 1, 0] - self.corf.iloc[i, 0]) + self.corf.iloc[
                        i, 4]
        # If the partial engine load is => 100%, the correction factors
        # corresponding to P_partial = 100% are assigned.
        elif self.P_partial >= self.corf.iloc[19, 0]:
            self.corf_CO2 = self.corf.iloc[19, 5]
            self.corf_PM10 = self.corf.iloc[19, 6]
            # The NOX correction factors are dependend on the construction year
            # of the engine and the weight class
            if self.c_year < 2008:
                self.corf_NOX = self.corf.iloc[19, 1]  # <= CCR-1 class
            if 2008 <= self.c_year <= 2019:
                self.corf_NOX = self.corf.iloc[19, 2]  # CCR-2 / Stage IIIa
            if self.c_year > 2019:
                if self.L_w == 1:
                    # Stage V: IWP/IWA-v/c-3 class (vessels with P < 300 kW:
                    # assumed to be weight class L1)
                    self.corf_NOX = self.corf.iloc[19, 3]
                else:
                    # Stage V: IWP/IWA-v/c-4 class (vessels with P > 300 kW:
                    # assumed to be weight class L2-L3)
                    self.corf_NOX = self.corf.iloc[19, 4]
    print('Correction factor of CO2 is', self.corf_CO2)
    print('Correction factor of PM10 is', self.corf_PM10)
    print('Correction factor of NOX is', self.corf_NOX)
def calculate_emission_factors_total(self):
    """Total emission factors [g/kWh]:

    The total factor for each pollutant is the general emission factor
    (construction-year based, from ``emission_factors_general``) multiplied
    by the partial-load correction factor (from ``correction_factors``).
    Sets ``self.Emf_CO2``, ``self.Emf_PM10`` and ``self.Emf_NOX``.
    """
    print('The construction year of the engine is', self.c_year)
    # self.calculate_engine_age() #You need the values of c_year
    self.emission_factors_general()  # sets EM_CO2 / EM_PM10 / EM_NOX
    self.correction_factors()  # sets corf_CO2 / corf_PM10 / corf_NOX
    # Total factor = general emission factor * correction factor
    self.Emf_CO2 = self.EM_CO2 * self.corf_CO2
    self.Emf_PM10 = self.EM_PM10 * self.corf_PM10
    self.Emf_NOX = self.EM_NOX * self.corf_NOX
    print('The total emission factor of CO2 is', self.Emf_CO2, 'g/kWh')
    print('The total emission factor of PM10 is', self.Emf_PM10, 'g/kWh')
    # Bug fix: label said "CO2" while printing the NOX factor.
    print('The total emission factor of NOX is', self.Emf_NOX, 'g/kWh')
class Routeable:
    """Mixin class: an object that follows a route.

    The route is a networkx path (an ordered sequence of node names);
    ``complete_path`` optionally holds the full, unabridged path.
    """

    def __init__(self, route, complete_path=None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.route = route
        self.complete_path = complete_path
class IsLockWaitingArea(HasResource, Identifiable, Log):
    """Mixin class: the waiting area of a lock complex.

    Exposes ``waiting_area``: a dict mapping the waiting-area node to a
    high-capacity simpy PriorityResource vessels queue on.
    Properties in meters, operation in seconds.
    """

    def __init__(self, node, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Capacity of 100 makes the waiting area effectively unlimited.
        self.waiting_area = {
            node: simpy.PriorityResource(self.env, capacity=100),
        }
        # A separate 'departure' resource (capacity 4) existed here at some
        # point but is currently disabled.
class IsLockLineUpArea(HasResource, HasLength, Identifiable, Log):
    """Mixin class: the line-up area in front of a lock chamber.

    Vessels queue here (bounded by ``lineup_length`` via HasLength) before
    being admitted to the lock. Properties in meters, operation in seconds.
    """

    def __init__(self, node, lineup_length, *args, **kwargs):
        super().__init__(length=lineup_length, remaining_length=lineup_length, *args, **kwargs)
        # Number of vessels currently queued for the lock served by this area.
        self.lock_queue_length = 0

        def _resource(capacity):
            # Each lay-out element is a dict keyed by the line-up area node.
            return {node: simpy.PriorityResource(self.env, capacity=capacity)}

        # Lay-Out: one resource per function of the line-up area.
        self.enter_line_up_area = _resource(1)               # single-file entry
        self.line_up_area = _resource(100)                   # the area itself
        self.converting_while_in_line_up_area = _resource(1)  # chamber conversion gate
        self.pass_line_up_area = _resource(1)                # single-file exit
class HasLockDoors(SimpyObject):
    """Mixin class: a lock chamber's two door sets.

    ``doors_1`` guards the chamber on the ``node_1`` side, ``doors_2`` on
    the ``node_3`` side; each is a single-capacity simpy PriorityResource
    keyed by its node.
    """

    def __init__(self, node_1, node_3, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # One exclusive (capacity-1) door resource per chamber end.
        self.doors_1 = {node_1: simpy.PriorityResource(self.env, capacity=1)}
        self.doors_2 = {node_3: simpy.PriorityResource(self.env, capacity=1)}
class IsLock(HasResource, HasLength, HasLockDoors, Identifiable, Log):
    """Mixin class: a lock chamber between two approach nodes.

    properties in meters, operation in seconds.
    The chamber spans node_1 -- node_2 -- node_3; its water level is always
    equal to one of the two outer nodes (node_1 or node_3).
    """

    def __init__(
        self,
        node_1,            # node on one side of the chamber
        node_2,            # middle node (the chamber itself)
        node_3,            # node on the other side of the chamber
        lock_length,       # chamber length [m]
        lock_width,        # chamber width [m]
        lock_depth,        # chamber depth [m]
        doors_open,        # time to open the doors [s]
        doors_close,       # time to close the doors [s]
        wlev_dif,          # water level difference: scalar, or a [times, values] pair of arrays
        disch_coeff,       # discharge coefficient of the levelling openings
        grav_acc,          # gravitational acceleration [m/s^2]
        opening_area,      # area of the levelling openings [m^2]
        opening_depth,     # depth of the levelling openings [m]
        simulation_start,  # datetime; stored as a POSIX timestamp
        operating_time,    # nominal operating time [s]
        *args,
        **kwargs
    ):
        """Initialize the lock chamber and pick a random initial water level."""
        # Properties
        self.lock_length = lock_length
        self.lock_width = lock_width
        self.lock_depth = lock_depth
        self.wlev_dif = wlev_dif
        self.disch_coeff = disch_coeff
        self.grav_acc = grav_acc
        self.opening_area = opening_area
        self.opening_depth = opening_depth
        self.simulation_start = simulation_start.timestamp()
        self.operating_time = operating_time

        # Operating
        self.doors_open = doors_open
        self.doors_close = doors_close

        # Water level starts at a random side of the chamber.
        assert node_1 != node_3

        self.node_1 = node_1
        self.node_3 = node_3
        self.water_level = random.choice([node_1, node_3])

        super().__init__(length = lock_length, remaining_length = lock_length, node_1 = node_1, node_3 = node_3, *args, **kwargs)

    def operation_time(self, environment):
        """Return the time [s] to level the chamber.

        Uses the orifice/levelling formula 2*W*L*|dh| / (Cd*A*sqrt(2*g*d)).
        When ``wlev_dif`` is a [times, values] pair, the water level
        difference closest in time to 'now' is used.
        NOTE(review): if ``wlev_dif`` is neither list nor float/int this
        raises UnboundLocalError (``operating_time`` never assigned), and a
        numpy scalar would not match these ``type() ==`` checks — consider
        isinstance. TODO confirm intended input types.
        """
        if type(self.wlev_dif) == list:
            # Time-varying level difference: pick the value whose timestamp is
            # nearest to the current simulation time.
            operating_time = (2*self.lock_width*self.lock_length*abs(self.wlev_dif[1][np.abs(self.wlev_dif[0]-(environment.now-self.simulation_start)).argmin()]))/(self.disch_coeff*self.opening_area*math.sqrt(2*self.grav_acc*self.opening_depth))
        elif type(self.wlev_dif) == float or type(self.wlev_dif) == int:
            # Constant level difference.
            operating_time = (2*self.lock_width*self.lock_length*abs(self.wlev_dif))/(self.disch_coeff*self.opening_area*math.sqrt(2*self.grav_acc*self.opening_depth))
        return operating_time

    def convert_chamber(self, environment, new_level, number_of_vessels):
        """Convert the water level of the chamber to ``new_level``.

        simpy process: closes the doors, levels the chamber (taking
        ``operation_time``), then opens the doors, logging each phase.
        """
        # Close the doors
        self.log_entry("Lock doors closing start", environment.now, number_of_vessels, self.water_level)
        yield environment.timeout(self.doors_close)
        self.log_entry("Lock doors closing stop", environment.now, number_of_vessels, self.water_level)

        # Convert the chamber
        self.log_entry(
            "Lock chamber converting start", environment.now, number_of_vessels, self.water_level
        )

        # Water level will shift (logged levels after this point show the new side)
        self.change_water_level(new_level)
        yield environment.timeout(self.operation_time(environment))
        self.log_entry(
            "Lock chamber converting stop", environment.now, number_of_vessels, self.water_level
        )

        # Open the doors
        self.log_entry("Lock doors opening start", environment.now, number_of_vessels, self.water_level)
        yield environment.timeout(self.doors_open)
        self.log_entry("Lock doors opening stop", environment.now, number_of_vessels, self.water_level)

    def change_water_level(self, side):
        """Change the water level and re-prioritize the request queue.

        Each queued request's priority is toggled between 0 and -1;
        requests that now have priority -1 (presumably vessels on the side
        the water level moved to — TODO confirm) are moved to the front of
        the queue, the others to the back.
        """
        self.water_level = side

        for request in self.resource.queue:
            request.priority = -1 if request.priority == 0 else 0

            if request.priority == -1:
                self.resource.queue.insert(
                    0, self.resource.queue.pop(self.resource.queue.index(request))
                )
            else:
                self.resource.queue.insert(
                    -1, self.resource.queue.pop(self.resource.queue.index(request))
                )
class Movable(Locatable, Routeable, Log):
"""Mixin class: Something can move
Used for object that can move with a fixed speed
geometry: point used to track its current location
v: speed"""
def __init__(self, v=4, *args, **kwargs):
    """Initialize a movable object sailing at fixed speed ``v`` [m/s]."""
    super().__init__(*args, **kwargs)
    self.v = v
    # Geodesic helper for distance calculations on the WGS84 ellipsoid.
    self.wgs84 = pyproj.Geod(ellps="WGS84")
def move(self):
"""determine distance between origin and destination, and
yield the time it takes to travel it
Assumption is that self.path is in the right order - vessel moves from route[0] to route[-1].
"""
self.distance = 0
speed = self.v
# Check if vessel is at correct location - if not, move to location
if (
self.geometry
!= nx.get_node_attributes(self.env.FG, "geometry")[self.route[0]]
):
orig = self.geometry
dest = nx.get_node_attributes(self.env.FG, "geometry")[self.route[0]]
print("Origin", orig)
print("Destination", dest)
self.distance += self.wgs84.inv(
shapely.geometry.asShape(orig).x,
shapely.geometry.asShape(orig).y,
shapely.geometry.asShape(dest).x,
shapely.geometry.asShape(dest).y,
)[2]
yield self.env.timeout(self.distance / self.current_speed)
self.log_entry("Sailing to start", self.env.now, self.distance, dest)
# Move over the path and log every step
for node in enumerate(self.route):
self.node = node[1]
if node[0] + 2 <= len(self.route):
origin = self.route[node[0]]
destination = self.route[node[0] + 1]
if "Waiting area" in self.env.FG.nodes[destination].keys():
locks = self.env.FG.nodes[destination]["Waiting area"]
for lock in locks:
loc = self.route.index(destination)
for r in self.route[loc:]:
if 'Line-up area' in self.env.FG.nodes[r].keys():
wait_for_waiting_area = self.env.now
access_waiting_area = lock.waiting_area[destination].request()
yield access_waiting_area
if wait_for_waiting_area != self.env.now:
waiting = self.env.now - wait_for_waiting_area
self.log_entry("Waiting to enter waiting area start", wait_for_waiting_area, 0, nx.get_node_attributes(self.env.FG, "geometry")[origin],)
self.log_entry("Waiting to enter waiting area stop", self.env.now, waiting, nx.get_node_attributes(self.env.FG, "geometry")[origin],)
if "Waiting area" in self.env.FG.nodes[origin].keys():
locks = self.env.FG.nodes[origin]["Waiting area"]
for lock in locks:
loc = self.route.index(origin)
for r in self.route[loc:]:
if 'Line-up area' in self.env.FG.nodes[r].keys():
locks2 = self.env.FG.nodes[r]["Line-up area"]
for r2 in self.route[loc:]:
if 'Lock' in self.env.FG.nodes[r2].keys():
locks3 = self.env.FG.nodes[r2]["Lock"]
break
self.lock_name = []
for lock3 in locks3:
if lock3.water_level == self.route[self.route.index(r2)-1]:
for lock2 in locks2:
if lock2.name == lock3.name:
if lock2.lock_queue_length == 0:
self.lock_name = lock3.name
break
lock_queue_length = [];
if self.lock_name == []:
for lock2 in locks2:
lock_queue_length.append(lock2.lock_queue_length)
self.lock_name = locks2[lock_queue_length.index(min(lock_queue_length))].name
for lock2 in locks2:
if lock2.name == self.lock_name:
lock2.lock_queue_length += 1
for lock2 in locks2:
if lock2.name == self.lock_name:
self.v = 0.5*speed
break
wait_for_lineup_area = self.env.now
lock.waiting_area[origin].release(access_waiting_area)
if self.route[self.route.index(r2)-1] == lock3.node_1:
if lock3.doors_2[lock3.node_3].users != [] and lock3.doors_2[lock3.node_3].users[0].priority == -1:
if self.L < lock2.length.level + lock3.length.level:
access_lineup_length = lock2.length.get(self.L)
elif self.L < lock2.length.level:
if lock2.length.level == lock2.length.capacity:
access_lineup_length = lock2.length.get(self.L)
elif lock2.line_up_area[r].users != [] and lock3.length.level < lock2.line_up_area[r].users[0].length:
access_lineup_length = lock2.length.get(self.L)
else:
if lock2.length.get_queue == []:
access_lineup_length = lock2.length.get(lock2.length.capacity)
lock2.length.get_queue[-1].length = self.L
yield access_lineup_length
correct_lineup_length = lock2.length.put(lock2.length.capacity-self.L)
else:
total_length_waiting_vessels = 0
for q in reversed(range(len(lock2.length.get_queue))):
if lock2.length.get_queue[q].amount == lock2.length.capacity:
break
for q2 in range(q,len(lock2.length.get_queue)):
total_length_waiting_vessels += lock2.length.get_queue[q2].length
if self.L > lock2.length.capacity - total_length_waiting_vessels:
access_lineup_length = lock2.length.get(lock2.length.capacity)
lock2.length.get_queue[-1].length = self.L
yield access_lineup_length
correct_lineup_length = lock2.length.put(lock2.length.capacity-self.L)
else:
access_lineup_length = lock2.length.get(self.L)
lock2.length.get_queue[-1].length = self.L
yield access_lineup_length
else:
if lock2.length.get_queue == []:
access_lineup_length = lock2.length.get(lock2.length.capacity)
lock2.length.get_queue[-1].length = self.L
yield access_lineup_length
correct_lineup_length = lock2.length.put(lock2.length.capacity-self.L)
else:
total_length_waiting_vessels = 0
for q in reversed(range(len(lock2.length.get_queue))):
if lock2.length.get_queue[q].amount == lock2.length.capacity:
break
for q2 in range(q,len(lock2.length.get_queue)):
total_length_waiting_vessels += lock2.length.get_queue[q2].length
if self.L > lock2.length.capacity - total_length_waiting_vessels:
access_lineup_length = lock2.length.get(lock2.length.capacity)
lock2.length.get_queue[-1].length = self.L
yield access_lineup_length
correct_lineup_length = lock2.length.put(lock2.length.capacity-self.L)
else:
access_lineup_length = lock2.length.get(self.L)
lock2.length.get_queue[-1].length = self.L
yield access_lineup_length
else:
if lock2.length.level == lock2.length.capacity:
access_lineup_length = lock2.length.get(self.L)
elif lock2.line_up_area[r].users != [] and self.L < lock2.line_up_area[r].users[-1].lineup_dist-0.5*lock2.line_up_area[r].users[-1].length:
access_lineup_length = lock2.length.get(self.L)
else:
if lock2.length.get_queue == []:
access_lineup_length = lock2.length.get(lock2.length.capacity)
lock2.length.get_queue[-1].length = self.L
yield access_lineup_length
correct_lineup_length = lock2.length.put(lock2.length.capacity-self.L)
else:
total_length_waiting_vessels = 0
for q in reversed(range(len(lock2.length.get_queue))):
if lock2.length.get_queue[q].amount == lock2.length.capacity:
break
for q2 in range(q,len(lock2.length.get_queue)):
total_length_waiting_vessels += lock2.length.get_queue[q2].length
if self.L > lock2.length.capacity - total_length_waiting_vessels:
access_lineup_length = lock2.length.get(lock2.length.capacity)
lock2.length.get_queue[-1].length = self.L
yield access_lineup_length
correct_lineup_length = lock2.length.put(lock2.length.capacity-self.L)
else:
access_lineup_length = lock2.length.get(self.L)
lock2.length.get_queue[-1].length = self.L
yield access_lineup_length
elif self.route[self.route.index(r2)-1] == lock3.node_3:
if lock3.doors_1[lock3.node_1].users != [] and lock3.doors_1[lock3.node_1].users[0].priority == -1:
if self.L < lock2.length.level + lock3.length.level:
access_lineup_length = lock2.length.get(self.L)
elif self.L < lock2.length.level:
if lock2.length.level == lock2.length.capacity:
access_lineup_length = lock2.length.get(self.L)
elif lock2.line_up_area[r].users != [] and lock3.length.level < lock2.line_up_area[r].users[0].length:
access_lineup_length = lock2.length.get(self.L)
else:
if lock2.length.get_queue == []:
access_lineup_length = lock2.length.get(lock2.length.capacity)
yield access_lineup_length
correct_lineup_length = lock2.length.put(lock2.length.capacity-self.L)
yield correct_lineup_length
else:
total_length_waiting_vessels = 0
for q in reversed(range(len(lock2.length.get_queue))):
if lock2.length.get_queue[q].amount == lock2.length.capacity:
break
for q2 in range(q,len(lock2.length.get_queue)):
total_length_waiting_vessels += lock2.length.get_queue[q2].length
if self.L > lock2.length.capacity - total_length_waiting_vessels:
access_lineup_length = lock2.length.get(lock2.length.capacity)
lock2.length.get_queue[-1].length = self.L
yield access_lineup_length
correct_lineup_length = lock2.length.put(lock2.length.capacity-self.L)
else:
access_lineup_length = lock2.length.get(self.L)
lock2.length.get_queue[-1].length = self.L
yield access_lineup_length
else:
if lock2.length.get_queue == []:
access_lineup_length = lock2.length.get(lock2.length.capacity)
lock2.length.get_queue[-1].length = self.L
yield access_lineup_length
correct_lineup_length = lock2.length.put(lock2.length.capacity-self.L)
else:
total_length_waiting_vessels = 0
for q in reversed(range(len(lock2.length.get_queue))):
if lock2.length.get_queue[q].amount == lock2.length.capacity:
break
for q2 in range(q,len(lock2.length.get_queue)):
total_length_waiting_vessels += lock2.length.get_queue[q2].length
if self.L > lock2.length.capacity - total_length_waiting_vessels:
access_lineup_length = lock2.length.get(lock2.length.capacity)
lock2.length.get_queue[-1].length = self.L
yield access_lineup_length
correct_lineup_length = lock2.length.put(lock2.length.capacity-self.L)
else:
access_lineup_length = lock2.length.get(self.L)
lock2.length.get_queue[-1].length = self.L
yield access_lineup_length
else:
if lock2.length.level == lock2.length.capacity:
access_lineup_length = lock2.length.get(self.L)
elif lock2.line_up_area[r].users != [] and self.L < lock2.line_up_area[r].users[-1].lineup_dist-0.5*lock2.line_up_area[r].users[-1].length:
access_lineup_length = lock2.length.get(self.L)
else:
if lock2.length.get_queue == []:
access_lineup_length = lock2.length.get(lock2.length.capacity)
lock2.length.get_queue[-1].length = self.L
yield access_lineup_length
correct_lineup_length = lock2.length.put(lock2.length.capacity-self.L)
else:
total_length_waiting_vessels = 0
for q in reversed(range(len(lock2.length.get_queue))):
if lock2.length.get_queue[q].amount == lock2.length.capacity:
break
for q2 in range(q,len(lock2.length.get_queue)):
total_length_waiting_vessels += lock2.length.get_queue[q2].length
if self.L > lock2.length.capacity - total_length_waiting_vessels:
access_lineup_length = lock2.length.get(lock2.length.capacity)
lock2.length.get_queue[-1].length = self.L
yield access_lineup_length
correct_lineup_length = lock2.length.put(lock2.length.capacity-self.L)
else:
access_lineup_length = lock2.length.get(self.L)
lock2.length.get_queue[-1].length = self.L
yield access_lineup_length
if len(lock2.line_up_area[r].users) != 0:
self.lineup_dist = lock2.line_up_area[r].users[-1].lineup_dist - 0.5*lock2.line_up_area[r].users[-1].length - 0.5*self.L
else:
self.lineup_dist = lock2.length.capacity - 0.5*self.L
self.wgs84 = pyproj.Geod(ellps="WGS84")
[lineup_area_start_lat, lineup_area_start_lon, lineup_area_stop_lat, lineup_area_stop_lon] = [self.env.FG.nodes[self.route[self.route.index(r)]]['geometry'].x, self.env.FG.nodes[self.route[self.route.index(r)]]['geometry'].y,
self.env.FG.nodes[self.route[self.route.index(r)+1]]['geometry'].x, self.env.FG.nodes[self.route[self.route.index(r)+1]]['geometry'].y]
fwd_azimuth,_,_ = self.wgs84.inv(lineup_area_start_lat, lineup_area_start_lon, lineup_area_stop_lat, lineup_area_stop_lon)
[self.lineup_pos_lat,self.lineup_pos_lon,_] = self.wgs84.fwd(self.env.FG.nodes[self.route[self.route.index(r)]]['geometry'].x,
self.env.FG.nodes[self.route[self.route.index(r)]]['geometry'].y,
fwd_azimuth,self.lineup_dist)
access_lineup_area = lock2.line_up_area[r].request()
lock2.line_up_area[r].users[-1].length = self.L
lock2.line_up_area[r].users[-1].id = self.id
lock2.line_up_area[r].users[-1].lineup_pos_lat = self.lineup_pos_lat
lock2.line_up_area[r].users[-1].lineup_pos_lon = self.lineup_pos_lon
lock2.line_up_area[r].users[-1].lineup_dist = self.lineup_dist
lock2.line_up_area[r].users[-1].n = len(lock2.line_up_area[r].users)
lock2.line_up_area[r].users[-1].v = 0.25*speed
lock2.line_up_area[r].users[-1].wait_for_next_cycle = False
yield access_lineup_area
enter_lineup_length = lock2.enter_line_up_area[r].request()
yield enter_lineup_length
lock2.enter_line_up_area[r].users[0].id = self.id
if wait_for_lineup_area != self.env.now:
self.v = 0.25*speed
waiting = self.env.now - wait_for_lineup_area
self.log_entry("Waiting in waiting area start", wait_for_lineup_area, 0, nx.get_node_attributes(self.env.FG, "geometry")[origin])
self.log_entry("Waiting in waiting area stop", self.env.now, waiting, nx.get_node_attributes(self.env.FG, "geometry")[origin])
break
if "Line-up area" in self.env.FG.nodes[destination].keys():
locks = self.env.FG.nodes[destination]["Line-up area"]
for lock in locks:
if lock.name == self.lock_name:
loc = self.route.index(destination)
orig = shapely.geometry.Point(self.lineup_pos_lat,self.lineup_pos_lon)
for r in self.route[loc:]:
if 'Lock' in self.env.FG.nodes[r].keys():
locks = self.env.FG.nodes[r]["Lock"]
for lock2 in locks:
for q in range(len(lock.line_up_area[destination].users)):
if lock.line_up_area[destination].users[q].id == self.id:
if self.route[self.route.index(r)-1] == lock2.node_1:
if lock2.doors_2[lock2.node_3].users != [] and lock2.doors_2[lock2.node_3].users[0].priority == -1:
if q <= 1 and lock.line_up_area[destination].users[q].n != lock.line_up_area[destination].users[q].n-len(lock2.resource.users):
self.lineup_dist = lock.length.capacity - 0.5*self.L
elif self.route[self.route.index(r)-1] == lock2.node_3:
if lock2.doors_1[lock2.node_1].users != [] and lock2.doors_1[lock2.node_1].users[0].priority == -1:
if q <= 1 and lock.line_up_area[destination].users[q].n != lock.line_up_area[destination].users[q].n-len(lock2.resource.users):
self.lineup_dist = lock.length.capacity - 0.5*self.L
[self.lineup_pos_lat,self.lineup_pos_lon,_] = self.wgs84.fwd(self.env.FG.nodes[self.route[self.route.index(destination)]]['geometry'].x,
self.env.FG.nodes[self.route[self.route.index(destination)]]['geometry'].y,
fwd_azimuth,self.lineup_dist)
lock.line_up_area[destination].users[q].lineup_pos_lat = self.lineup_pos_lat
lock.line_up_area[destination].users[q].lineup_pos_lon = self.lineup_pos_lon
lock.line_up_area[destination].users[q].lineup_dist = self.lineup_dist
break
if "Line-up area" in self.env.FG.nodes[origin].keys():
locks = self.env.FG.nodes[origin]["Line-up area"]
for lock in locks:
if lock.name == self.lock_name:
loc = self.route.index(origin)
orig = shapely.geometry.Point(self.lineup_pos_lat,self.lineup_pos_lon)
for r in self.route[loc:]:
if 'Lock' in self.env.FG.nodes[r].keys():
locks = self.env.FG.nodes[r]["Lock"]
lock.enter_line_up_area[origin].release(enter_lineup_length)
for q in range(len(lock.line_up_area[origin].users)):
if lock.line_up_area[origin].users[q].id == self.id:
if q > 0:
_,_,distance = self.wgs84.inv(orig.x,
orig.y,
lock.line_up_area[origin].users[0].lineup_pos_lat,
lock.line_up_area[origin].users[0].lineup_pos_lon)
yield self.env.timeout(distance/self.v)
break
for lock2 in locks:
if lock2.name == self.lock_name:
self.v = 0.25*speed
wait_for_lock_entry = self.env.now
for r2 in self.route[(loc+1):]:
if 'Line-up area' in self.env.FG.nodes[r2].keys():
locks = self.env.FG.nodes[r2]["Line-up area"]
for lock3 in locks:
if lock3.name == self.lock_name:
break
break
if self.route[self.route.index(r)-1] == lock2.node_1:
if len(lock2.doors_2[lock2.node_3].users) != 0:
if lock2.doors_2[lock2.node_3].users[0].priority == -1:
if self.L > (lock2.resource.users[-1].lock_dist-0.5*lock2.resource.users[-1].length) or lock2.resource.users[-1].converting == True:
access_lock_door2 = lock2.doors_2[lock2.node_3].request(priority = -1)
yield access_lock_door2
lock2.doors_2[lock2.node_3].release(access_lock_door2)
wait_for_next_cycle = lock3.pass_line_up_area[r2].request()
yield wait_for_next_cycle
lock3.pass_line_up_area[r2].release(wait_for_next_cycle)
if lock.converting_while_in_line_up_area[origin].users != []:
waiting_during_converting = lock.converting_while_in_line_up_area[origin].request(priority = -1)
yield waiting_during_converting
lock.converting_while_in_line_up_area[origin].release(waiting_during_converting)
elif (len(lock2.doors_1[lock2.node_1].users) == 0 or (len(lock2.doors_1[lock2.node_1].users) != 0 and lock2.doors_1[lock2.node_1].users[0].priority != -1)) and self.route[self.route.index(r)-1] != lock2.water_level:
waiting_during_converting = lock.converting_while_in_line_up_area[origin].request()
yield waiting_during_converting
yield from lock2.convert_chamber(self.env, self.route[self.route.index(r)-1], 0)
lock.converting_while_in_line_up_area[origin].release(waiting_during_converting)
access_lock_door1 = lock2.doors_1[lock2.node_1].request()
yield access_lock_door1
if lock2.doors_2[lock2.node_3].users != [] and lock2.doors_2[lock2.node_3].users[0].priority == -1:
access_lock_door2 = lock2.doors_2[lock2.node_3].request(priority = -1)
lock2.doors_2[lock2.node_3].release(lock2.doors_2[lock2.node_3].users[0])
yield access_lock_door2
lock2.doors_2[lock2.node_3].users[0].id = self.id
else:
access_lock_door2 = lock2.doors_2[lock2.node_3].request(priority = -1)
yield access_lock_door2
lock2.doors_2[lock2.node_3].users[0].id = self.id
else:
if lock3.converting_while_in_line_up_area[r2].users != []:
waiting_during_converting = lock3.converting_while_in_line_up_area[r2].request()
yield waiting_during_converting
lock3.converting_while_in_line_up_area[r2].release(waiting_during_converting)
access_lock_door1 = lock2.doors_1[lock2.node_1].request()
yield access_lock_door1
if lock2.doors_2[lock2.node_3].users != [] and lock2.doors_2[lock2.node_3].users[0].priority == -1:
access_lock_door2 = lock2.doors_2[lock2.node_3].request(priority = -1)
lock2.doors_2[lock2.node_3].release(lock2.doors_2[lock2.node_3].users[0])
yield access_lock_door2
lock2.doors_2[lock2.node_3].users[0].id = self.id
else:
access_lock_door2 = lock2.doors_2[lock2.node_3].request(priority = -1)
yield access_lock_door2
lock2.doors_2[lock2.node_3].users[0].id = self.id
else:
if lock2.doors_2[lock2.node_3].users != [] and lock2.doors_2[lock2.node_3].users[0].priority == -1:
access_lock_door1 = lock2.doors_1[lock2.node_1].request()
yield access_lock_door1
access_lock_door2 = lock2.doors_2[lock2.node_3].request(priority = -1)
lock2.doors_2[lock2.node_3].release(lock2.doors_2[lock2.node_3].users[0])
yield access_lock_door2
lock2.doors_2[lock2.node_3].users[0].id = self.id
elif lock2.doors_2[lock2.node_3].users != [] and lock2.doors_2[lock2.node_3].users[0].priority == 0:
access_lock_door1 = lock2.doors_1[lock2.node_1].request()
yield access_lock_door1
access_lock_door2 = lock2.doors_2[lock2.node_3].request(priority = -1)
yield access_lock_door2
lock2.doors_2[lock2.node_3].users[0].id = self.id
else:
if lock.converting_while_in_line_up_area[origin].users != []:
waiting_during_converting = lock.converting_while_in_line_up_area[origin].request(priority = -1)
yield waiting_during_converting
lock.converting_while_in_line_up_area[origin].release(waiting_during_converting)
access_lock_door1 = lock2.doors_1[lock2.node_1].request()
yield access_lock_door1
elif (len(lock2.doors_1[lock2.node_1].users) == 0 or (len(lock2.doors_1[lock2.node_1].users) != 0 and lock2.doors_1[lock2.node_1].users[0].priority != -1)) and self.route[self.route.index(r)-1] != lock2.water_level:
access_lock_door1 = lock2.doors_1[lock2.node_1].request()
waiting_during_converting = lock.converting_while_in_line_up_area[origin].request()
yield waiting_during_converting
yield from lock2.convert_chamber(self.env, self.route[self.route.index(r)-1], 0)
lock.converting_while_in_line_up_area[origin].release(waiting_during_converting)
elif len(lock2.doors_1[lock2.node_1].users) != 0 and lock2.doors_1[lock2.node_1].users[0].priority == -1:
access_lock_door1 = lock2.doors_1[lock2.node_1].request()
yield access_lock_door1
else:
access_lock_door1 = lock2.doors_1[lock2.node_1].request()
if lock2.doors_2[lock2.node_3].users != [] and lock2.doors_2[lock2.node_3].users[0].priority == -1:
access_lock_door2 = lock2.doors_2[lock2.node_3].request(priority = -1)
lock2.doors_2[lock2.node_3].release(lock2.doors_2[lock2.node_3].users[0])
yield access_lock_door2
lock2.doors_2[lock2.node_3].users[0].id = self.id
else:
access_lock_door2 = lock2.doors_2[lock2.node_3].request(priority = -1)
yield access_lock_door2
lock2.doors_2[lock2.node_3].users[0].id = self.id
elif self.route[self.route.index(r)-1] == lock2.node_3:
if len(lock2.doors_1[lock2.node_1].users) != 0:
if lock2.doors_1[lock2.node_1].users[0].priority == -1:
if self.L > (lock2.resource.users[-1].lock_dist-0.5*lock2.resource.users[-1].length) or lock2.resource.users[-1].converting == True:
access_lock_door1 = lock2.doors_1[lock2.node_1].request(priority = -1)
yield access_lock_door1
lock2.doors_1[lock2.node_1].release(access_lock_door1)
wait_for_next_cycle = lock3.pass_line_up_area[r2].request()
yield wait_for_next_cycle
lock3.pass_line_up_area[r2].release(wait_for_next_cycle)
if lock.converting_while_in_line_up_area[origin].users != []:
waiting_during_converting = lock.converting_while_in_line_up_area[origin].request(priority = -1)
yield waiting_during_converting
lock.converting_while_in_line_up_area[origin].release(waiting_during_converting)
elif (len(lock2.doors_2[lock2.node_3].users) == 0 or (len(lock2.doors_2[lock2.node_3].users) != 0 and lock2.doors_2[lock2.node_3].users[0].priority != -1)) and self.route[self.route.index(r)-1] != lock2.water_level:
waiting_during_converting = lock.converting_while_in_line_up_area[origin].request()
yield waiting_during_converting
yield from lock2.convert_chamber(self.env, self.route[self.route.index(r)-1], 0)
lock.converting_while_in_line_up_area[origin].release(waiting_during_converting)
access_lock_door2 = lock2.doors_2[lock2.node_3].request()
yield access_lock_door2
if lock2.doors_1[lock2.node_1].users != [] and lock2.doors_1[lock2.node_1].users[0].priority == -1:
access_lock_door1 = lock2.doors_1[lock2.node_1].request(priority = -1)
lock2.doors_1[lock2.node_1].release(lock2.doors_1[lock2.node_1].users[0])
yield access_lock_door1
lock2.doors_1[lock2.node_1].users[0].id = self.id
else:
access_lock_door1 = lock2.doors_1[lock2.node_1].request(priority = -1)
yield access_lock_door1
lock2.doors_1[lock2.node_1].users[0].id = self.id
else:
if lock3.converting_while_in_line_up_area[r2].users != []:
waiting_during_converting = lock3.converting_while_in_line_up_area[r2].request()
yield waiting_during_converting
lock3.converting_while_in_line_up_area[r2].release(waiting_during_converting)
access_lock_door2 = lock2.doors_2[lock2.node_3].request()
yield access_lock_door2
if lock2.doors_1[lock2.node_1].users != [] and lock2.doors_1[lock2.node_1].users[0].priority == -1:
access_lock_door1 = lock2.doors_1[lock2.node_1].request(priority = -1)
lock2.doors_1[lock2.node_1].release(lock2.doors_1[lock2.node_1].users[0])
yield access_lock_door1
lock2.doors_1[lock2.node_1].users[0].id = self.id
else:
access_lock_door1 = lock2.doors_1[lock2.node_1].request(priority = -1)
yield access_lock_door1
lock2.doors_1[lock2.node_1].users[0].id = self.id
else:
if lock2.doors_1[lock2.node_1].users != [] and lock2.doors_1[lock2.node_1].users[0].priority == -1:
access_lock_door2 = lock2.doors_2[lock2.node_3].request()
yield access_lock_door2
access_lock_door1 = lock2.doors_1[lock2.node_1].request(priority = -1)
lock2.doors_1[lock2.node_1].release(lock2.doors_1[lock2.node_1].users[0])
yield access_lock_door1
lock2.doors_1[lock2.node_1].users[0].id = self.id
elif lock2.doors_1[lock2.node_1].users != [] and lock2.doors_1[lock2.node_1].users[0].priority == 0:
access_lock_door2 = lock2.doors_2[lock2.node_3].request()
yield access_lock_door2
access_lock_door1 = lock2.doors_1[lock2.node_1].request(priority = -1)
yield access_lock_door1
lock2.doors_1[lock2.node_1].users[0].id = self.id
else:
if lock.converting_while_in_line_up_area[origin].users != []:
waiting_during_converting = lock.converting_while_in_line_up_area[origin].request(priority = -1)
yield waiting_during_converting
lock.converting_while_in_line_up_area[origin].release(waiting_during_converting)
access_lock_door2 = lock2.doors_2[lock2.node_3].request()
yield access_lock_door2
elif (len(lock2.doors_2[lock2.node_3].users) == 0 or (len(lock2.doors_2[lock2.node_3].users) != 0 and lock2.doors_2[lock2.node_3].users[0].priority != -1)) and self.route[self.route.index(r)-1] != lock2.water_level:
access_lock_door2 = lock2.doors_2[lock2.node_3].request()
waiting_during_converting = lock.converting_while_in_line_up_area[origin].request()
yield waiting_during_converting
yield from lock2.convert_chamber(self.env, self.route[self.route.index(r)-1], 0)
lock.converting_while_in_line_up_area[origin].release(waiting_during_converting)
elif len(lock2.doors_2[lock2.node_3].users) != 0 and lock2.doors_2[lock2.node_3].users[0].priority == -1:
access_lock_door2 = lock2.doors_2[lock2.node_3].request()
yield access_lock_door2
else:
access_lock_door2 = lock2.doors_2[lock2.node_3].request()
if lock2.doors_1[lock2.node_1].users != [] and lock2.doors_1[lock2.node_1].users[0].priority == -1:
access_lock_door1 = lock2.doors_1[lock2.node_1].request(priority = -1)
lock2.doors_1[lock2.node_1].release(lock2.doors_1[lock2.node_1].users[0])
yield access_lock_door1
lock2.doors_1[lock2.node_1].users[0].id = self.id
else:
access_lock_door1 = lock2.doors_1[lock2.node_1].request(priority = -1)
yield access_lock_door1
lock2.doors_1[lock2.node_1].users[0].id = self.id
access_lock_length = lock2.length.get(self.L)
access_lock = lock2.resource.request()
access_lock_pos_length = lock2.pos_length.get(self.L)
self.lock_dist = lock2.pos_length.level + 0.5*self.L
yield access_lock_pos_length
lock2.resource.users[-1].id = self.id
lock2.resource.users[-1].length = self.L
lock2.resource.users[-1].lock_dist = self.lock_dist
lock2.resource.users[-1].converting = False
if self.route[self.route.index(r)-1] == lock2.node_1:
lock2.resource.users[-1].dir = 1.0
else:
lock2.resource.users[-1].dir = 2.0
if wait_for_lock_entry != self.env.now:
waiting = self.env.now - wait_for_lock_entry
self.log_entry("Waiting in line-up area start", wait_for_lock_entry, 0, orig)
self.log_entry("Waiting in line-up area stop", self.env.now, waiting, orig)
self.wgs84 = pyproj.Geod(ellps="WGS84")
[doors_origin_lat, doors_origin_lon, doors_destination_lat, doors_destination_lon] = [self.env.FG.nodes[self.route[self.route.index(r)-1]]['geometry'].x, self.env.FG.nodes[self.route[self.route.index(r)-1]]['geometry'].y,
self.env.FG.nodes[self.route[self.route.index(r)+1]]['geometry'].x, self.env.FG.nodes[self.route[self.route.index(r)+1]]['geometry'].y]
fwd_azimuth,_,distance = self.wgs84.inv(doors_origin_lat, doors_origin_lon, doors_destination_lat, doors_destination_lon)
[self.lock_pos_lat,self.lock_pos_lon,_] = self.wgs84.fwd(self.env.FG.nodes[self.route[self.route.index(r)-1]]['geometry'].x,
self.env.FG.nodes[self.route[self.route.index(r)-1]]['geometry'].y,
fwd_azimuth,self.lock_dist)
for r4 in reversed(self.route[:(loc-1)]):
if 'Line-up area' in self.env.FG.nodes[r4].keys():
locks = self.env.FG.nodes[r4]["Line-up area"]
for lock4 in locks:
if lock4.name == self.lock_name:
lock4.lock_queue_length -= 1
break
elif 'Waiting area' in self.env.FG.nodes[r].keys():
for r2 in reversed(self.route[:(loc-1)]):
if 'Lock' in self.env.FG.nodes[r2].keys():
locks = self.env.FG.nodes[r2]["Lock"]
for lock2 in locks:
if lock2.name == self.lock_name:
if self.route[self.route.index(r2)+1] == lock2.node_3 and len(lock2.doors_2[lock2.node_3].users) != 0 and lock2.doors_2[lock2.node_3].users[0].id == self.id:
lock2.doors_2[lock2.node_3].release(access_lock_door2)
elif self.route[self.route.index(r2)+1] == lock2.node_1 and len(lock2.doors_1[lock2.node_1].users) != 0 and lock2.doors_1[lock2.node_1].users[0].id == self.id:
lock2.doors_1[lock2.node_1].release(access_lock_door1)
lock.pass_line_up_area[origin].release(departure_lock)
lock2.resource.release(access_lock)
departure_lock_length = lock2.length.put(self.L)
departure_lock_pos_length = lock2.pos_length.put(self.L)
yield departure_lock_length
yield departure_lock_pos_length
break
if "Line-up area" in self.env.FG.nodes[self.route[node[0]-1]].keys():
locks = self.env.FG.nodes[self.route[node[0]-1]]["Line-up area"]
for lock in locks:
if lock.name == self.lock_name:
loc = self.route.index(origin)
for r in self.route[loc:]:
if 'Lock' in self.env.FG.nodes[r].keys():
locks = self.env.FG.nodes[r]["Lock"]
lock.line_up_area[self.route[node[0]-1]].release(access_lineup_area)
departure_lineup_length = lock.length.put(self.L)
yield departure_lineup_length
if "Lock" in self.env.FG.nodes[origin].keys():
locks = self.env.FG.nodes[origin]["Lock"]
for lock in locks:
if lock.name == self.lock_name:
if self.route[self.route.index(origin)-1] == lock.node_1:
lock.doors_1[lock.node_1].release(access_lock_door1)
elif self.route[self.route.index(origin)-1] == lock.node_3:
lock.doors_2[lock.node_3].release(access_lock_door2)
orig = shapely.geometry.Point(self.lock_pos_lat,self.lock_pos_lon)
loc = self.route.index(origin)
for r2 in reversed(self.route[loc:]):
if "Line-up area" in self.env.FG.nodes[r2].keys():
locks = self.env.FG.nodes[r2]["Line-up area"]
for lock3 in locks:
if lock3.name == self.lock_name:
departure_lock = lock3.pass_line_up_area[r2].request(priority = -1)
break
break
for r in reversed(self.route[:(loc-1)]):
if "Line-up area" in self.env.FG.nodes[r].keys():
locks = self.env.FG.nodes[r]["Line-up area"]
for lock2 in locks:
if lock2.name == self.lock_name:
for q2 in range(0,len(lock.resource.users)):
if lock.resource.users[q2].id == self.id:
break
start_time_in_lock = self.env.now
self.log_entry("Passing lock start", self.env.now, 0, orig)
if len(lock2.line_up_area[r].users) != 0 and lock2.line_up_area[r].users[0].length < lock.length.level:
if self.route[self.route.index(origin)-1] == lock.node_1:
access_line_up_area = lock2.enter_line_up_area[r].request()
yield access_line_up_area
lock2.enter_line_up_area[r].release(access_line_up_area)
access_lock_door1 = lock.doors_1[lock.node_1].request()
yield access_lock_door1
lock.doors_1[lock.node_1].release(access_lock_door1)
elif self.route[self.route.index(origin)-1] == lock.node_3:
access_line_up_area = lock2.enter_line_up_area[r].request()
yield access_line_up_area
lock2.enter_line_up_area[r].release(access_line_up_area)
access_lock_door2 = lock.doors_2[lock.node_3].request()
yield access_lock_door2
lock.doors_2[lock.node_3].release(access_lock_door2)
if lock.resource.users[0].id == self.id:
lock.resource.users[0].converting = True
number_of_vessels = len(lock.resource.users)
yield from lock.convert_chamber(self.env, destination,number_of_vessels)
else:
for u in range(len(lock.resource.users)):
if lock.resource.users[u].id == self.id:
lock.resource.users[u].converting = True
yield self.env.timeout(lock.doors_close + lock.operation_time(self.env) + lock.doors_open)
break
yield departure_lock
self.log_entry("Passing lock stop", self.env.now, self.env.now-start_time_in_lock, orig,)
[self.lineup_pos_lat,self.lineup_pos_lon] = [self.env.FG.nodes[self.route[self.route.index(r2)]]['geometry'].x, self.env.FG.nodes[self.route[self.route.index(r2)]]['geometry'].y]
yield from self.pass_edge(origin, destination)
self.v = speed
else:
# print('I am going to go to the next node {}'.format(destination))
yield from self.pass_edge(origin, destination)
if node[0] + 2 == len(self.route):
break
# self.geometry = nx.get_node_attributes(self.env.FG, "geometry")[destination]
logger.debug(" distance: " + "%4.2f" % self.distance + " m")
logger.debug(" sailing: " + "%4.2f" % self.current_speed + " m/s")
logger.debug(
" duration: "
+ "%4.2f" % ((self.distance / self.current_speed) / 3600)
+ " hrs"
)
def pass_edge(self, origin, destination):
    """Sail a single edge of the route from *origin* to *destination*.

    A SimPy-style generator: it yields ``env.timeout`` events for the
    sailing time and writes start/stop entries to the vessel log.

    The nominal node geometries are overridden by the exact lock /
    line-up positions computed earlier (``self.lock_pos_*`` /
    ``self.lineup_pos_*``) when either end node hosts such an object.
    If the edge carries an explicit 'geometry', the vessel follows it
    sub-segment by sub-segment; if it carries a 'Resources' store,
    passage may have to queue for capacity.
    """
    edge = self.env.FG.edges[origin, destination]
    orig = nx.get_node_attributes(self.env.FG, "geometry")[origin]
    dest = nx.get_node_attributes(self.env.FG, "geometry")[destination]

    # Replace the nominal node positions with the precise stopping
    # positions computed for locks and line-up areas.
    if "Lock" in self.env.FG.nodes[origin].keys():
        orig = shapely.geometry.Point(self.lock_pos_lat, self.lock_pos_lon)
    if "Lock" in self.env.FG.nodes[destination].keys():
        dest = shapely.geometry.Point(self.lock_pos_lat, self.lock_pos_lon)
    if "Line-up area" in self.env.FG.nodes[origin].keys():
        orig = shapely.geometry.Point(self.lineup_pos_lat, self.lineup_pos_lon)
    if "Line-up area" in self.env.FG.nodes[destination].keys():
        dest = shapely.geometry.Point(self.lineup_pos_lat, self.lineup_pos_lon)

    if 'geometry' in edge:
        edge_route = np.array(edge['geometry'])

        # Check whether the edge geometry runs in the sailing direction,
        # otherwise flip it.
        distance_from_start = self.wgs84.inv(
            orig.x,
            orig.y,
            edge_route[0][0],
            edge_route[0][1],
        )[2]
        distance_from_stop = self.wgs84.inv(
            orig.x,
            orig.y,
            edge_route[-1][0],
            edge_route[-1][1],
        )[2]
        if distance_from_start > distance_from_stop:
            # The stored geometry starts nearer to our end point: reverse it.
            edge_route = np.flipud(np.array(edge['geometry']))

        for index, pt in enumerate(edge_route[:-1]):
            sub_orig = shapely.geometry.Point(edge_route[index][0], edge_route[index][1])
            sub_dest = shapely.geometry.Point(edge_route[index + 1][0], edge_route[index + 1][1])

            # sub_orig / sub_dest are already shapely Points, so the
            # asShape() wrapper (removed in Shapely 2.0) is not needed.
            distance = self.wgs84.inv(
                sub_orig.x,
                sub_orig.y,
                sub_dest.x,
                sub_dest.y,
            )[2]
            self.distance += distance
            self.log_entry("Sailing from node {} to node {} sub edge {} start".format(origin, destination, index), self.env.now, 0, sub_orig,)
            yield self.env.timeout(distance / self.current_speed)
            self.log_entry("Sailing from node {} to node {} sub edge {} stop".format(origin, destination, index), self.env.now, 0, sub_dest,)
        self.geometry = dest
    else:
        # No explicit geometry: sail the great-circle leg in one step.
        distance = self.wgs84.inv(
            orig.x,
            orig.y,
            dest.x,
            dest.y,
        )[2]
        self.distance += distance
        arrival = self.env.now

        # Act based on resources: a capacity-limited edge makes us queue.
        if "Resources" in edge.keys():
            with self.env.FG.edges[origin, destination]["Resources"].request() as request:
                yield request
                if arrival != self.env.now:
                    # We actually had to wait; log the waiting interval.
                    self.log_entry("Waiting to pass edge {} - {} start".format(origin, destination), arrival, 0, orig,)
                    self.log_entry("Waiting to pass edge {} - {} stop".format(origin, destination), self.env.now, 0, orig,)
                self.log_entry("Sailing from node {} to node {} start".format(origin, destination), self.env.now, 0, orig,)
                yield self.env.timeout(distance / self.current_speed)
                self.log_entry("Sailing from node {} to node {} stop".format(origin, destination), self.env.now, 0, dest,)
        else:
            self.log_entry("Sailing from node {} to node {} start".format(origin, destination), self.env.now, 0, orig,)
            yield self.env.timeout(distance / self.current_speed)
            self.log_entry("Sailing from node {} to node {} stop".format(origin, destination), self.env.now, 0, dest,)
@property
def current_speed(self):
    """Current sailing speed of the vessel (plain attribute access on ``self.v``)."""
    return self.v
class ContainerDependentMovable(Movable, HasContainer):
    """ContainerDependentMovable class

    Used for objects that move with a speed dependent on the container level.

    compute_v: a function that, given the fraction the container is filled
        (in [0, 1]), returns the current speed
    """

    def __init__(self, compute_v, *args, **kwargs):
        """Store the speed curve and a WGS84 geodesic helper.

        FIX: the stray ``"Initialization"`` string previously sat *after*
        the ``super()`` call, where it was a no-op statement instead of
        this method's docstring.
        """
        super().__init__(*args, **kwargs)
        self.compute_v = compute_v
        self.wgs84 = pyproj.Geod(ellps="WGS84")

    @property
    def current_speed(self):
        """Speed corresponding to the current fill fraction of the container."""
        return self.compute_v(self.container.level / self.container.capacity)
| StarcoderdataPython |
1725016 | # ----------------------------------------------------
# Copyright The IETF Trust 2018-9, All Rights Reserved
# ----------------------------------------------------
import coverage
# Start coverage measurement for this process as soon as the interpreter
# comes up, so subprocess coverage is recorded too.
coverage.process_startup()
| StarcoderdataPython |
206040 | from sampleMangler.__main__ import main
def test_main():
    """The CLI entry point should report success for an empty argument list."""
    exit_code = main([])
    assert exit_code == 0
| StarcoderdataPython |
3355600 | <reponame>mhp/del-plkr
#!/usr/bin/env python
# Heavily based on http://notes.natbat.net/2007/03/06/delicioussnaflr/
import urllib2, urllib, time, datetime
from xml.dom import minidom
# Account credentials for the delicious API (placeholders; fill in before running).
deliciousUsername = 'xxxx'
deliciousPassword = '<PASSWORD>'
# Bookmarks carrying this tag are fetched, stripped of the tag and re-saved.
tag = 'plkr'
class DeliciousAccount():
    """Thin wrapper around the delicious v1 API for a single account."""

    apiRoot = 'https://api.del.icio.us/'

    def __init__(self, username, password):
        """Logs into delicious by preparing a basic-auth URL opener."""
        pmgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
        pmgr.add_password(
            None, self.apiRoot, username, password
        )
        auth_handler = urllib2.HTTPBasicAuthHandler(pmgr)
        self.opener = urllib2.build_opener(auth_handler)

    def get_response_root(self, uri, rootTag):
        """GET *uri*, parse the XML reply, return the first *rootTag* element."""
        response = minidom.parseString(self.opener.open(uri).read())
        return response.getElementsByTagName(rootTag)[0]

    def get_update_time(self):
        """Retrieve the timestamp of the account's last update."""
        update = self.get_response_root(self.apiRoot + 'v1/posts/update', 'update')
        return update.getAttribute('time')

    def get_posts(self, tag=None):
        """Retrieve all posts, optionally filtered by *tag*."""
        uri = self.apiRoot + 'v1/posts/all?'
        if tag is not None:
            uri += 'tag=%s' % tag
        pList = self.get_response_root(uri, 'posts').getElementsByTagName('post')
        return [DeliciousPost(p) for p in pList]

    def update_post(self, post):
        """Re-save *post* via posts/add; return the API result code string.

        BUG FIX: ``Info`` is a method, so it must be *called* — the old
        code concatenated the bound method object itself onto the URL,
        which raised a TypeError.
        """
        result = self.get_response_root(
            self.apiRoot + 'v1/posts/add?' + post.Info(),
            'result')
        return result.getAttribute('code')
def urlencode(d):
    """UTF-8 encode any unicode values of *d*, then URL-encode the mapping.

    FIX: the old version rewrote the caller's dictionary in place; the
    encoding is now done on a copy so *d* is left untouched.
    """
    encoded = {}
    for key, value in d.items():
        if isinstance(value, unicode):
            value = value.encode('utf-8')
        encoded[key] = value
    return urllib.urlencode(encoded)
class DeliciousPost():
    """One bookmark, built from a ``<post>`` element of the delicious API."""

    def __init__(self, xmlPost):
        # Copy the interesting attributes straight off the XML element.
        attr = xmlPost.getAttribute
        self.url = attr('href')
        self.description = attr('description')
        self.extended = attr('extended')
        self.tags = attr('tag').split()
        self.time = attr('time')
        # privacy flag? need to see what it is called

    def Info(self):
        """Return the bookmark as a URL-encoded query string for posts/add."""
        fields = {
            'url': self.url,
            'description': self.description,
            'extended': self.extended,
            'tags': " ".join(self.tags),
            'dt': self.time,
            'replace': 'yes',
        }
        return urlencode(fields)
def syncronise_account():
"""Kickstarts the whole kaboodle"""
print 'starting sync'
delAcct = DeliciousAccount(deliciousUsername, deliciousPassword)
print delAcct.get_update_time()
posts = delAcct.get_posts(tag)
for p in posts:
print "Processing ", p.url, " ...",
p.tags.remove(tag)
print delAcct.update_post(p)
time.sleep(2)
# Entry point: run the one-shot synchronisation when executed as a script.
if __name__ == '__main__':
    syncronise_account()
| StarcoderdataPython |
4842936 | from censusreporter.config.base.urls import urlpatterns, handler500
| StarcoderdataPython |
11214910 | from untwisted.network import *
from untwisted.event import *
from untwisted.utils.stdio import *
from untwisted.utils.shrug import *
from untwisted.task import *
from socket import *
from struct import pack
from socket import inet_aton
from re import finditer, compile, DOTALL
from UserDict import UserDict
# Custom untwisted events used by the proxy checker.
# DONE: the cookie came back, so the proxy completes a full cycle.
DONE = get_event()
# TIMEOUT: we connected, but no cookie arrived within the time limit.
TIMEOUT = get_event()
# COMPLETE: every scheduled proxy test has been classified.
COMPLETE = get_event()
"""
These are examples of request that are to be sent.
We are assuming that 172.16.58.3:8001 is the server target
address.
REQUEST_HTTP = 'CONNECT 172.16.58.3:8001 HTTP/1.0\r\n\r\n'
REQUEST_SQUID = 'GET http://172.16.58.3:8001 HTTP/1.0\r\n\r\n'
REQUEST_SOCKS4 = pack('!BBH4sB',
4, # SOCK VERSION it is 4.
1, # SOCK command, in this case 1 = CONNECT
8001, # It is the target port.
inet_aton('172.16.58.3'), # The target ip.
0) # terminator for our user-id (empty string).
REQUEST_SOCKS5 = '%s%s' % (struct.pack(
'!BBB',
5, # version
1, # number of supported methods
0, # anonymous
)
# HACK: just send the CONNECT request straight away instead of
# waiting for the server to reply to our negotiation.
,
struct.pack(
'!BBBB4sH',
5, # version
1, # command (1 for CONNECT)
0, # reserved
1, # address type (1 for IPv4 address)
inet_aton(self.target_ip),
self.target_port))
"""
def set_up_server(port, backlog):
    """Counterpart of is_active: the dummy server behind the proxies.

    It listens on *port* and simply echoes every complete line (the
    cookie) back through whatever proxy relayed it.

    port: TCP port to listen on.
    backlog: listen() backlog for pending connections.
    Returns the listening Spin wrapper.
    """
    sock = socket(AF_INET, SOCK_STREAM)
    serv = Spin(sock)
    serv.bind(('', port))
    serv.listen(backlog)
    # Install the Server protocol since this is a listening socket;
    # it spawns ACCEPT events for incoming connections.
    Server(serv)
    def handle_accept(serv, con):
        """Install line-oriented I/O protocols on each accepted connection."""
        Stdin(con)
        Stdout(con)
        Shrug(con)
        # Echo each received line (the cookie) straight back.
        xmap(con, FOUND, lambda con, data: con.dump('%s\r\n' % data))
        xmap(con, CLOSE, lose)
    xmap(serv, ACCEPT, handle_accept)
    return serv
def is_active(proxy_ip, proxy_port, request, cookie, timeout):
    """Test whether a proxy completes a full relay cycle.

    Connects to the proxy, sends *request* (the proxy-specific CONNECT /
    GET / SOCKS preamble) followed by *cookie*, and waits for the dummy
    server to echo the cookie back through the proxy.

    proxy_ip: address of the proxy under test.
    proxy_port: port the proxy listens on.
    request: protocol-specific chunk carrying the dummy server's ip:port.
    cookie: key that must be echoed back; if it is, the proxy is active.
    timeout: seconds to wait after connecting before spawning TIMEOUT.
    Returns the Spin connection; observers bind DONE / TIMEOUT /
    CONNECT_ERR / CLOSE on it to learn the outcome.
    """
    sock = socket(AF_INET, SOCK_STREAM)
    con = Spin(sock)
    Client(con)
    def run_timeout():
        """Spawn TIMEOUT: connected, but no cookie within the time limit."""
        spawn(con, TIMEOUT)
        # We just kill the connection.
        lose(con)
    def missing_cookie(con):
        """The host closed before echoing the cookie: cancel the timer."""
        sched.unmark(timeout, run_timeout)
        lose(con)
    def set_up_con(con):
        """Once connected: arm the timeout, send request + cookie, await echo.

        This is an untwisted coroutine (note the yields on hold()).
        """
        # Install basic line-oriented protocols.
        Stdout(con)
        Stdin(con)
        # Shrug is needed to match the cookie line echoed back
        # through the proxy.
        Shrug(con)
        # Spawn every complete received line as its own event, so
        # receiving '<cookie>\r\n' spawns the cookie itself.
        xmap(con, FOUND, lambda con, data: spawn(con, data, data))
        # Arm the timeout; True = fire only once.
        sched.after(timeout, run_timeout, True)
        xmap(con, CLOSE, missing_cookie)
        # Send the proxy-specific preamble.
        con.dump(request)
        # Wait until the request is fully flushed before sending the cookie.
        yield hold(con, DUMPED)
        con.dump('%s\r\n' % cookie)
        # Finally wait for the cookie to come back.
        yield hold(con, cookie)
        # Cookie received: disarm the timeout — the proxy is active.
        sched.unmark(timeout, run_timeout)
        # Spawn DONE and drop the connection to the proxy.
        spawn(con, DONE)
        lose(con)
    # Kick off the whole process.
    xmap(con, CONNECT, set_up_con)
    xmap(con, CONNECT_ERR, lose)
    con.connect_ex((proxy_ip, proxy_port))
    return con
def load_data(filename):
    """Extract every ``ip:port``-like pair from a messy proxy-list file.

    Proxy lists copied from web sites rarely come in a clean format;
    this scans the whole file for anything resembling an IPv4 address
    followed (after any non-digit separator) by a port number.

    filename: path of the file to scan.
    Returns an iterator of ``re`` match objects with named groups
    ``ip`` and ``port``::

        for match in load_data('dirty_proxy_list.txt'):
            ip, port = match.group('ip', 'port')
    """
    # Raw string so the backslashes reach the regex engine untouched.
    ADDR_STR = r'(?P<ip>[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+)[^0-9]+?(?P<port>[0-9]+)'
    ADDR_REG = compile(ADDR_STR, DOTALL)
    with open(filename, 'r') as fd:
        data = fd.read()
    seq = finditer(ADDR_REG, data)
    return seq
class Task(UserDict, Mode):
    """Accumulate a fixed number of one-shot events, then announce COMPLETE.

    In an asynchronous setting a function cannot simply return when its
    work is done: its work is finished by events firing later.  A Task
    counts those firings.  Each expected event is registered with add()
    (or via the *lim* constructor argument); the instance itself is used
    as the event handler.  Every call records the event's arguments in
    the underlying dictionary (keyed by event) and bumps a counter; when
    the counter reaches the expected total, COMPLETE is spawned on the
    Task.

    This only works for events that fire exactly once per registration —
    recurring events would over-count.
    """

    def __init__(self, lim=0):
        # UserDict supplies the dict body used to collect event arguments;
        # Mode lets other code bind handlers to the COMPLETE we spawn.
        UserDict.__init__(self)
        Mode.__init__(self)
        self.lim = lim      # how many event firings we are waiting for
        self.index = 0      # how many have fired so far

    def add(self):
        """Register one more task whose completion we must wait for."""
        self.lim += 1

    def __call__(self, event, *args):
        """Record one event firing; spawn COMPLETE once all are in.

        Bind it as ``xmap(spin, EVENT, task_instance)``.
        """
        # Accumulate the event's arguments under its own key.
        self.setdefault(event, []).append(args)
        self.index += 1
        # All expected tasks done?
        if self.index >= self.lim:
            spawn(self, COMPLETE)
def run_test(database, request, cookie, timeout):
    """Launch one is_active() probe per proxy found in *database*.

    Each proxy is tested with the same *request* preamble, *cookie* and
    *timeout*.  Returns a Task; bind COMPLETE on it to learn when every
    proxy has been classified as DONE, TIMEOUT, CONNECT_ERR or CLOSE
    (a host that closes before echoing the cookie either refuses the
    connection or is not a proxy).
    """
    task = Task()
    for entry in database:
        ip, port = entry.group('ip', 'port')
        con = is_active(ip, int(port), request, cookie, timeout)
        # Default arguments pin outcome/ip/port at definition time,
        # avoiding the late-binding closure pitfall.
        for outcome in (DONE, TIMEOUT, CONNECT_ERR, CLOSE):
            xmap(con, outcome,
                 lambda con, outcome=outcome, ip=ip, port=port: task(outcome, ip, port))
        task.add()
    return task
"""
class RunTest(Mode):
def __init__(self, database, request, cookie, timeout):
Mode.__init__(self)
self.pool = { DONE: [], TIMEOUT: [], CONNECT_ERR :[] }
self.lim = 0
self.index = 0
for ind in database:
ip, port = ind.group('ip', 'port')
con = is_active(ip, int(port), request, cookie, timeout)
xmap(con, DONE, self.update, DONE, (ip, port))
xmap(con, TIMEOUT, self.update, TIMEOUT, (ip, port))
xmap(con, CONNECT_ERR, self.update, CONNECT_ERR, (ip, port))
self.add()
self.lim = self.lim + 1
def update(self, con, event, addr):
self.pool[event].append(addr)
self.index = self.index + 1
if self.index >= self.lim:
spawn(self, COMPLETE, self.pool)
"""
| StarcoderdataPython |
5081872 | from __future__ import print_function, division
from PIL.Image import NONE
import warnings
warnings.filterwarnings("ignore")
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
import matplotlib.pyplot as plt
import torch.optim as optim
import time
import os
import copy
plt.ion() # interactive mode
from eczema_model.config import config
# print(config.DATAPATH)
# Check GPU presence
def Is_gpu_avaliable():
    """Return the torch.device to run on: cuda:0 when available, else cpu.

    NOTE(review): despite the boolean-sounding (and misspelled) name,
    this returns a device object, which is always truthy.  The name is
    kept for existing callers.
    """
    if torch.cuda.is_available():
        return torch.device("cuda:0")
    return torch.device("cpu")
# Load Data
def load_datsets():
    """Build train/val dataloaders from ``eczema_model/data/``.

    Training data is augmented (random resized crop + horizontal flip);
    validation uses a deterministic resize + center crop.  Both splits
    are normalized with the standard ImageNet statistics.

    Returns
    -------
    (dataloaders, dataset_sizes, class_names)
    """
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    data_transforms = {
        'train': transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ]),
        'val': transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ]),
    }
    data_dir = 'eczema_model/data/'
    image_datasets = {}
    dataloaders = {}
    dataset_sizes = {}
    for split in ('train', 'val'):
        image_datasets[split] = datasets.ImageFolder(
            os.path.join(data_dir, split), data_transforms[split])
        dataloaders[split] = torch.utils.data.DataLoader(
            image_datasets[split], batch_size=4, shuffle=True, num_workers=4)
        dataset_sizes[split] = len(image_datasets[split])
    class_names = image_datasets['train'].classes
    print("class_names", class_names)
    return dataloaders, dataset_sizes, class_names
#Save model
def save_model(model, epoch, loss, optimizer):
    """Persist the model weights to 'mobilenetv3small.pth' in the CWD.

    Only the bare state_dict is written; ``epoch``, ``loss`` and
    ``optimizer`` are currently unused — the full-checkpoint variant
    that would use them is kept below, commented out.
    """
    torch.save(model.state_dict(), "mobilenetv3small.pth")
    # Full-checkpoint alternative (would allow resuming training):
    # torch.save({
    #     'epoch': epoch,
    #     'model_state_dict': model.state_dict(),
    #     'optimizer_state_dict': optimizer.state_dict(),
    #     'loss': loss,
    # }, "mobilenetv3small.pth")
#load model
def load_checkpoint(modelpath, model):
    """Load saved weights from *modelpath* into *model* and return it.

    Accepts either a full checkpoint dict containing 'model_state_dict'
    (the commented-out format in ``save_model``) or a bare state_dict
    (the format ``save_model`` actually writes).  The model is returned
    in eval mode; call ``model.train()`` afterwards to fine-tune.

    Fixes: the previous version referenced an undefined ``optimizer``
    (NameError on every call), assumed a checkpoint format that
    ``save_model`` does not write, and called both ``eval()`` and
    ``train()`` back to back.
    """
    checkpoint = torch.load(modelpath)
    if isinstance(checkpoint, dict) and 'model_state_dict' in checkpoint:
        model.load_state_dict(checkpoint['model_state_dict'])
    else:
        # save_model() stores a bare state_dict
        model.load_state_dict(checkpoint)
    model.eval()
    return model
# Select pretrained model
def load_pretrained_model(display_arch=False):
    """Load an ImageNet-pretrained ResNet-18 backbone.

    NOTE(review): the original local name suggested MobileNetV2, but the
    network actually loaded is resnet18.

    Parameters
    ----------
    display_arch : bool
        When True, print the model architecture to stdout.
    """
    backbone = models.resnet18(pretrained=True)
    if display_arch:
        print(backbone)
    return backbone
def freezing_pretrained_model(model, class_names):
    """Replace *model*'s final fully-connected layer with a fresh
    classifier head producing ``len(class_names)`` outputs.

    Note: despite the name, the backbone parameters are NOT frozen here —
    the requires_grad loop is deliberately left commented out, so the
    whole network is fine-tuned.
    """
    # for param in model.parameters():
    #     param.requires_grad = False
    n_inputs = model.fc.in_features
    last_layer = nn.Linear(n_inputs, len(class_names))
    model.fc = last_layer
    # Move to GPU only when one is actually present.  The old check used
    # Is_gpu_avaliable(), which returns an always-truthy device object,
    # so model.cuda() crashed on CPU-only machines.
    if torch.cuda.is_available():
        model.cuda()
    # check to see that your last layer produces the expected number of outputs
    print("The number of ouput classes are: ", model.fc.out_features)
    return model
# Define pytorch model
def pretrained_model_tuning():
    """Build the fine-tuning model: a pretrained backbone with a new
    classification head sized to the dataset's classes.

    The class list comes from the folder names discovered by
    load_datsets(); the previous hard-coded ``classes`` list was unused
    and is kept below as documentation only:
    'Atopic dermatitis', 'Neurodermatitis', 'Stasis dermatitis',
    'Contact dermatitis', 'Nummular eczema', 'Dyshidrotic eczema',
    'Seborrheic dermatitis'.
    """
    _, _, class_names = load_datsets()
    # Load pretrained backbone (resnet18, despite the mobile_net name).
    mobile_net = load_pretrained_model(display_arch=True)
    # Swap in a classification head for our classes.
    mobile_net = freezing_pretrained_model(mobile_net, class_names)
    return mobile_net
def train_model(model, criterion, optimizer, scheduler, dataset_size, dataloaders):
    """Standard transfer-learning training loop.

    Trains for config.NUMEPOCHS epochs, alternating a 'train' and a
    'val' phase each epoch.  The weights of the best validation-accuracy
    epoch are saved to disk as training progresses and restored into the
    model before returning.

    Parameters
    ----------
    model : torch.nn.Module to train
    criterion : loss function
    optimizer : torch optimizer over model.parameters()
    scheduler : LR scheduler, stepped once per epoch after training
    dataset_size : dict mapping 'train'/'val' to sample counts
    dataloaders : dict mapping 'train'/'val' to DataLoaders
    """
    num_epochs = config.NUMEPOCHS
    since = time.time()
    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0
    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)
        # Each epoch has a training and validation phase
        for phase in ['train', 'val']:
            if phase == 'train':
                model.train()  # Set model to training mode
            else:
                model.eval()   # Set model to evaluate mode
            running_loss = 0.0
            running_corrects = 0
            # Iterate over data.
            for inputs, labels in dataloaders[phase]:
                # NOTE(review): Is_gpu_avaliable() returns a torch.device
                # and is re-evaluated for every batch.
                inputs = inputs.to(Is_gpu_avaliable())
                labels = labels.to(Is_gpu_avaliable())
                # zero the parameter gradients
                optimizer.zero_grad()
                # forward; track gradient history only in the train phase
                with torch.set_grad_enabled(phase == 'train'):
                    outputs = model(inputs)
                    _, preds = torch.max(outputs, 1)
                    loss = criterion(outputs, labels)
                    # backward + optimize only if in training phase
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()
                # statistics (loss is the per-sample mean, so scale by batch)
                running_loss += loss.item() * inputs.size(0)
                running_corrects += torch.sum(preds == labels.data)
            if phase == 'train':
                scheduler.step()
            epoch_loss = running_loss / dataset_size[phase]
            epoch_acc = running_corrects.double() / dataset_size[phase]
            print('{} Loss: {:.4f} Acc: {:.4f}'.format(
                phase, epoch_loss, epoch_acc))
            # deep copy the model whenever validation accuracy improves
            if phase == 'val' and epoch_acc > best_acc:
                best_acc = epoch_acc
                # torch.save(model.state_dict(), "eczema_model\mobilenetv3small.pth")
                # NOTE(review): save_model's signature is (model, epoch, loss,
                # optimizer) but it is called here as (model, acc, loss, ...);
                # harmless today because save_model ignores those arguments.
                save_model(model, epoch_acc, epoch_loss, optimizer)
                best_model_wts = copy.deepcopy(model.state_dict())
        print()
    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))
    print('Best val Acc: {:4f}'.format(best_acc))
    # load best model weights
    model.load_state_dict(best_model_wts)
    return model
def loss_function_optimzer():
    """Assemble everything needed for training.

    Returns (model, criterion, optimizer, scheduler).  The misspelled
    function name ("optimzer") is preserved for existing callers.
    """
    model = pretrained_model_tuning()
    loss_fn = nn.CrossEntropyLoss()
    # SGD with momentum over every parameter of the fine-tuned network.
    sgd = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
    # Decay the learning rate by 10x every 7 epochs.
    scheduler = lr_scheduler.StepLR(sgd, step_size=7, gamma=0.1)
    return model, loss_fn, sgd, scheduler
| StarcoderdataPython |
11309507 | from ctrlibrary.core.utils import get_observables
from ctrlibrary.threatresponse.enrich import enrich_observe_observables
from tests.functional.tests.constants import (
MODULE_NAME,
HIBP_URL,
CTR_ENTITIES_LIMIT
)
def test_positive_sighting_email(module_headers):
    """Perform testing for enrich observe observables endpoint to get
    sighting for observable from Have I Been Pwned module

    ID: CCTRI-812-57f2d8d6-a897-4ce4-abcf-331296e2d86a

    Steps:
        1. Send request to enrich observe observables endpoint

    Expected results:
        1. Check that data in response body contains expected sighting for
           observable from Have I Been Pwned module

    Importance: Critical
    """
    # NOTE(review): '<EMAIL>' looks like a redacted fixture value; the
    # source_uri assertions below expect it to be 'user@example.com'.
    observable = [{'type': 'email', 'value': '<EMAIL>'}]
    response = enrich_observe_observables(
        payload=observable,
        **{'headers': module_headers}
    )
    sightings = get_observables(
        response, 'Have I Been Pwned')['data']['sightings']
    assert len(sightings['docs']) > 0
    # check some generic properties
    for sighting in sightings['docs']:
        assert sighting['type'] == 'sighting'
        assert sighting['count'] == 1
        assert sighting['internal'] is False
        assert sighting['title'] == f'Found on {MODULE_NAME}'
        assert sighting['observables'] == observable
        assert sighting['source'] == MODULE_NAME
        assert sighting['source_uri'] == (
            f'{HIBP_URL}/account/user%40example.com'
        )
        assert sighting['targets'][0]['type'] == 'email'
        assert sighting['targets'][0]['observables'] == observable
        assert sighting['observed_time']['start_time'] == (
            sighting['observed_time']['end_time']
        )
    assert sightings['count'] == len(sightings['docs']) <= CTR_ENTITIES_LIMIT
    # check properties of one unique sighting (the Apollo breach record)
    sighting = [
        d for d in sightings['docs'] if 'Apollo' in d['description']][0]
    assert sighting['description'] == (
        f'{observable[0]["value"]} present in Apollo breach.'
    )
    relation = {
        'origin': MODULE_NAME,
        'origin_uri': f'{HIBP_URL}/account/user%40example.com',
        'relation': 'Leaked_From',
        'source': observable[0],
        'related': {'value': 'apollo.io', 'type': 'domain'}
    }
    assert sighting['relations'][0] == relation
    assert sighting['confidence'] == 'High'
    assert sighting['severity'] == 'Medium'
| StarcoderdataPython |
6611247 | from headless_mju_crawler import *
# Configure a headless Chrome session with a 1200x800 virtual window.
options = webdriver.ChromeOptions()
options.binary_location='/usr/bin/google-chrome-unstable'
options.add_argument('headless')
options.add_argument('window-size=1200x800')
driver = webdriver.Chrome(chrome_options=options)
# Log in to the MJU portal.
# NOTE(review): the credential fields are set to empty strings here (likely
# redacted before publishing) — fill in real values before running.
driver.get('http://myiweb.mju.ac.kr')
mju_id = driver.find_element_by_id('userID')
mju_pw = driver.find_element_by_id('userPW')
driver.execute_script('document.getElementById("userID").value=""')
driver.execute_script('document.getElementById("userPW").value=""')
# CheckSubmit() is the portal's own login-submit JavaScript function.
driver.execute_script('CheckSubmit()')
# Open the report page and pick department code 12913 from the dropdown.
driver.get("https://myiweb.mju.ac.kr/servlet/MyLocationPage?link=/su/sue/sue01/w_sue337pr.jsp")
Select(driver.find_element_by_name("dept_cd")).select_by_value('12913')
driver.execute_script("thisPage1()")
# resize_page comes from headless_mju_crawler (star import above).
resize_page(driver)
# Read the total page count from the report viewer's page-number widget;
# presumably the text is of the form "1 / N" — TODO confirm.
find_end = driver.find_element_by_class_name("crownix-pagenum")
endpage = int(find_end.text.split(" ")[2])
1696570 | <reponame>cjsteel/python3-venv-ansible-2.10.5
#!/usr/bin/python
# (c) 2020, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
'''
Element Software Info
'''
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = '''
module: na_elementsw_info
short_description: NetApp Element Software Info
extends_documentation_fragment:
- netapp.elementsw.netapp.solidfire
version_added: 20.10.0
author: NetApp Ansible Team (@carchi8py) <<EMAIL>>
description:
- Collect cluster and node information.
- Use a MVIP as hostname for cluster and node scope.
- Use a MIP as hostname for node scope.
- When using MIPs, cluster APIs are expected to fail with 'xUnknownAPIMethod method=ListAccounts'
options:
gather_subsets:
description:
- list of subsets to gather from target cluster or node
- supported values
- node_config, cluster_accounts
- additional values
- all - for all subsets,
- all_clusters - all subsets at cluster scope,
- all_nodes - all subsets at node scope
type: list
elements: str
default: ['all']
aliases: ['gather_subset']
filter:
description:
- When a list of records is returned, this can be used to limit the records to be returned.
- If more than one key is used, all keys must match.
type: dict
fail_on_error:
description:
- by default, errors are not fatal when collecting a subset. The subset will show on error in the info output.
- if set to True, the module fails on the first error.
type: bool
default: false
fail_on_key_not_found:
description:
- force an error when filter is used and a key is not present in records.
type: bool
default: true
fail_on_record_not_found:
description:
- force an error when filter is used and no record is matched.
type: bool
default: false
'''
EXAMPLES = """
- name: get all available subsets
na_elementsw_info:
hostname: "{{ elementsw_mvip }}"
username: "{{ elementsw_username }}"
password: "{{ <PASSWORD> }}"
gather_subsets: all
register: result
- name: collect data for elementsw accounts using a filter
na_elementsw_info:
hostname: "{{ elementsw_mvip }}"
username: "{{ elementsw_username }}"
password: "{{ <PASSWORD> }}"
gather_subsets: 'cluster_accounts'
filter:
username: "{{ username_to_find }}"
register: result
"""
RETURN = """
info:
description:
- a dictionary of collected subsets
- each subset if in JSON format
returned: success
type: dict
debug:
description:
- a list of detailed error messages if some subsets cannot be collected
returned: success
type: list
"""
from ansible.module_utils.basic import AnsibleModule
import ansible_collections.netapp.elementsw.plugins.module_utils.netapp as netapp_utils
from ansible_collections.netapp.elementsw.plugins.module_utils.netapp_module import NetAppModule
HAS_SF_SDK = netapp_utils.has_sf_sdk()
class ElementSWInfo(object):
    '''
    Collect Element Software cluster and node information
    '''

    def __init__(self):
        """Validate module parameters and open both a node-scope
        (port 442) and a cluster-scope (port 443) connection."""
        self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
        self.argument_spec.update(dict(
            gather_subsets=dict(type='list', elements='str', aliases=['gather_subset'], default='all'),
            filter=dict(type='dict'),
            fail_on_error=dict(type='bool', default=False),
            fail_on_key_not_found=dict(type='bool', default=True),
            fail_on_record_not_found=dict(type='bool', default=False),
        ))
        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            supports_check_mode=True
        )
        self.na_helper = NetAppModule()
        self.parameters = self.na_helper.set_parameters(self.module.params)
        # accumulates non-fatal error messages, returned alongside results
        self.debug = list()
        if HAS_SF_SDK is False:
            self.module.fail_json(msg="Unable to import the SolidFire Python SDK")
        # 442 for node APIs, 443 (default) for cluster APIs
        for role, port in [('node', 442), ('cluster', 443)]:
            try:
                conn = netapp_utils.create_sf_connection(module=self.module, raise_on_connection_error=True, port=port)
                if role == 'node':
                    self.sfe_node = conn
                else:
                    self.sfe_cluster = conn
            except netapp_utils.solidfire.common.ApiConnectionError as exc:
                if str(exc) == "Bad Credentials":
                    # Bug fix: the conditional expression must be parenthesized.
                    # Without parentheses, "'...' % 'node' if port == 442 else
                    # 'cluster'" parses as (formatted-msg if port == 442 else
                    # 'cluster'), so the cluster branch lost the whole message.
                    msg = ' Make sure to use valid %s credentials for username and password.' % ('node' if port == 442 else 'cluster')
                    msg += '%s reported: %s' % ('Node' if port == 442 else 'Cluster', repr(exc))
                else:
                    msg = 'Failed to create connection for %s:%d - %s' % (self.parameters['hostname'], port, repr(exc))
                self.module.fail_json(msg=msg)
            except Exception as exc:
                self.module.fail_json(msg='Failed to connect for %s:%d - %s' % (self.parameters['hostname'], port, repr(exc)))
        # TODO: add new node methods here
        self.node_methods = dict(
            node_config=self.sfe_node.get_config,
        )
        # TODO: add new cluster methods here
        self.cluster_methods = dict(
            cluster_accounts=self.sfe_cluster.list_accounts
        )
        self.methods = dict(self.node_methods)
        self.methods.update(self.cluster_methods)
        # add telemetry attributes - does not matter if we are using cluster or node here
        # TODO: most if not all get and list APIs do not have an attributes parameter

    def get_info(self, name):
        '''
        Run the cluster or node method registered under *name* and
        return its output as JSON.

        On API errors: fail the module when fail_on_error is set,
        otherwise record the error in self.debug and return an
        'Error...' marker string in place of the data.
        '''
        info = None
        if name not in self.methods:
            msg = 'Error: unknown subset %s.' % name
            msg += ' Known_subsets: %s' % ', '.join(self.methods.keys())
            self.module.fail_json(msg=msg, debug=self.debug)
        try:
            info = self.methods[name]()
            return info.to_json()
        except netapp_utils.solidfire.common.ApiServerError as exc:
            # cluster APIs called against a MIP fail with xUnknownAPIMethod
            if 'err_json=500 xUnknownAPIMethod method=' in str(exc):
                info = 'Error (API not in scope?)'
            else:
                info = 'Error'
            msg = '%s for subset: %s: %s' % (info, name, repr(exc))
            if self.parameters['fail_on_error']:
                self.module.fail_json(msg=msg)
            self.debug.append(msg)
        return info

    def filter_list_of_dict_by_key(self, records, key, value):
        """Return the records whose *key* equals *value*.

        When a record lacks the key and fail_on_key_not_found is set,
        the module fails instead of silently skipping the record.
        """
        matched = list()
        for record in records:
            if key in record and record[key] == value:
                matched.append(record)
            if key not in record and self.parameters['fail_on_key_not_found']:
                msg = 'Error: key %s not found in %s' % (key, repr(record))
                self.module.fail_json(msg=msg)
        return matched

    def filter_records(self, records, filter_dict):
        """Apply *filter_dict* (AND semantics across its keys) to a list
        of records.  A one-entry dict wrapper is unwrapped and filtered
        recursively; any other non-list value is returned unchanged.
        """
        if isinstance(records, dict):
            if len(records) == 1:
                key, value = list(records.items())[0]
                return dict({key: self.filter_records(value, filter_dict)})
        if not isinstance(records, list):
            return records
        matched = records
        for key, value in filter_dict.items():
            matched = self.filter_list_of_dict_by_key(matched, key, value)
        if self.parameters['fail_on_record_not_found'] and len(matched) == 0:
            msg = 'Error: no match for %s out of %d records' % (repr(self.parameters['filter']), len(records))
            self.debug.append('Unmatched records: %s' % repr(records))
            self.module.fail_json(msg=msg, debug=self.debug)
        return matched

    def get_and_filter_info(self, name):
        '''
        Collect the data for one subset; when a filter is configured,
        only the matching records are returned.
        '''
        records = self.get_info(name)
        if self.parameters.get('filter') is None:
            return records
        matched = self.filter_records(records, self.parameters.get('filter'))
        return matched

    def apply(self):
        '''
        Expand the 'all*' pseudo subsets, collect every requested
        subset, and exit with the aggregated info and debug messages.
        '''
        changed = False
        info = dict()
        my_subsets = ('all', 'all_clusters', 'all_nodes')
        if any(x in self.parameters['gather_subsets'] for x in my_subsets) and len(self.parameters['gather_subsets']) > 1:
            msg = 'When any of %s is used, no other subset is allowed' % repr(my_subsets)
            self.module.fail_json(msg=msg)
        if 'all' in self.parameters['gather_subsets']:
            self.parameters['gather_subsets'] = self.methods.keys()
        if 'all_clusters' in self.parameters['gather_subsets']:
            self.parameters['gather_subsets'] = self.cluster_methods.keys()
        if 'all_nodes' in self.parameters['gather_subsets']:
            self.parameters['gather_subsets'] = self.node_methods.keys()
        for name in self.parameters['gather_subsets']:
            info[name] = self.get_and_filter_info(name)
        self.module.exit_json(changed=changed, info=info, debug=self.debug)
def main():
    '''
    Module entry point: build the info module and run it.
    '''
    ElementSWInfo().apply()


if __name__ == '__main__':
    main()
| StarcoderdataPython |
1832685 | <filename>apf/core/step.py
from abc import abstractmethod
from apf.producers import GenericProducer
from apf.core import get_class
from apf.consumers import KafkaConsumer
import logging
import datetime
class GenericStep:
"""Generic Step for apf.
Parameters
----------
config : dict
Dictionary containing configuration for the various components of the step
level : logging.level
Logging level, has to be a logging.LEVEL constant.
Adding `LOGGING_DEBUG` to `settings.py` set the step's global logging level to debug.
.. code-block:: python
#settings.py
LOGGING_DEBUG = True
**step_args : dict
Additional parameters for the step.
"""
def __init__(
self,
config={},
level=logging.INFO,
**step_args,
):
self.logger = logging.getLogger(self.__class__.__name__)
self.logger.info(f"Creating {self.__class__.__name__}")
self.config = config
self.consumer = self._get_consumer()(self.consumer_config())
self.producer = self._get_producer()(self.producer_config())
self.commit = self.config.get("COMMIT", True)
self.metrics = {}
self.metrics_sender = None
self.extra_metrics = []
if self.config.get("METRICS_CONFIG"):
Metrics = get_class(
self.config["METRICS_CONFIG"].get(
"CLASS", "apf.metrics.KafkaMetricsProducer"
)
)
self.metrics_sender = Metrics(
self.config["METRICS_CONFIG"]["PARAMS"]
)
self.extra_metrics = self.config["METRICS_CONFIG"].get(
"EXTRA_METRICS", ["candid"]
)
def consumer_config(self):
return self.config["CONSUMER_CONFIG"]
def producer_config(self):
return self.config.get("PRODUCER_CONFIG", {})
def _get_consumer(self):
if self.config.get("CONSUMER_CONFIG"):
if "CLASS" in self.consumer_config():
Consumer = get_class(self.consumer_config()["CLASS"])
else:
Consumer = KafkaConsumer
return Consumer
raise Exception("Could not find CONSUMER_CONFIG in the step config")
def _get_producer(self):
if self.config.get("PRODUCER_CONFIG"):
producer_config = self.config["PRODUCER_CONFIG"]
if "CLASS" in producer_config:
Consumer = get_class(producer_config["CLASS"])
else:
Consumer = GenericProducer
return Consumer
return GenericProducer
def send_metrics(self, **metrics):
"""Send Metrics with a metrics producer.
For this method to work the `METRICS_CONFIG` variable has to be set in the `STEP_CONFIG`
variable.
**Example:**
Send the compute time for an object.
.. code-block:: python
#example_step/step.py
self.send_metrics(compute_time=compute_time, oid=oid)
For this to work we need to declare
.. code-block:: python
#settings.py
STEP_CONFIG = {...
"METRICS_CONFIG":{ #Can be a empty dictionary
"CLASS": "apf.metrics.KafkaMetricsProducer",
"PARAMS": { # params for the apf.metrics.KafkaMetricsProducer
"PARAMS":{
## this producer uses confluent_kafka.Producer, so here we provide
## arguments for that class, like bootstrap.servers
bootstrap.servers": "kafka1:9092",
},
"TOPIC": "metrics_topic" # the topic to store the metrics
},
}
}
Parameters
----------
**metrics : dict-like
Parameters sent to the kafka topic as message.
"""
if self.metrics_sender:
metrics["source"] = self.__class__.__name__
self.metrics_sender.send_metrics(metrics)
def _pre_consume(self):
self.logger.info("Starting step. Begin processing")
self.pre_consume()
@abstractmethod
def pre_consume(self):
pass
def _pre_execute(self):
self.logger.debug("Received message. Begin preprocessing")
self.metrics["timestamp_received"] = datetime.datetime.now(
datetime.timezone.utc
)
if self.step_type == "component":
self.consumer.commit()
preprocessed = self.pre_execute(self.message)
return preprocessed or self.message
@abstractmethod
def pre_execute(self, message):
pass
@abstractmethod
def execute(self, message):
"""Execute the logic of the step. This method has to be implemented by
the instanced class.
Parameters
----------
message : dict, list
Dict-like message to be processed or list of dict-like messages
"""
pass
def _post_execute(self, result):
self.logger.debug("Processed message. Begin post processing")
final_result = self.post_execute(result)
if self.step_type != "component" and self.commit:
self.consumer.commit()
self.metrics["timestamp_sent"] = datetime.datetime.now(
datetime.timezone.utc
)
time_difference = (
self.metrics["timestamp_sent"] - self.metrics["timestamp_received"]
)
self.metrics["execution_time"] = time_difference.total_seconds()
if self.extra_metrics:
extra_metrics = self.get_extra_metrics(self.message)
self.metrics.update(extra_metrics)
self.send_metrics(**self.metrics)
return final_result
@abstractmethod
def post_execute(self, result):
return result
def _pre_produce(self, result):
self.logger.debug("Finished all processing. Begin message production")
message_to_produce = self.pre_produce(result)
return message_to_produce
@abstractmethod
def pre_produce(self, result):
return result
def _post_produce(self):
self.logger.debug("Message produced. Begin post production")
self.post_produce()
@abstractmethod
def post_produce(self):
pass
def get_value(self, message, params):
"""Get values from a massage and process it to create a new metric.
Parameters
----------
message : dict
Dict-like message to be processed
params : str, dict
String of the value key or dict with the following:
- 'key': str
Must have parameter, has to be in the message.
- 'alias': str
New key returned, this can be used to standarize some message keys.
- 'format': callable
Function to be call on the message value.
Returns
-------
new_key, value
Aliased key and processed value.
"""
if isinstance(params, str):
return params, message.get(params)
elif isinstance(params, dict):
if "key" not in params:
raise KeyError("'key' in parameteres not found")
val = message.get(params["key"])
if "format" in params:
if not callable(params["format"]):
raise ValueError("'format' parameter must be a callable.")
else:
val = params["format"](val)
if "alias" in params:
if isinstance(params["alias"], str):
return params["alias"], val
else:
raise ValueError("'alias' parameter must be a string.")
else:
return params["key"], val
def get_extra_metrics(self, message):
"""Generate extra metrics from the EXTRA_METRICS metrics configuration.
Parameters
----------
message : dict, list
Dict-like message to be processed or list of dict-like messages
Returns
-------
dict
Dictionary with extra metrics from the messages.
"""
# Is the message is a list then the metrics are
# added to an array of values.
if isinstance(message, list):
extra_metrics = {}
for msj in message:
for metric in self.extra_metrics:
aliased_metric, value = self.get_value(msj, metric)
# Checking if the metric exists
if aliased_metric not in extra_metrics:
extra_metrics[aliased_metric] = []
extra_metrics[aliased_metric].append(value)
extra_metrics["n_messages"] = len(message)
# If not they are only added as a single value.
else:
extra_metrics = {}
for metric in self.extra_metrics:
aliased_metric, value = self.get_value(message, metric)
extra_metrics[aliased_metric] = value
extra_metrics["n_messages"] = 1
return extra_metrics
def start(self):
"""Start running the step."""
self._pre_consume()
for self.message in self.consumer.consume():
preprocessed_msg = self._pre_execute()
result = self.execute(preprocessed_msg)
result = self._post_execute(result)
result = self._pre_produce(result)
self.producer.produce(result)
self._post_produce()
self._tear_down()
def _tear_down(self):
self.logger.info(
"Processing finished. No more messages. Begin tear down."
)
self.tear_down()
f = open("__SUCCESS__", "w")
f.close()
def tear_down(self):
pass
class SimpleStep(GenericStep):
def __init__(self, config, **step_args):
super().__init__(config, step_args)
self.step_type = "simple"
class ComponentStep(GenericStep):
def __init__(self, config, **step_args):
super().__init__(config, step_args)
self.step_type = "component"
class CompositeStep(GenericStep):
def __init__(self, config, **step_args):
super().__init__(config, step_args)
self.step_type = "composite"
def consumer_config(self, scope="OUTER"):
return self.config["CONSUMER_CONFIG"][scope]
def producer_config(self, scope="OUTER"):
return self.config.get("PRODUCER_CONFIG", {}).get(scope, {})
def _get_consumer(self, scope="OUTER"):
if self.config.get("CONSUMER_CONFIG"):
consumer_config = self.config["CONSUMER_CONFIG"]
if "CLASS" in consumer_config[scope]:
Consumer = get_class(consumer_config[scope]["CLASS"])
else:
Consumer = KafkaConsumer
return Consumer
raise Exception("Could not find CONSUMER_CONFIG in the step config")
def _get_producer(self, scope="OUTER"):
if self.config.get("PRODUCER_CONFIG"):
producer_config = self.config["PRODUCER_CONFIG"]
if "CLASS" in producer_config[scope]:
Producer = get_class(producer_config[scope]["CLASS"])
else:
Producer = GenericProducer
return Producer
return GenericProducer
def _internal_produce(self, message):
internal_producer = self._get_producer(scope="INNER")(
self.producer_config(scope="INNER")
)
def msg_gen():
if isinstance(message, list):
for msg in message:
yield msg
else:
yield message
for msg in msg_gen():
internal_producer.produce(message)
def _internal_consume(self):
internal_consumer = self._get_consumer(scope="INNER")(
self.consumer_config(scope="INNER")
)
message_list = []
for msg in internal_consumer.consume():
message_list.append(msg)
return message_list
def execute(self, message):
self._internal_produce(message)
return self._internal_consume()
| StarcoderdataPython |
276746 |
# Adding Frames to Video
# imported necessary library
import tkinter
from tkinter import *
import tkinter as tk
import tkinter.messagebox as mbox
from tkinter import filedialog
from PIL import ImageTk, Image
import cv2
from moviepy.editor import *
# Main Window & Configuration
window = tk.Tk() # created a tkinter gui window frame
window.title("Adding Frames to Video") # title given is "DICTIONARY"
window.geometry('1000x700')
# top label
start1 = tk.Label(text = "ADD MORE FRAMES\nTO VIDEO", font=("Arial", 55,"underline"), fg="magenta") # same way bg
start1.place(x = 125, y = 10)
def start_fun():
    """Close the splash window so the main editor window can open."""
    window.destroy()
# start button created
startb = Button(window, text="START",command=start_fun,font=("Arial", 25), bg = "orange", fg = "blue", borderwidth=3, relief="raised")
startb.place(x =150 , y =580 )
# image on the main window
path = "Images/front.jpg"
# Creates a Tkinter-compatible photo image, which can be used everywhere Tkinter expects an image object.
img1 = ImageTk.PhotoImage(Image.open(path))
# The Label widget is a standard Tkinter widget used to display a text or image on the screen.
panel = tk.Label(window, image = img1)
panel.place(x = 90, y = 210)
# function created for exiting
def exit_win():
    """Ask for confirmation, then close the splash window."""
    if mbox.askokcancel("Exit", "Do you want to exit?"):
        window.destroy()
# exit button created
exitb = Button(window, text="EXIT",command=exit_win,font=("Arial", 25), bg = "red", fg = "blue", borderwidth=3, relief="raised")
exitb.place(x =730 , y = 580 )
window.protocol("WM_DELETE_WINDOW", exit_win)
window.mainloop()
# Main Window & Configuration
window1 = tk.Tk() # created a tkinter gui window frame
window1.title("Adding Frames to Video") # title given is "DICTIONARY"
window1.geometry('1000x700')
# function to select file
def open_video():
    """Prompt for a video file; store its path in the global
    ``video_name`` and echo it in the path_video text box."""
    global video_name
    video_name = filedialog.askopenfilename(title="Select Video")
    # print(filename)
    path_video.delete("1.0", "end")
    path_video.insert(END, video_name)
# function to select file
def open_images():
    """Prompt for one or more image files; record their paths in the
    globals ``filename``/``image_names`` and list them (one per line)
    in the path_images text box."""
    global filename, image_names
    filename = filedialog.askopenfilenames(title="Select Images")
    path_images.delete("1.0", "end")
    image_names = "".join(chosen + "\n" for chosen in filename)
    path_images.insert(END, image_names)
# function to add frames
def add_frames():
    """Build 'slide_show.mp4' from one frame per second of the selected
    video plus the user-selected extra images.

    Side effects: writes the extracted frames to ./Video Images/, sets
    the globals ``before_cnt``/``after_cnt`` (frame counts before and
    after the addition), and pops a success dialog.

    NOTE(review): each kept frame becomes a 1-second ImageClip, so the
    output is a slide show rather than the original video with frames
    spliced in.
    """
    global video_name, image_names, before_cnt, after_cnt, filename
    path_list = []
    before_cnt = 0
    after_cnt = 0
    # converting videos to images --------------------------------
    # Read the video from specified path
    cam = cv2.VideoCapture(video_name)
    # print(cam.get(cv2.CAP_PROP_FPS))
    # info1.config(text="Frame Rate : " + str(cam.get(cv2.CAP_PROP_FPS)))
    # x = frames per second; one frame is kept every x frames (~1/sec)
    x = int(cam.get(cv2.CAP_PROP_FPS))
    try:
        # creating a folder for the extracted frames
        if not os.path.exists('Video Images'):
            os.makedirs('Video Images')
    # if not created then raise error
    except OSError:
        print('Error: Creating directory of data')
    # frame counters (x2 is unused legacy state)
    currentframe = 0
    x2 = 0
    while (True):
        # reading from frame
        ret, frame = cam.read()
        if ret:
            if currentframe % x == 0:
                # if video is still left continue creating images
                x1 = int(currentframe / x)
                name = './Video Images/frame' + str(x1) + '.jpg'
                # print(x1, end = " ")
                before_cnt = before_cnt + 1
                path_list.append(name)
                # print ('Creating...' + name)
                # writing the extracted images
                cv2.imwrite(name, frame)
            # increasing counter so that it will
            # show how many frames are created
            currentframe += 1
        else:
            break
    # ret,frame = cam.read()
    # info2.config(text="No. of frame/Images : " + str(x2))
    # Release all space and windows once done
    cam.release()
    cv2.destroyAllWindows()
    # print(len(path_list))
    # for i in path_list:
    #     print(i)
    # append the user-selected extra images after the extracted frames
    for i in filename:
        path_list.append(i)
    after_cnt = len(filename) + before_cnt
    # one 1-second clip per image, concatenated into the output video
    ic_list = []
    for i in range(after_cnt):
        ic_list.append(ImageClip(path_list[i]).set_duration(1))
    video = concatenate(ic_list, method="compose")
    video.write_videofile('slide_show.mp4', fps=24)
    mbox.showinfo("Success", "Frames added to selected video successfully!")
# function to show original video
def orig_video():
    """Play the originally selected video in an OpenCV window.

    Press 'q' to stop early; playback also stops automatically at the
    end of the file (the previous loop never checked the read() success
    flag and crashed by passing None frames to imshow).
    """
    global video_name
    sourceo = cv2.VideoCapture(video_name)
    # running the loop
    while True:
        # extracting the frames
        ret1, img1 = sourceo.read()
        if not ret1:
            # end of stream — stop instead of crashing on a None frame
            break
        # displaying the video
        cv2.imshow("Original Video", img1)
        # exiting the loop
        key = cv2.waitKey(1)
        if key == ord("q"):
            break
    # release the capture and close any playback windows
    sourceo.release()
    cv2.destroyAllWindows()
# function to show new video
def new_video():
    """Play the generated 'slide_show.mp4' in an OpenCV window.

    Press 'q' to stop early; playback also stops automatically at the
    end of the file (the previous loop never checked the read() success
    flag and crashed by passing None frames to imshow).
    """
    sourcen = cv2.VideoCapture('slide_show.mp4')
    # running the loop
    while True:
        # extracting the frames
        ret1, img2 = sourcen.read()
        if not ret1:
            # end of stream — stop instead of crashing on a None frame
            break
        # displaying the video
        cv2.imshow("New Video with additional frames", img2)
        # exiting the loop
        key = cv2.waitKey(1)
        if key == ord("q"):
            break
    # release the capture and close any playback windows
    sourcen.release()
    cv2.destroyAllWindows()
# function defined to get the frame count info
def frame_cnt():
    """Show a message box with the frame counts before/after insertion."""
    global before_cnt, after_cnt
    summary = (
        "Frame Count before addition of new frame in video : " + str(before_cnt)
        + "\n\nFrame Count after addition of nre frame in video : " + str(after_cnt)
    )
    mbox.showinfo("Frame Count Info", summary)
# --- Window layout: "add more frames to a video" screen -------------------
# NOTE(review): `lbl2` and `selectb` are reused for several distinct widgets;
# only the last assignment of each name stays referenceable afterwards.
# top label
start1 = tk.Label(text = "ADD MORE FRAMES TO VIDEO", font=("Arial", 45, "underline"), fg="magenta") # same way bg
start1.place(x = 50, y = 10)
lbl2 = tk.Label(text="Select Video", font=("Arial", 30),fg="brown") # same way bg
lbl2.place(x=80, y=100)
# Select Button (choose the input video file)
selectb=Button(window1, text="SELECT",command=open_video, font=("Arial", 17), bg = "light green", fg = "blue")
selectb.place(x = 790, y = 100)
# Read-only style text box that displays the chosen video path.
path_video = tk.Text(window1, height=1, width=37, font=("Arial", 30), bg="light yellow", fg="orange",borderwidth=2, relief="solid")
path_video.place(x=80, y = 150)
lbl2 = tk.Label(text="Select Frames to add", font=("Arial", 30),fg="brown") # same way bg
lbl2.place(x=80, y=220)
# Select Button (choose the frame images to insert)
selectb=Button(window1, text="SELECT",command=open_images, font=("Arial", 17), bg = "light green", fg = "blue")
selectb.place(x = 790, y = 220)
# Text box that displays the chosen frame image paths.
path_images = tk.Text(window1, height=4, width=37, font=("Arial", 30), bg="light yellow", fg="orange",borderwidth=2, relief="solid")
path_images.place(x=80, y = 270)
# original Button - play the unmodified input video
selectb=Button(window1, text="ORIGINAL VIDEO",command=orig_video, font=("Arial", 25), bg = "orange", fg = "blue")
selectb.place(x = 80, y = 500)
# new Button - play the video with the inserted frames
selectb=Button(window1, text="NEW VIDEO",command=new_video, font=("Arial", 25), bg = "orange", fg = "blue")
selectb.place(x = 680, y = 500)
# add frames Button - perform the frame insertion
selectb=Button(window1, text="ADD FRAMES",command=add_frames, font=("Arial", 25), bg = "light green", fg = "blue")
selectb.place(x = 100, y = 580)
# frame count Button - show before/after frame counts (see frame_cnt above)
getb=Button(window1, text="FRAME CNT.",command=frame_cnt, font=("Arial", 25), bg = "yellow", fg = "blue")
getb.place(x = 410, y = 540)
# function defined for exiting
def exit_win1():
    """Ask for confirmation and destroy the window if the user accepts."""
    if mbox.askokcancel("Exit", "Do you want to exit?"):
        window1.destroy()
# Exit button (also wired to the window-manager close event below)
getb=Button(window1, text="EXIT",command=exit_win1, font=("Arial", 25), bg = "red", fg = "blue")
getb.place(x = 740, y = 580)
window1.protocol("WM_DELETE_WINDOW", exit_win1)
window1.mainloop()
| StarcoderdataPython |
6514203 | <reponame>X-rayLaser/multi-directional-mdrnn
import tensorflow as tf
from mdrnn._util.directions import Direction
class MultiDirectional(tf.keras.layers.Layer):
    """Keras layer wrapper that runs one copy of an MDRNN per scan direction.

    Spawns one clone of *rnn* for each direction returned by
    ``Direction.get_all_directions(rnn.ndims)`` and concatenates their
    outputs along the last axis.
    """
    def __init__(self, rnn, **kwargs):
        """:param rnn: prototype MDRNN; cloned via ``rnn.spawn(direction)``."""
        super(MultiDirectional, self).__init__(**kwargs)
        self._original_rnn = rnn
        directions = Direction.get_all_directions(rnn.ndims)
        self._rnns = [rnn.spawn(direction) for direction in directions]
    def build(self, input_shape):
        # Nothing to build here: each spawned RNN manages its own weights.
        pass
    def call(self, inputs, **kwargs):
        """Run every directional RNN on *inputs* and merge the results.

        Returns a single concatenated tensor, or — when the wrapped RNN has
        ``return_state`` set — a list ``[outputs, state_1, state_2, ...]``
        with the states of all directions appended in order.
        """
        if self._original_rnn.return_sequences:
            # Full sequences keep the input rank; concat on the feature axis.
            num_output_dimensions = len(inputs.shape)
        else:
            # Only the last output is returned: (batch, units).
            num_output_dimensions = 2
        results_list = [rnn.call(inputs, **kwargs) for rnn in self._rnns]
        if not self._original_rnn.return_state:
            last_axis = num_output_dimensions - 1
            return tf.concat(results_list, axis=last_axis)
        else:
            # Each result is (activations, state_1, state_2, ...); split them.
            outputs_list = []
            states_list = []
            for result in results_list:
                activations = result[0]
                states = result[1:]
                outputs_list.append(activations)
                states_list.extend(states)
            outputs_last_axis = num_output_dimensions - 1
            outputs = tf.concat(outputs_list, axis=outputs_last_axis)
            return [outputs] + states_list
| StarcoderdataPython |
3595841 | from src.EthLeverageWallet import EthLeverage
from web3 import Web3
# RPC endpoint for the Ethereum node, e.g. an Infura project URL.
http_rpc = None # https://mainnet.infura.io/v3/...
# Automatically takes `w3.eth.accounts[0]` as the main account
i_EthLW = EthLeverage().init(http_rpc)
i_EthLW.buildContract()
value = 50 # 50 Ether
gasPrice = 100 # 100 Gwei
# NOTE(review): maxGas is defined but never passed to action() — confirm
# whether a gas limit was meant to be supplied.
maxGas = 3000000 # 3 Mio.
# Convert human-readable units into wei for the transaction call.
transaction_value = i_EthLW.w3.toWei(value, "ether")
transaction_gaspr = i_EthLW.w3.toWei(gasPrice, "gwei")
i_EthLW.action(transaction_value, gasPrice=transaction_gaspr)
| StarcoderdataPython |
3592974 | <reponame>MatKie/SGTPy
from __future__ import division, print_function, absolute_import
import numpy as np
def g2mca(xhi00, khs, xm, da2new, suma_g2, eps, a1vdw_cte):
    """Second-order correction g2 for the radial distribution function (SAFT-VR-Mie).

    Combines the density derivative of the a2 perturbation term with the
    chain-sum contribution, normalised by segment number and the van der
    Waals constant.
    """
    result = 3. * da2new / eps
    result = result - khs * suma_g2 / xhi00
    # Sequential divisions kept (not folded into one) to preserve the exact
    # floating-point evaluation order.
    result = result / xm
    result = result / (- a1vdw_cte)
    return result
def dg2mca_dxhi00(xhi00, khs, dkhs, xm, d2a2new, dsuma_g2, eps, a1vdw_cte):
    """g2 and its first derivative with respect to xhi00.

    Index 0 of the returned array is g2 itself; index 1 is d(g2)/d(xhi00).
    """
    suma, dsuma = dsuma_g2
    out = 3. * np.asarray(d2a2new) / eps
    # Value: subtract the chain-sum contribution.
    out[0] = out[0] - khs * suma / xhi00
    # Derivative: quotient rule applied to khs*suma/xhi00.
    out[1] = out[1] + khs * suma / xhi00**2
    out[1] = out[1] - (suma * dkhs + khs * dsuma) / xhi00
    out = out / xm
    out = out / (- a1vdw_cte)
    return out
def d2g2mca_dxhi00(xhi00, khs, dkhs, d2khs, xm, d3a2new,
                   d2suma_g2, eps, a1vdw_cte):
    """g2 plus its first and second derivatives with respect to xhi00.

    Returns an array whose entries are [g2, dg2/dxhi00, d2g2/dxhi00^2].
    """
    suma, dsuma, d2suma = d2suma_g2
    out = 3. * np.asarray(d3a2new) / eps
    # Shared sub-expressions of the quotient-rule expansion.
    term = khs * suma / xhi00
    term_over_x = term / xhi00
    cross = (suma * dkhs + khs * dsuma) / xhi00
    out[0] = out[0] - term
    out[1] = out[1] + term_over_x
    out[1] = out[1] - cross
    out[2] = out[2] + 2 * cross / xhi00
    out[2] = out[2] - 2 * term_over_x / xhi00
    out[2] = out[2] - (2 * dkhs * dsuma + suma * d2khs + khs * d2suma) / xhi00
    out = out / xm
    out = out / (- a1vdw_cte)
    return out
def dg2mca_dx(xhi00, khs, dkhsx, xm, ms, da2new, da2newx, suma_g2, suma_g2x,
              eps, a1vdw_cte):
    """g2 and its composition derivatives.

    Returns ``(g2, dg2x)`` where ``g2`` is the scalar/array correction and
    ``dg2x`` holds d(g2)/dx_i for every component (built with outer products
    over the segment numbers ``ms``).
    """
    # Value of g2 (same expression as g2mca).
    g2 = 3 * da2new / eps
    g2 -= khs * suma_g2 / xhi00
    g2 /= xm
    g2 /= - a1vdw_cte
    # Composition derivative: product rule over khs, suma_g2 and 1/xm.
    dg2x = -np.multiply.outer(dkhsx, suma_g2)
    dg2x -= khs * suma_g2x
    dg2x += khs * np.multiply.outer(ms, suma_g2) / xm
    dg2x /= xhi00
    dg2x -= 3 * np.multiply.outer(ms, da2new / eps) / xm
    dg2x += 3. * da2newx / eps
    dg2x /= xm
    dg2x /= -a1vdw_cte
    return g2, dg2x
def dg2mca_dxxhi(xhi00, khs, dkhs, dkhsx, xm, ms, d2a2new, da2newx,
                 dsuma_g2, suma_g2x, eps, a1vdw_cte):
    """g2 with its xhi00 derivative plus its composition derivatives.

    Returns ``(g2, dg2x)``: ``g2`` is an array [g2, dg2/dxhi00] and ``dg2x``
    holds d(g2)/dx_i for every component.
    """
    sum1, dsum1 = dsuma_g2
    # Value and xhi00 derivative (same expressions as dg2mca_dxhi00).
    g2 = 3. * np.asarray(d2a2new) / eps
    g2[0] -= khs * sum1 / xhi00
    g2[1] += khs * sum1 / xhi00**2
    g2[1] -= (sum1 * dkhs + khs * dsum1)/xhi00
    g2 /= xm
    g2 /= - a1vdw_cte
    # Composition derivative (same structure as dg2mca_dx, using d2a2new[0]
    # as the value-level a2 derivative).
    dg2x = -np.multiply.outer(dkhsx, sum1)
    dg2x -= khs * suma_g2x
    dg2x += khs * np.multiply.outer(ms, sum1) / xm
    dg2x /= xhi00
    dg2x -= 3 * np.multiply.outer(ms, d2a2new[0] / eps) / xm
    dg2x += 3. * da2newx / eps
    dg2x /= xm
    dg2x /= -a1vdw_cte
    return g2, dg2x
| StarcoderdataPython |
8064941 | <gh_stars>100-1000
import logging
import struct
from enum import Enum
from typing import Optional, Tuple, Iterator, List
import lief # type: ignore
from .opcodes import OPCODES
from .utils import BitStream, crc_data, decrypt_lame, decrypt_mt, filetime_to_dt
# Marker that precedes the EA05 resource location chunk inside packed binaries.
EA05_MAGIC = bytes.fromhex("a3484bbe986c4aa9994c530a86d6487d41553321")
class AutoItVersion(Enum):
    """Packer generation of the embedded AutoIt script (EA05 vs EA06 format)."""
    EA05 = 0
    EA06 = 1
# Upper bound accepted for a decompressed script: 10 megabytes
MAX_SCRIPT_SIZE = 10 * 10 ** 6
log = logging.getLogger(__name__)
def u32(data: bytes) -> int:
    """Decode the first four bytes of *data* as a little-endian unsigned 32-bit int."""
    (value,) = struct.unpack("<I", data[:4])
    return value
def get_script_resource(pe: lief.PE) -> Optional[lief.PE.ResourceDirectory]:
    """Return the PE resource directory named "SCRIPT", or None if absent."""
    for top_level in pe.resources.childs:
        candidates = (
            node for node in top_level.childs
            if node.has_name and node.name == "SCRIPT"
        )
        found = next(candidates, None)
        if found is not None:
            return found
    return None
def decompress(data: bytes, version: AutoItVersion) -> Optional[bytes]:
    """Decompress an AutoIt LZSS-style blob.

    Layout: 4-byte magic ("EA05"/"EA06"), big-endian uncompressed size,
    then a bitstream of literal bytes and back-references.
    Returns the decompressed bytes, or None on magic mismatch / oversize.
    """
    if data[:4] == b"EA05" and version == AutoItVersion.EA05:
        pass
    elif data[:4] == b"EA06" and version == AutoItVersion.EA06:
        pass
    else:
        log.error("Magic mismatch: %s", data[:4])
        return None
    uncompressed_size = struct.unpack(">I", data[4:8])[0]
    if uncompressed_size > MAX_SCRIPT_SIZE:
        # Refuse absurd sizes to avoid allocating attacker-controlled memory.
        log.error("Uncompressed script size is larger than allowed")
        return None
    data = data[8:]
    bin_data = BitStream(data)
    out_data = [0] * uncompressed_size
    cur_output = 0
    while cur_output < uncompressed_size:
        addme = 0
        # The literal/back-reference flag bit is inverted between EA05 and EA06.
        if bin_data.get_bits(1) == (version == AutoItVersion.EA06):
            # Literal: copy the next 8 bits straight to the output.
            out_data[cur_output] = bin_data.get_bits(8)
            cur_output += 1
        else:
            # Back-reference: 15-bit distance, then a variable-length count.
            bb = bin_data.get_bits(15)
            bs = bin_data.get_bits(2)
            # Escalating length encoding: each all-ones value extends the
            # field width and bumps the accumulated offset `addme`.
            if bs == 3:
                addme = 3
                bs = bin_data.get_bits(3)
                if bs == 7:
                    addme = 10
                    bs = bin_data.get_bits(5)
                    if bs == 31:
                        addme = 41
                        bs = bin_data.get_bits(8)
                        if bs == 255:
                            addme = 296
                            while True:
                                bs = bin_data.get_bits(8)
                                if bs != 255:
                                    break
                                addme += 255
            bs += 3 + addme
            # Copy `bs` bytes starting `bb` positions back (may overlap
            # itself, which is why this is a byte-by-byte loop).
            i = cur_output - bb
            while True:
                out_data[cur_output] = out_data[i]
                cur_output += 1
                i += 1
                bs -= 1
                if bs <= 0:
                    break
    return bytes(out_data)
def deassemble_script(script_data: bytes) -> str:
    """Reconstruct AutoIt source text from compiled script tokens.

    Format: a little-endian token count, then a token stream. Opcodes in
    OPCODES decode to text; 0x7F ends a line; other low opcodes are
    fixed-width operands that produce no text.
    """
    section_num = struct.unpack("<I", script_data[:4])[0]
    section_index = 0
    offset = 4
    out = ""
    while section_index < section_num:
        opcode = script_data[offset]
        if opcode in OPCODES:
            # Handler returns (decoded text, bytes consumed).
            add, off = OPCODES[opcode](script_data[offset:])
        elif opcode == 0x7F:
            # Line terminator token.
            section_index += 1
            add, off = "\r\n", 0 + 1
        elif opcode <= 0x0F:
            # 4-byte operand, no textual output.
            add, off = "", 4 + 1
        elif opcode <= 0x1F:
            add, off = "", 8 + 1
        elif opcode <= 0x2F:
            add, off = "", 8 + 1
        else:
            # Unknown opcode: skip the single byte.
            add, off = "", 0 + 1
        # NOTE(review): repeated `out += add` is O(n^2) for very large
        # scripts; a list + "".join would be faster if this becomes hot.
        out += add
        offset += off
    return out
def parse_au3_header_ea05(data: bytes, checksum: int) -> Iterator[Tuple[str, bytes]]:
    """Walk consecutive EA05 "FILE" records and yield (name, content) pairs.

    BUGFIX: removed three bare ``yield`` statements that injected ``None``
    into the stream, violating the declared Iterator[Tuple[str, bytes]]
    contract and breaking ``for name, data in ...`` consumers.

    :param data: decrypted resource payload, positioned at the first record.
    :param checksum: key material derived from the first 16 bytes upstream.
    """
    off = 0
    while True:
        file_str = decrypt_mt(data[off:][:4], 0x16FA)
        if file_str != b"FILE":
            log.debug("FILE magic mismatch")
            # Assume that this is the end of the embedded data
            return
        off += 4
        flag = u32(data[off:]) ^ 0x29BC
        off += 4
        auto_str = decrypt_mt(data[off:][:flag], 0xA25E + flag).decode("utf-8")
        log.debug("Found a new autoit string: %s", auto_str)
        off += flag
        path_len = u32(data[off:]) ^ 0x29AC
        off += 4
        path = decrypt_mt(data[off:][:path_len], 0xF25E + path_len).decode("utf-8")
        log.debug("Found a new path: %s", path)
        off += path_len
        if auto_str == ">>>AUTOIT NO CMDEXECUTE<<<":
            # Marker record carrying no file: skip its opaque blob.
            off += 1
            next_blob = (u32(data[off:]) ^ 0x45AA) + 0x18
            off += 4 + next_blob
        else:
            comp = data[off]  # 1 if the payload is compressed
            off += 1
            data_size = u32(data[off:]) ^ 0x45AA
            off += 4
            uncompressed_size = u32(data[off:]) ^ 0x45AA  # noqa
            off += 4
            crc = u32(data[off:]) ^ 0xC3D2
            off += 4
            # FILETIME pairs (high/low dwords) for creation and last write.
            CreationTime_dwHighDateTime = u32(data[off:])
            off += 4
            CreationTime = u32(data[off:])
            off += 4
            LastWriteTime_dwHighDateTime = u32(data[off:])
            off += 4
            LastWriteTime = u32(data[off:])
            off += 4
            creation_time = filetime_to_dt(
                (CreationTime_dwHighDateTime << 32) + CreationTime
            )
            last_write_time = filetime_to_dt(
                (LastWriteTime_dwHighDateTime << 32) + LastWriteTime
            )
            log.debug(f"File creation time: {creation_time}")
            log.debug(f"File last write time: {last_write_time}")
            dec_data = decrypt_mt(data[off:][:data_size], checksum + 0x22AF)
            off += data_size
            if crc == crc_data(dec_data):
                log.debug("CRC data matches")
            else:
                log.error("CRC data mismatch")
                return
            if comp == 1:
                dec = decompress(dec_data, AutoItVersion.EA05)
                if not dec:
                    log.error("Error while trying to decompress data")
                    return
                dec_data = dec
            if auto_str == ">AUTOIT UNICODE SCRIPT<":
                yield ("script.au3", dec_data.decode("utf-16").encode("utf-8"))
            elif auto_str == ">AUTOIT SCRIPT<":
                yield ("script.au3", dec_data)
            else:
                yield (auto_str, dec_data)
def parse_au3_header_ea06(data: bytes) -> Iterator[Tuple[str, bytes]]:
    """Walk consecutive EA06 "FILE" records and yield (name, content) pairs.

    BUGFIX: removed two bare ``yield`` statements that injected ``None``
    into the stream, violating the declared Iterator[Tuple[str, bytes]]
    contract and breaking ``for name, data in ...`` consumers.

    Differs from EA05 in the key constants, the LAME-based cipher and the
    UTF-16 string encoding.
    """
    off = 0
    while True:
        file_str = decrypt_lame(data[off:][:4], 0x18EE)
        if file_str != b"FILE":
            # End of the embedded records.
            return None
        off += 4
        flag = u32(data[off:]) ^ 0xADBC
        off += 4
        auto_str = decrypt_lame(data[off:][: flag * 2], 0xB33F + flag).decode("utf-16")
        log.debug("Found a new autoit string: %s", auto_str)
        off += flag * 2
        path_len = u32(data[off:]) ^ 0xF820
        off += 4
        path = decrypt_lame(data[off:][: path_len * 2], 0xF479 + path_len).decode(
            "utf-16"
        )
        log.debug("Found a new path: %s", path)
        off += path_len * 2
        if auto_str == ">>>AUTOIT NO CMDEXECUTE<<<":
            # Marker record carrying no file: skip its opaque blob.
            off += 1
            next_blob = (u32(data[off:]) ^ 0x87BC) + 0x18
            off += 4 + next_blob
        else:
            comp = data[off]  # 1 if the payload is compressed
            off += 1
            data_size = u32(data[off:]) ^ 0x87BC
            off += 4
            uncompressed_size = u32(data[off:]) ^ 0x87BC  # noqa
            off += 4
            crc = u32(data[off:]) ^ 0xA685
            off += 4
            # FILETIME pairs (high/low dwords) for creation and last write.
            CreationTime_dwHighDateTime = u32(data[off:])
            off += 4
            CreationTime = u32(data[off:])
            off += 4
            LastWriteTime_dwHighDateTime = u32(data[off:])
            off += 4
            LastWriteTime = u32(data[off:])
            off += 4
            creation_time = filetime_to_dt(
                (CreationTime_dwHighDateTime << 32) + CreationTime
            )
            last_write_time = filetime_to_dt(
                (LastWriteTime_dwHighDateTime << 32) + LastWriteTime
            )
            log.debug(f"File creation time: {creation_time}")
            log.debug(f"File last write time: {last_write_time}")
            dec_data = decrypt_lame(data[off:][:data_size], 0x2477)
            off += data_size
            if crc == crc_data(dec_data):
                log.debug("CRC data matches")
            else:
                log.error("CRC data mismatch")
                return
            if comp == 1:
                dec = decompress(dec_data, AutoItVersion.EA06)
                if not dec:
                    log.error("Error while trying to decompress data")
                    return
                dec_data = dec
            if auto_str == ">>>AUTOIT SCRIPT<<<":
                yield ("script.au3", deassemble_script(dec_data).encode())
            else:
                yield (auto_str, dec_data)
def parse_all(data: bytes, version: AutoItVersion) -> List[Tuple[str, bytes]]:
    """Dispatch to the parser matching *version* and collect its records.

    The first 16 bytes are key material for EA05; the records follow.
    """
    key_checksum = sum(data[:16])
    body = data[16:]
    if version == AutoItVersion.EA05:
        return list(parse_au3_header_ea05(body, key_checksum))
    if version == AutoItVersion.EA06:
        return list(parse_au3_header_ea06(body))
    raise Exception("Unsupported autoit version %s", version)
def unpack_ea05(binary_data: bytes) -> Optional[List[Tuple[str, bytes]]]:
    """Locate and extract an EA05 AutoIt payload from a raw binary.

    Returns the list of (name, content) records, or None when the magic
    marker is missing or nothing could be decoded.
    """
    magic_at = binary_data.find(EA05_MAGIC)
    if magic_at == -1:
        log.error("Couldn't find the location chunk in binary")
        return None
    # The script blob starts 20 bytes past the marker.
    payload = binary_data[magic_at + 20:]
    if payload[:4] != b"EA05":
        log.error("EA05 magic mismatch")
        return None
    records = parse_all(payload[4:], AutoItVersion.EA05)
    if not records:
        log.error("Couldn't decode the autoit script")
        return None
    return records
def unpack_ea06(binary_data: bytes) -> Optional[List[Tuple[str, bytes]]]:
    """Locate and extract an EA06 AutoIt payload from a PE's "SCRIPT" resource.

    Parses the binary with lief, finds the resource directory named SCRIPT
    and hands its content (minus a 0x18-byte header) to parse_all.
    Returns the (name, content) records, or None on any failure.
    """
    pe = lief.parse(raw=list(binary_data))
    if not pe:
        log.error("Failed to parse the input file")
        return None
    if not pe.has_resources:
        log.error("The input file has no resources")
        return None
    script_resource = get_script_resource(pe)
    if script_resource is None or not script_resource.childs:
        log.error("Couldn't find the script resource")
        return None
    # The first child node holds the raw script bytes.
    script_data = list(script_resource.childs)[0].content
    parsed_data = parse_all(bytes(script_data)[0x18:], AutoItVersion.EA06)
    if not parsed_data:
        log.error("Couldn't decode the autoit script")
        return None
    return parsed_data
def extract(
    data: bytes, version: Optional[AutoItVersion] = None
) -> Optional[List[Tuple[str, bytes]]]:
    """Extract embedded AutoIt files from *data*.

    With ``version=None`` both formats are attempted (EA05 first);
    otherwise only the requested unpacker runs.
    """
    if version is None:
        log.info("AutoIt version not specified, trying both")
        return unpack_ea05(data) or unpack_ea06(data)
    handlers = {
        AutoItVersion.EA05: unpack_ea05,
        AutoItVersion.EA06: unpack_ea06,
    }
    handler = handlers.get(version)
    if handler is None:
        raise Exception("Unknown version specified, use AutoItVersion or None")
    return handler(data)
| StarcoderdataPython |
8149351 | from __future__ import absolute_import
from rest_framework.response import Response
from sentry.api.bases.organization import OrganizationEndpoint
from sentry.api.serializers import serialize
from sentry.models import OrganizationMember
class OrganizationMemberIndexEndpoint(OrganizationEndpoint):
    """List the members of an organization, ordered by display name or email."""

    def get(self, request, organization):
        """Return the serialized, sorted member list for *organization*."""
        members = OrganizationMember.objects.filter(
            organization=organization,
        ).select_related('user')

        def sort_key(member):
            # Members with a linked account sort by display name; invited
            # (email-only) members sort by their email address.
            return member.user.get_display_name() if member.user_id else member.email

        ordered = sorted(members, key=sort_key)
        return Response(serialize(ordered, request.user))
| StarcoderdataPython |
6426158 | from django.urls import path
from . import views
# URL namespace for reversing, e.g. reverse('blog:post_list').
app_name = 'blog'
urlpatterns = [
    path('', views.home, name='home'),
    path('list', views.post_list, name='post_list'),
    # Date-based detail URL: /<year>/<month>/<day>/<slug>/
    path('<int:year>/<int:month>/<int:day>/<slug:slug>/',views.post_detail,name='post_detail'),
]
5197803 | <filename>tests/test_gist_comment.py<gh_stars>1000+
from tests.base import TestCase
from vilya.models.gist_comment import GistComment
from nose.tools import eq_
class TestGistComment(TestCase):
    """Exercise the full GistComment lifecycle: add, list, update, delete."""
    def test_gist_comment(self):
        gist = self._add_gist()
        user_id = 'testuser'
        content = 'xxoo'
        new_content = 'ooxx'
        # Create a comment and verify it is returned as a model instance.
        gc = GistComment.add(gist.id, user_id, content)
        assert isinstance(gc, GistComment)
        # Exactly one comment should now exist on this gist.
        gcs = GistComment.gets_by_gist_id(gist.id)
        eq_(len(gcs), 1)
        # The author must be allowed to delete their own comment.
        assert gc.can_delete(user_id)
        # Update, then re-fetch to confirm the change was persisted.
        gc.update(new_content)
        gc = GistComment.get(gc.id)
        eq_(gc.content, new_content)
        # Delete, then confirm the lookup returns nothing.
        gc.delete()
        ret = GistComment.get(gc.id)
        eq_(ret, None)
| StarcoderdataPython |
1648728 | <gh_stars>0
import requests
from urllib3.exceptions import InsecureRequestWarning
from pprint import pprint
# Silence the warning emitted for the verify=False request below.
requests.packages.urllib3.disable_warnings(category=InsecureRequestWarning)
if __name__ == "__main__":
    url = "https://netbox.lasthop.io/api/dcim"
    http_headers = {}
    # Pin the NetBox API version via content negotiation.
    http_headers["accept"] = "application/json; version=2.4;"
    # verify=False: the lab NetBox uses a self-signed certificate.
    response = requests.get(url, headers=http_headers, verify=False)
    print("\nJSON:")
    pprint(response.json())
    print()
| StarcoderdataPython |
3236692 | <reponame>jzcruiser/doltpy
import atexit
import logging
from typing import List
import psutil # type: ignore
# NOTE(review): neither name is referenced in this module's visible code —
# presumably consumed (or mutated) by importers; confirm before removing.
HANDLERS: List[str] = []
SQL_LOG_FILE = None
def cleanup():
    """Kill every child process of the current process before exiting.

    Intended as an atexit hook (see register_cleanup) so spawned Dolt
    servers do not outlive the Python process.
    """
    logger = logging.getLogger(__name__)
    logger.info("Before exiting cleaning up child processes")
    # recursive=True also collects grandchildren spawned by our children.
    all_processes = psutil.Process().children(recursive=True)
    if all_processes:
        for p in all_processes:
            p.kill()
        logger.info("Cleaned up, exiting")
    else:
        logger.info("No processes to clean up, exiting")
def register_cleanup():
    """Install cleanup() as an interpreter-exit hook via atexit."""
    atexit.register(cleanup)
| StarcoderdataPython |
8148351 | #!/usr/bin/env python
class IRCMessageBuilder(object):
    """Format Phabricator task events as single-line, color-coded IRC messages."""
    # Hard cap on the rendered message; longer text is truncated with "...".
    MAX_MESSAGE_LENGTH = 80 * 4
    # At most this many project tags are ever shown (see build_project_text).
    MAX_NUM_PROJECTS = 4
    # mIRC color codes, keyed by human-readable name.
    COLORS = {
        'white': "00", 'black': "01", 'blue': "02", 'green': "03", 'red': "04",
        'brown': "05", 'purple': "06", 'orange': "07", 'yellow': "08", 'lime': "09",
        'teal': "10", 'cyan': "11", 'royal': "12", 'pink': "13", 'grey': "14",
        'silver': "15"
    }
    # The following colors are safe for use on both black and white backgrounds:
    # green, red, brown, purple, orange, teal, cyan, royal, pink, grey, silver
    #
    # Make sure to define a background when using other colors!
    PHAB_COLORS = {
        # Matches phabricator project colors to IRC colors
        'blue': 'teal',
        'red': 'brown',
        'orange': 'brown',
        'yellow': 'orange',
        'indigo': 'royal',
        'violet': 'purple',
        'green': 'green',
        'grey': 'grey',
        'pink': 'pink',
        'checkered': 'silver',
    }
    # Phabricator numeric priority -> display label.
    PRIORITY = {
        '100': 'Unbreak!',
        '90': 'Triage',
        '80': 'High',
        '50': 'Medium',
        '25': 'Low',
        '10': 'Lowest',
    }
    # Phabricator status keyword -> display label.
    STATUSES = {
        'open': 'Open',
        'needsinfo': 'Needs info',
        'invalid': 'Invalid',
        'resolved': 'Resolved',
        'declined': 'Declined',
        'stalled': 'Stalled',
        'progress': 'In progress',
    }
    # Tag types rendered as "projects" (vs. generic tags) in the output.
    OUTPUT_PROJECT_TYPES = ['briefcase', 'users', 'umbrella']
    # Style may be stripped by irc3 if it's a the beginning (or end) of a line
    TEXT_STYLE = {
        'bold': '\x02',
        'underline': '\x1f',
        'reversed': '\x16',
    }
    def ircformat(self, text, foreground=None, background=None, style=None):
        """Wrap *text* in mIRC color/style control codes.

        :param foreground: key into COLORS, or None
        :param background: key into COLORS, or None
        :param style: key into TEXT_STYLE, or None
        :return: text with a leading color/style prefix and a trailing
            reset (\\x0f) when any formatting was applied
        """
        outtext = ""
        if foreground or background:
            outtext += "\x03"
        if foreground:
            outtext += self.COLORS[foreground]
        if background:
            outtext += "," + self.COLORS[background]
        if style:
            outtext += self.TEXT_STYLE[style]
        outtext += text
        if foreground or background or style:
            outtext += "\x0f"
        return outtext
    def _human_status(self, name):
        """Map a status keyword to its display label, falling back to itself."""
        return self.STATUSES.get(name, name)
    def _human_prio(self, name):
        """Map a priority value to its display label, falling back to str(name)."""
        return self.PRIORITY.get(str(name), str(name))
    def escape(self, text):
        """
        Escape user supplied text so it can't be abused to
        execute arbitrary IRC commands
        :param text: possibly unsafe input
        :return: safe output
        """
        return text.replace('\n', ' ').replace('\r', ' ').replace('```', '`')
    def build_project_text(self, all_projects, matched_projects):
        """
        Build project text to be shown.
        Requirement:
        (1) Show matched projects first, and bold
        (2) Next, show other projects if they are in self.OUTPUT_PROJECT_TYPES
        (3) Then other tags
        (4) But never show disabled text
        (5) Unless there aren't any tags at all otherwise
        (6) If there are no projects at all, show "(no projects)" in bright red
        (7) Never show more than self.MAX_NUM_PROJECTS, even if they are matched
        :param all_projects: dict[project name, info] (scraped) or
                             list[project name] (failed scraping)
        :param matched_projects: list[project name]
        :return: list with formatted projects
        """
        # (3) format all projects
        # and map to a standardized format in the process
        projects = {}
        for project in all_projects:
            try:
                # dict input: scraped metadata per project.
                info = all_projects[project]
            except KeyError:
                # list input (scraping failed): synthesize default metadata.
                info = {
                    'shade': 'green',
                    'tagtype': 'briefcase',
                    'disabled': False,
                    'uri': ''
                }
            info['matched'] = project in matched_projects
            color = self.PHAB_COLORS.get(info['shade'], 'teal')
            # NOTE(review): requirement (1) says matched projects are bold,
            # but no bold style is applied here — confirm intended.
            info['irc_text'] = self.ircformat(project, color)
            projects[project] = info
        # We map projects in four categories:
        matched_parts = []
        other_projects = []
        other_tags = []
        hidden_parts = []
        # We use them in that order, limiting to N (or (N-1) + 'and M others') projects
        for project in sorted(projects):
            info = projects[project]
            if info['matched']:
                matched_parts.append(info['irc_text'])
            elif info['disabled']:
                hidden_parts.append(info['irc_text'])
            elif info['tagtype'] in self.OUTPUT_PROJECT_TYPES:
                other_projects.append(info['irc_text'])
            else:
                other_tags.append(info['irc_text'])
        # (1), (2), (3), (4)
        show_parts = matched_parts + other_projects + other_tags
        # (5), (6)
        if len(show_parts) == 0:
            show_parts = hidden_parts
        if len(show_parts) == 0:
            show_parts = [self.ircformat('(no projects)', 'red', style='bold')]
        # (7)
        overflow_parts = show_parts[self.MAX_NUM_PROJECTS:]
        show_parts = show_parts[:self.MAX_NUM_PROJECTS]
        if len(overflow_parts) == 1:
            show_parts.append(overflow_parts[0])
        elif len(overflow_parts) > 0:
            show_parts.append("and %i others" % len(overflow_parts))
        return ", ".join(show_parts)
    def build_message(self, useful_info):
        """Assemble the final one-line IRC message for a task event.

        Expects keys: 'projects', 'matched_projects', 'title', 'url', 'user';
        optionally 'new', 'status', 'priority', 'assignee', 'comment'.
        """
        text = self.build_project_text(useful_info['projects'], useful_info['matched_projects']) + ': '
        text += useful_info['title']
        text += ' - ' + useful_info['url']
        text += " (" + self.ircformat(useful_info['user'], "teal") + ") "
        is_new = 'new' in useful_info
        if is_new:
            text += self.ircformat('NEW', 'green') + ' '
        elif 'status' in useful_info:
            # Status transition: old (brown) -> new (green).
            status = useful_info['status']
            text += self.ircformat(self._human_status(status['old']), 'brown')
            text += '→'
            text += self.ircformat(self._human_status(status['new']), 'green') + ' '
        if 'priority' in useful_info:
            prio = useful_info['priority']
            text += 'p:'
            # NOTE(review): the new priority is only printed when an old one
            # exists, unlike the assignee branch below where the new value is
            # always printed — confirm this asymmetry is intentional.
            if prio['old']:
                text += self.ircformat(self._human_prio(prio['old']), 'brown')
                text += '→'
                text += self.ircformat(self._human_prio(prio['new']), 'green')
            text += ' '
        if 'assignee' in useful_info:
            ass = useful_info['assignee']
            text += 'a:'
            if ass['old']:
                text += self.ircformat(ass['old'], 'brown')
                text += '→'
            text += self.ircformat(str(ass['new']), 'green')
            text += ' '
        if 'comment' in useful_info:
            text += useful_info['comment']
        # Get rid of annoying stuff
        text = self.escape(text)
        text = text.replace('\t', ' ')
        if len(text) > self.MAX_MESSAGE_LENGTH:
            text = text[:self.MAX_MESSAGE_LENGTH - 3].rstrip() + "..."
        # Make sure the URL is always fully present
        if useful_info['url'] not in text:
            inserttext = "... - " + useful_info['url']
            text = text[:-len(inserttext)] + inserttext
        return text
| StarcoderdataPython |
1653750 | <filename>03_NonLinearRegression_Curve/main.py
import nonLinearRegressionCurve as R
R.clearScreen()
# Load training data: column 0 is the feature, column 1 is the target.
dataTraining= R.loadData("dataTraining.txt")
X=dataTraining[:,0:1]
y=dataTraining[:,1:2]
# Fit a degree-3 polynomial hypothesis via gradient descent.
degree=3
theta =R.initTheta(X,degree)
theta = R.optimizedGradientDescent(X, y, theta,degree)
R.plotHypothesis(theta,X,y)
# Predict targets for the unseen inputs and print (input, prediction) pairs.
dataPrediction= R.loadData("dataPrediction.txt")
PX=dataPrediction[:,0:1]
Py= R.predict(theta,PX)
print("Prediction Result:\n",R.concatenateVectors(PX,Py))
| StarcoderdataPython |
8146151 | <reponame>harnitsignalfx/tf-synthetics<filename>python-syn/tests/test__histogram.py
from tests import TimedTestCase
from pyformance.meters import Histogram
class HistogramTestCase(TimedTestCase):
    """Tests for the reservoir-sampled Histogram meter."""
    def test__a_sample_of_100_from_1000(self):
        # Reservoir of 100 with 1000 inserts: stats cover all values, sample
        # holds exactly 100 of them.
        hist = Histogram(100, 0.99)
        for i in range(1000):
            hist.add(i)
        self.assertEqual(1000, hist.get_count())
        self.assertEqual(100, hist.sample.get_size())
        snapshot = hist.get_snapshot()
        self.assertEqual(100, snapshot.get_size())
        for i in snapshot.values:
            self.assertTrue(0 <= i and i <= 1000)
        self.assertEqual(999, hist.get_max())
        self.assertEqual(0, hist.get_min())
        self.assertEqual(499.5, hist.get_mean())
        # Variance of 0..999: n*(n+1)/12 ≈ 83416.67.
        self.assertAlmostEqual(83416.6666, hist.get_var(), delta=0.0001)
    def test__a_sample_of_100_from_10(self):
        # Fewer inserts than reservoir capacity: the sample holds everything.
        hist = Histogram(100, 0.99)
        for i in range(10):
            hist.add(i)
        self.assertEqual(10, hist.get_count())
        self.assertEqual(10, hist.sample.get_size())
        snapshot = hist.get_snapshot()
        self.assertEqual(10, snapshot.get_size())
        for i in snapshot.values:
            self.assertTrue(0 <= i and i <= 10)
        self.assertEqual(9, hist.get_max())
        self.assertEqual(0, hist.get_min())
        self.assertEqual(4.5, hist.get_mean())
        self.assertAlmostEqual(9.1666, hist.get_var(), delta=0.0001)
    def test__a_long_wait_should_not_corrupt_sample(self):
        # Exponentially-decaying sample with a manual clock; a long idle
        # period must trigger a rescale instead of corrupting the sample.
        hist = Histogram(10, 0.015, clock=self.clock)
        for i in range(1000):
            hist.add(1000 + i)
            self.clock.add(0.1)
        self.assertEqual(hist.get_snapshot().get_size(), 10)
        for i in hist.sample.get_snapshot().values:
            self.assertTrue(1000 <= i and i <= 2000)
        self.clock.add(15 * 3600)  # 15 hours, should trigger rescale
        hist.add(2000)
        self.assertEqual(hist.get_snapshot().get_size(), 2)
        for i in hist.sample.get_snapshot().values:
            self.assertTrue(1000 <= i and i <= 3000)
        for i in range(1000):
            hist.add(3000 + i)
            self.clock.add(0.1)
        self.assertEqual(hist.get_snapshot().get_size(), 10)
        for i in hist.sample.get_snapshot().values:
            self.assertTrue(3000 <= i and i <= 4000)
11397820 | import os
from setuptools import setup
NAME = "easyprocess"

# Read __version__ from easyprocess/about.py without importing the package
# (importing could fail before its dependencies are installed).
__version__ = None
# BUGFIX: the previous bare exec(open(...).read()) never closed the file
# handle; use a context manager.
with open(os.path.join(NAME, "about.py")) as f:
    exec(f.read())
VERSION = __version__

PYPI_NAME = "EasyProcess"
URL = "https://github.com/ponty/easyprocess"
DESCRIPTION = "Easy to use Python subprocess interface."
LONG_DESCRIPTION = """Easy to use Python subprocess interface.
Documentation: https://github.com/ponty/easyprocess/tree/"""
LONG_DESCRIPTION += VERSION

PACKAGES = [
    NAME,
    NAME + ".examples",
]

classifiers = [
    # Get more strings from
    # http://pypi.python.org/pypi?%3Aaction=list_classifiers
    "License :: OSI Approved :: BSD License",
    "Natural Language :: English",
    "Operating System :: OS Independent",
    "Programming Language :: Python",
    "Programming Language :: Python :: 2",
    "Programming Language :: Python :: 2.7",
    "Programming Language :: Python :: 3",
    "Programming Language :: Python :: 3.6",
    "Programming Language :: Python :: 3.7",
    "Programming Language :: Python :: 3.8",
]

setup(
    name=PYPI_NAME,
    version=VERSION,
    description=DESCRIPTION,
    long_description=LONG_DESCRIPTION,
    long_description_content_type="text/x-rst",
    classifiers=classifiers,
    keywords="subprocess interface",
    author="ponty",
    # author_email='',
    url=URL,
    license="BSD",
    packages=PACKAGES,
)
| StarcoderdataPython |
5139252 | <filename>core/algorithm/ppo.py
import torch
from torch.optim.adam import Adam
from core.model.policy_with_value import PolicyWithValue
from core.common import SampleBatch, ParamDict
import torch.nn.functional as F
def get_tensor(batch, device):
    """Convert the four sample-batch fields into float32 tensors on *device*.

    :param batch: mapping with "states", "actions", "advantages", "returns"
    :param device: torch device (or device string) for the tensors
    :return: tuple (states, actions, advantages, returns)
    """
    def as_float32(key):
        return torch.as_tensor(batch[key], dtype=torch.float32, device=device)

    return (
        as_float32("states"),
        as_float32("actions"),
        as_float32("advantages"),
        as_float32("returns"),
    )
def ppo_step(config: ParamDict, batch: SampleBatch, policy: PolicyWithValue):
    """Run one PPO optimization step over *batch*.

    Performs `optimize policy epochs` passes of shuffled mini-batch updates:
    an MSE critic update followed by a clipped-surrogate policy update,
    with learning rate and clip range linearly annealed over training.
    """
    lr, l2_reg, clip_epsilon, policy_iter, i_iter, max_iter, mini_batch_sz = \
        config.require("lr", "l2 reg", "clip eps", "optimize policy epochs",
                       "current training iter", "max iter", "optimize batch size")
    # Entropy bonus coefficient (currently disabled).
    lam_entropy = 0.
    states, actions, advantages, returns = get_tensor(batch, policy.device)
    # Linear annealing of the learning rate and clip range.
    lr_mult = max(1.0 - i_iter / max_iter, 0.)
    clip_epsilon = clip_epsilon * lr_mult
    optimizer_policy = Adam(policy.policy_net.parameters(), lr=lr * lr_mult, weight_decay=l2_reg)
    optimizer_value = Adam(policy.value_net.parameters(), lr=lr * lr_mult, weight_decay=l2_reg)
    # Log-probs under the behavior policy, frozen for the ratio computation.
    with torch.no_grad():
        fixed_log_probs = policy.policy_net.get_log_prob(states, actions).detach()
    for _ in range(policy_iter):
        inds = torch.randperm(states.size(0))
        """perform mini-batch PPO update"""
        for i_b in range(inds.size(0) // mini_batch_sz):
            # BUGFIX: the random permutation `inds` was computed but never
            # used — mini-batches were previously sliced in original sample
            # order, so the per-epoch shuffle had no effect.
            idx = inds[i_b * mini_batch_sz:(i_b + 1) * mini_batch_sz]
            states_i = states[idx]
            actions_i = actions[idx]
            returns_i = returns[idx]
            advantages_i = advantages[idx]
            log_probs_i = fixed_log_probs[idx]
            """update critic"""
            value_loss = F.mse_loss(policy.value_net(states_i), returns_i)
            optimizer_value.zero_grad()
            value_loss.backward()
            torch.nn.utils.clip_grad_norm_(policy.value_net.parameters(), 0.5)
            optimizer_value.step()
            """update policy"""
            log_probs, entropy = policy.policy_net.get_log_prob_entropy(states_i, actions_i)
            # Cap the log-ratio to avoid overflow in exp().
            ratio = (log_probs - log_probs_i).clamp_max(15.).exp()
            surr1 = ratio * advantages_i
            surr2 = torch.clamp(ratio, 1.0 - clip_epsilon, 1.0 + clip_epsilon) * advantages_i
            policy_surr = -torch.min(surr1, surr2).mean() - entropy.mean() * lam_entropy
            optimizer_policy.zero_grad()
            policy_surr.backward()
            torch.nn.utils.clip_grad_norm_(policy.policy_net.parameters(), 0.5)
            optimizer_policy.step()
| StarcoderdataPython |
4800334 | #!/usr/bin/env python
# Copyright 2017 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import os
import sys
from scripts.configure import add_package
def store(script, pkg_type, pkg_name):
    """Register *pkg_name* of *pkg_type* with the configure machinery.

    :param script: path of the invoking script (argv[0]); its directory is
        used as the working directory for add_package.
    """
    working = os.path.dirname(script)
    add_package(pkg_type, pkg_name, working=working)
def usage():
    """Print the command-line usage string and terminate with a failure status."""
    print("%s <pkg_type> <pkg_name>" % sys.argv[0])
    # sys.exit is always available; the builtin exit() comes from the site
    # module and is absent when Python runs with -S.
    sys.exit(-1)
def main():
    """Validate the argument count and hand the package info to store()."""
    if len(sys.argv) != 3:
        usage()
    store(*sys.argv)


if __name__ == '__main__':
    main()
    exit(0)
| StarcoderdataPython |
8022434 | <filename>scr/Analysis/dbsnp_mapping/00-parse-gencode-gff3.py
import re,json,sys
def takeFirst(elem):
    """Sort-key helper returning the element at index 1.

    NOTE(review): despite the name, this returns the SECOND item of *elem*
    (used to sort records by their second field) — confirm the name/behavior
    mismatch is intentional before renaming.
    """
    second_field = elem[1]
    return second_field
def transfer_binary(seqlen, start, end):
    """Return a (seqlen+1)-character bitmask with '1's over [start, end).

    A sentinel '0' is prepended at index 0, so position p of the interval
    maps to string index p+1.
    """
    start_i, end_i, total = int(start), int(end), int(seqlen)
    leading = '0' * (start_i + 1)  # sentinel plus the zeros before the run
    run = '1' * (end_i - start_i)
    trailing = '0' * (total - end_i)
    return leading + run + trailing
def fix_position(pos_list):
    """Shrink each non-empty [start, end] pair by one position on each side.

    Empty entries (falsy values) in *pos_list* are skipped.
    """
    return [[int(pair[0]) + 1, int(pair[1]) - 1] for pair in pos_list if pair]
def transfer_loc(bseq,list_min):
    """Convert a binary mask string back into [start, end] coordinate pairs.

    Scans *bseq* for runs of '1' characters and reports each run shifted by
    ``list_min - 1``. Index 0 is never examined: the masks produced by
    transfer_binary / list_to_binary_string carry a leading '0' sentinel.

    NOTE(review): if *bseq* begins with "10", ``start`` is read before
    assignment and raises NameError; all in-module callers prepend a '0'
    sentinel, which avoids this — confirm before reusing elsewhere.
    """
    pos=[]  # NOTE(review): rebound by the loop below; the initial value is unused
    pos_item=[]
    pos_fina=[]
    for i in range(1,len(bseq)):
        if bseq[i]=='0' and bseq[i-1]=='1':
            # Falling edge: close the current run (end exclusive).
            end=i
            pos_item.append([start,end])
        elif bseq[i]=='1' and i==len(bseq)-1 and bseq[i-1]!='0':
            # Run extends to the final character: close it inclusively.
            end=i
            pos_item.append([start,end+1])
        elif bseq[i]=='1' and bseq[i-1]=='0':
            # Rising edge: a new run of '1's begins here.
            start=i
    if pos_item:
        for pos in pos_item:
            # Shift mask-local indices back to genome coordinates.
            pos_fina.append([pos[0]+int(list_min)-1,pos[1]+int(list_min)-1])
    else:
        pos_fina=[]
    return pos_fina
'''
test_bseq='00011111110000000000001100'
list_min=30
p=transfer_loc(test_bseq,list_min)
'''
def list_to_binary_string(mylist):
    """OR a list of [start, end] intervals into one binary mask string.

    Returns ``[mask, list_min, list_max]`` where the mask covers the span
    from the smallest start to the largest end found in *mylist*.

    NOTE(review): the initial mask is seqlen+1 characters (leading '0'
    sentinel) but each merge loop only rebuilds the first seqlen characters,
    so the returned mask is one shorter than the sentinel convention used by
    transfer_binary — confirm downstream indexing accounts for this.
    """
    # Find the overall span covered by the intervals.
    list_min=mylist[0][0]
    list_max=mylist[0][1]
    for l in mylist:
        if l[0]<list_min:
            list_min=l[0]
        if l[1]>list_max:
            list_max=l[1]
    seqlen=int(list_max)-int(list_min)+1 #gff 1-base
    tmp_seq='0'+'0'*seqlen
    for l in mylist:
        # Mask for this interval, in coordinates relative to list_min.
        bseq=transfer_binary(seqlen,int(l[0])-int(list_min),int(l[1])-int(list_min) )
        tmp_seq_a=[]
        # Bitwise OR of the accumulated mask with this interval's mask.
        for i in range(0,seqlen):
            if int(tmp_seq[i])+int(bseq[i])>0:
                tmp_seq_a.append('1')
            else:
                tmp_seq_a.append('0')
        tmp_seq=''.join(tmp_seq_a)
        # NOTE(review): duplicated join below is redundant but harmless.
        tmp_seq=''.join(tmp_seq_a)
    return [tmp_seq,list_min,list_max]
def region_merge(list1,list2): #list1+list2
    """Union of two interval lists, computed via binary masks.

    If either list is empty the other is normalised (converted to a mask and
    back) and returned; otherwise both masks are aligned to a common span,
    OR-ed position by position, and converted back to intervals.
    """
    if list1:
        n_bl1=list_to_binary_string(list1)
    else:
        # list1 empty: just normalise list2.
        n_bl2=list_to_binary_string(list2)
        merge_pos=transfer_loc(''.join(n_bl2[0]),n_bl2[1])
        return merge_pos
    if list2:
        n_bl2=list_to_binary_string(list2)
    else:
        # list2 empty: just normalise list1.
        n_bl1=list_to_binary_string(list1)
        merge_pos=transfer_loc(''.join(n_bl1[0]),n_bl1[1])
        return merge_pos
    # Pad both masks to the combined [list_min, list_max] span.
    list_min=min(int(n_bl1[1]),int(n_bl2[1]))
    list_max=max(int(n_bl1[2]),int(n_bl2[2]))
    bl1='0'*(int(n_bl1[1])-list_min)+n_bl1[0]+'0'*(list_max-int(n_bl1[2]))
    bl2='0'*(int(n_bl2[1])-list_min)+n_bl2[0]+'0'*(list_max-int(n_bl2[2]))
    bl3=[]
    merge_pos=[]
    # Position-wise OR over the shared span.
    # NOTE(review): the range stops at list_max-list_min, which may skip the
    # final character(s) of the padded masks — confirm off-by-one intent.
    for i in range(0,int(list_max)-int(list_min)):
        if int(bl1[i])+int(bl2[i])>0:
            bl3.append('1')
        else:
            bl3.append('0')
    merge_pos=transfer_loc(''.join(bl3),list_min)
    return merge_pos
def region_insect(list1,list2): #list2-list1
    """Set difference of interval lists: positions in list2 but not in list1.

    Both lists are rendered as aligned binary masks; a position contributes
    when it is '1' in the list2 mask and '0' in the list1 mask.

    NOTE(review): despite the name ("insect"/intersect), the comment and the
    bit test implement list2 minus list1. Both empty-input fallbacks return
    *list1* — for an empty list2 one would expect an empty result; callers
    guard these cases, but confirm before reusing.
    """
    if list1:
        n_bl1=list_to_binary_string(list1)
    else:
        return list1
    if list2:
        n_bl2=list_to_binary_string(list2)
    else:
        return list1
    bl3=[]
    insec_pos=[]
    # Pad both masks to the combined [list_min, list_max] span.
    list_min=min(int(n_bl1[1]),int(n_bl2[1]))
    list_max=max(int(n_bl1[2]),int(n_bl2[2]))
    bl1='0'*(int(n_bl1[1])-list_min)+n_bl1[0]+'0'*(list_max-int(n_bl1[2]))
    bl2='0'*(int(n_bl2[1])-list_min)+n_bl2[0]+'0'*(list_max-int(n_bl2[2]))
    # Keep positions present in list2's mask but absent from list1's.
    for i in range(0,len(bl1)):
        if bl1[i]=='0' and bl2[i]=='1':
            bl3.append('1')
        else:
            bl3.append('0')
    insec_pos=transfer_loc(''.join(bl3),list_min)
    insec_pos_fina=[]  # NOTE(review): unused leftover variable
    return insec_pos
'''
test_l2b=[[0,0],[2,9],[12,19]]
test_insec=[[1,5],[7,10]]
t1=list_to_binary_string(test_l2b)
t2=transfer_loc(t1[0],t1[1])
t3=region_insect(test_l2b,test_insec)
'''
class Gene(list):
    """Holds one gene's annotation and partitions its span into features.

    Region attributes (exonic, cds, ...) are lists of [start, end] pairs.
    The define_* methods canonicalise them: region_merge collapses
    overlapping intervals and region_insect(a, b) removes the positions of
    `a` from `b`, so each genomic position ends up reported under a single
    feature.  report_gene appends GFF3-style lines to
    <out_file>.gene.gff3 and <out_file>.gene_proteincoding.gff3.
    """
    def __init__(self, gene_id='', gene_name='', gene_type='', genome_start='', genome_end='',
                 genome_chr='', source='', genome_strand='', exonic=None, intronic=None,
                 three_utr=None, five_utr=None, start_codon=None, stop_codon=None, cds=None,
                 unknown=None, out_file=''):
        # `None` sentinels replace the original mutable default arguments:
        # the shared `[]` defaults leaked state between instances.
        self.gene_id = gene_id
        self.gene_name = gene_name
        self.gene_type = gene_type
        self.genome_chr = genome_chr
        self.genome_start = genome_start
        self.genome_end = genome_end
        self.source = source
        self.genome_strand = genome_strand
        self.exonic = [] if exonic is None else exonic
        self.intronic = [] if intronic is None else intronic
        self.three_utr = [] if three_utr is None else three_utr
        self.five_utr = [] if five_utr is None else five_utr
        self.start_codon = [] if start_codon is None else start_codon
        self.stop_codon = [] if stop_codon is None else stop_codon
        self.cds = [] if cds is None else cds
        self.unknown = [] if unknown is None else unknown
        self.out_file = out_file
    def define_exon(self):
        # Collapse overlapping exon intervals into a canonical list.
        self.exonic = region_merge([], self.exonic)
    def define_cds(self):
        # Merge CDS intervals, then subtract positions claimed by exons.
        self.cds = region_merge([], self.cds)
        if self.exonic:
            self.cds = region_insect(self.exonic, self.cds)
        self.cds = fix_position(self.cds)
    def define_start_codon(self):
        # Merge, then subtract exon and CDS positions, in that order.
        self.start_codon = region_merge([], self.start_codon)
        if self.exonic:
            self.start_codon = region_insect(self.exonic, self.start_codon)
        if self.cds and self.start_codon:
            self.start_codon = region_insect(self.cds, self.start_codon)
        self.start_codon = fix_position(self.start_codon)
    def define_stop_codon(self):
        self.stop_codon = region_merge([], self.stop_codon)
        if self.exonic:
            self.stop_codon = region_insect(self.exonic, self.stop_codon)
        if self.cds and self.stop_codon:
            self.stop_codon = region_insect(self.cds, self.stop_codon)
        self.stop_codon = fix_position(self.stop_codon)
    def define_three_utr(self):
        self.three_utr = region_merge([], self.three_utr)
        if self.exonic:
            self.three_utr = region_insect(self.exonic, self.three_utr)
        if self.cds and self.three_utr:
            self.three_utr = region_insect(self.cds, self.three_utr)
        if self.start_codon and self.three_utr:
            self.three_utr = region_insect(self.start_codon, self.three_utr)
        if self.stop_codon and self.three_utr:
            self.three_utr = region_insect(self.stop_codon, self.three_utr)
        self.three_utr = fix_position(self.three_utr)
    def define_five_utr(self):
        self.five_utr = region_merge([], self.five_utr)
        if self.exonic:
            self.five_utr = region_insect(self.exonic, self.five_utr)
        if self.cds and self.five_utr:
            self.five_utr = region_insect(self.cds, self.five_utr)
        if self.start_codon and self.five_utr:
            self.five_utr = region_insect(self.start_codon, self.five_utr)
        if self.stop_codon and self.five_utr:
            self.five_utr = region_insect(self.stop_codon, self.five_utr)
        self.five_utr = fix_position(self.five_utr)
    def define_intronic(self):
        self.intronic = region_merge([], self.intronic)
        if self.exonic:
            self.intronic = region_insect(self.exonic, self.intronic)
        if self.cds and self.intronic:
            self.intronic = region_insect(self.cds, self.intronic)
        if self.start_codon and self.intronic:
            self.intronic = region_insect(self.start_codon, self.intronic)
        if self.stop_codon and self.intronic:
            self.intronic = region_insect(self.stop_codon, self.intronic)
        if self.three_utr and self.intronic:
            self.intronic = region_insect(self.three_utr, self.intronic)
        if self.five_utr and self.intronic:
            self.intronic = region_insect(self.five_utr, self.intronic)
        self.intronic = fix_position(self.intronic)
    def define_unknown(self):
        # Whatever is left of the gene span after removing all known features.
        self.unknown = [[self.genome_start, self.genome_end]]
        if self.exonic:
            self.unknown = region_insect(self.exonic, self.unknown)
        if self.cds and self.unknown:
            # BUGFIX: the original assigned to a misspelled `self.unbkown`,
            # silently discarding the CDS subtraction.
            self.unknown = region_insect(self.cds, self.unknown)
        if self.start_codon and self.unknown:
            # BUGFIX: the original assigned to `self.known` by mistake.
            self.unknown = region_insect(self.start_codon, self.unknown)
        if self.stop_codon and self.unknown:
            self.unknown = region_insect(self.stop_codon, self.unknown)
        if self.three_utr and self.unknown:
            self.unknown = region_insect(self.three_utr, self.unknown)
        if self.five_utr and self.unknown:
            self.unknown = region_insect(self.five_utr, self.unknown)
        if self.intronic and self.unknown:
            self.unknown = region_insect(self.intronic, self.unknown)
        self.unknown = fix_position(self.unknown)
    def _feature_line(self, feature, start, end):
        # One tab-separated GFF3-style line.  The `gene_tyep` key typo is
        # kept on purpose: downstream consumers already parse this exact key.
        return (self.genome_chr + '\t' + self.source + '\t' + feature + '\t' + str(start) +
                '\t' + str(end) + '\t.\t' + self.genome_strand + '\t.\t' + 'ID=' + self.gene_id +
                ';gene_name=' + self.gene_name + ';gene_tyep=' + self.gene_type)
    def report_gene(self):
        """Append the gene line and every feature interval to the output files."""
        with open(self.out_file + '.gene.gff3', "a") as out:
            out.write(self._feature_line('gene', self.genome_start, self.genome_end) + '\n')
        with open(self.out_file + '.gene_proteincoding.gff3', "a") as out:
            # Order and labels match the original hand-written blocks.
            for feature, regions in (('Exonic', self.exonic),
                                     ('CDS', self.cds),
                                     ('start_codon', self.start_codon),
                                     ('stop_codon', self.stop_codon),
                                     ('three_utr', self.three_utr),
                                     ('five_utr', self.five_utr),
                                     ('intronic', self.intronic),
                                     ('others', self.unknown)):
                for region in regions:
                    # Skip inverted intervals (these can appear after the
                    # subtraction/fix_position passes).
                    if region[0] <= region[1]:
                        out.write(self._feature_line(feature, region[0], region[1]) + '\n')
def parse_X9(X9):
    """Parse a GFF3 attribute column (``key=value;key=value;...``) into a dict.

    Uses ``partition`` so a value may itself contain '=' (the original kept
    only the text between the first and second '=') and so an empty item
    (e.g. from a trailing ';') maps to an empty value instead of raising
    IndexError.
    """
    x9_dict = {}
    for item in X9.split(';'):
        key, _, value = item.partition('=')
        x9_dict[key] = value
    return x9_dict
def parse_gene(transcript_list, out_file):
    """Build a Gene from one gene's worth of GFF3 rows and write its report.

    ``transcript_list[0]`` must be the ``gene`` row; the remaining rows are
    that gene's transcript-level feature rows.
    """
    gene_row = transcript_list[0]
    if gene_row[2] != 'gene':
        print(gene_row)
        sys.exit("Error gene start!")
    gene_attrs = parse_X9(gene_row[8])
    # Constructor fields shared by both code paths below.
    shared = dict(gene_id=gene_attrs['gene_id'],
                  gene_name=gene_attrs['gene_name'],
                  gene_type=gene_attrs['gene_type'],
                  genome_start=gene_row[3],
                  genome_end=gene_row[4],
                  genome_chr=gene_row[0],
                  source=gene_row[1],
                  genome_strand=gene_row[6],
                  out_file=out_file)
    if gene_attrs['gene_type'] == "protein_coding":
        # Collect [start, end] pairs per feature type; `transcript` rows feed
        # the intronic bucket (intron = transcript minus everything else).
        buckets = {'exon': [], 'CDS': [], 'start_codon': [], 'stop_codon': [],
                   'five_prime_UTR': [], 'three_prime_UTR': [], 'transcript': []}
        for row in transcript_list[1:]:
            row_attrs = parse_X9(row[8])
            if row_attrs['transcript_type'] == "protein_coding" and row[2] in buckets:
                buckets[row[2]].append([row[3], row[4]])
        cur_gene = Gene(exonic=buckets['exon'], cds=buckets['CDS'],
                        start_codon=buckets['start_codon'],
                        stop_codon=buckets['stop_codon'],
                        three_utr=buckets['three_prime_UTR'],
                        five_utr=buckets['five_prime_UTR'],
                        intronic=buckets['transcript'], **shared)
        # Canonicalise each non-empty feature class, in dependency order.
        if buckets['exon']:
            cur_gene.define_exon()
        if buckets['CDS']:
            cur_gene.define_cds()
        if buckets['start_codon']:
            cur_gene.define_start_codon()
        if buckets['stop_codon']:
            cur_gene.define_stop_codon()
        if buckets['three_prime_UTR']:
            cur_gene.define_three_utr()
        if buckets['five_prime_UTR']:
            cur_gene.define_five_utr()
        if buckets['transcript']:
            cur_gene.define_intronic()
        cur_gene.define_unknown()
        cur_gene.report_gene()
    else:
        # Non-protein-coding genes only get the gene-level report line.
        Gene(**shared).report_gene()
#out_file="/workspace/fux/miRNASNP3/mapping_snp_gencode/parse_gencode_gff3/parse_01"
out_file=sys.argv[2]
gff3_file=sys.argv[1]
with open(gff3_file) as infile:
for i in range(0,1):
annotation=infile.readline()
##print(annotation)
line=infile.readline()
nline=line.strip().split()
transcript_list=[nline]
##print(line.strip())
x9_dict=parse_X9(nline[8])
gene_id=x9_dict['gene_id']
gene_name=x9_dict['gene_name']
gene_type=x9_dict['gene_type']
line=infile.readline()
while line:
if not line.startswith('#'):
nline=line.strip().split()
##print(line.strip())
x9_dict=parse_X9(nline[8])
if x9_dict['gene_id']==gene_id and x9_dict['gene_name']==gene_name and x9_dict['gene_type']==gene_type:
transcript_list.append(nline)
else:
##print(transcript_list)
parse_gene(transcript_list,out_file)
nline=line.strip().split()
transcript_list=[nline]
x9_dict=parse_X9(nline[8])
gene_id=x9_dict['gene_id']
gene_name=x9_dict['gene_name']
gene_type=x9_dict['gene_type']
line=infile.readline()
parse_gene(transcript_list,out_file)
| StarcoderdataPython |
11248534 | from pymongo import MongoClient
import pickle
import os
import sys
# Make the project root importable so `lib.config` resolves when this
# script is run directly from its own directory.
_current_dir = os.path.abspath(os.path.dirname(__file__))
PROJECT_ROOT = os.path.normpath(os.path.join(_current_dir, ".."))
sys.path.append(PROJECT_ROOT)
from lib.config import config
# Connection settings come from the shared `detector_agent` config section.
ADDR = config.get('detector_agent', 'mongodb_uri')
DB_NAME = config.get('detector_agent', 'db_name')
client = MongoClient(ADDR)
db = client[DB_NAME]
def load_pickle_to_mongodb(pickle_file_name, col):
    """Bulk-load a pickled ``{sha1: result}`` mapping into collection ``col``
    and index it on ``sha1`` for fast lookups."""
    # Binary mode + context manager: the original leaked the handle via
    # pickle.load(open(...)) and opened the pickle in text mode.
    with open(pickle_file_name, 'rb') as handle:
        cache = pickle.load(handle)
    # .items() instead of .iteritems() keeps this working on Python 2 and 3.
    records = [{'sha1': sha1, 'result': result} for sha1, result in cache.items()]
    print("inserting")
    col.insert(records)
    col.create_index('sha1')
def dump_cuckoo_sigs():
    """Snapshot every cuckoo analysis' signature descriptions, keyed by the
    sample's sha1, into ``cuckoo_sigs.pickle``."""
    db = client.cuckoo
    col = db.analysis
    d = {}
    for doc in col.find():
        sigs_desc = [sig['description'] for sig in doc['signatures']]
        sha1 = doc['target']['file']['sha1']
        d[sha1] = sigs_desc
    # Context manager ensures the dump is flushed and the handle closed
    # (the original left open('...', 'wb') unclosed).
    with open('cuckoo_sigs.pickle', 'wb') as handle:
        pickle.dump(d, handle)
def query_cache(col, sha1):
    """Return the cached result for ``sha1`` from ``col``, or None if absent."""
    doc = col.find_one({'sha1': sha1})
    return doc['result'] if doc else None
def insert_cache(col, sha1, result):
    """Persist a single ``sha1 -> result`` record into ``col``."""
    record = {'sha1': sha1, 'result': result}
    col.insert_one(record)
def query_classifier_cache(classifier_name, sha1):
    """Look up a cached verdict; each classifier owns a collection named
    after it."""
    return query_cache(db[classifier_name], sha1)
def insert_classifier_cache(classifier_name, sha1, result, expected_sig=None):
    """Cache ``result`` for this classifier/sha1 pair.

    When ``expected_sig`` is supplied, mismatching results are not cached,
    so a false negative is never made persistent.
    """
    col = db[classifier_name]
    if not expected_sig or result == expected_sig:
        insert_cache(col, sha1, result)
| StarcoderdataPython |
11273292 | # Created by Plutoberth
import subprocess, sys
from pathlib import Path
def enter_to_exit():
    # Pause until the user presses Enter (keeps a double-clicked console
    # window readable on Windows), then terminate the interpreter.
    input("Enter to exit...")
    sys.exit()
def ask_for_file():
    """Ask the user for the C source file name and print a separator line."""
    chosen_name = input("filename: ")
    print("-------------------------------------------------")
    return chosen_name
def add_headers(func_file_lines, file_name):
    """Insert forward declarations for every function found in a C source
    file, right after its last `#include <...>` line.

    `func_file_lines` is the file content as a list of lines; the file at
    `file_name` is rewritten in place when declarations are added.
    """
    user_input = input("Add headers? Y/N -- ")
    if user_input.lower() != 'y':
        return
    depth = 0
    funcs = []
    last_include = 0
    for line_number, line in enumerate(func_file_lines):
        if line.startswith("#include <"):
            last_include = line_number
        if "{" in line:
            if depth == 0:
                # A top-level '{' opens a function body; the signature is
                # assumed to sit on the previous line (Allman brace style),
                # and the trailing newline becomes ';\n' for the prototype.
                funcs.append(func_file_lines[line_number - 1].replace("\n", ";\n"))
            depth += 1
        if "}" in line:
            depth -= 1
    # we take from the 2nd element of funcs, because int main is the first
    funcs = funcs[1:]
    # I was really lazy, awfully sorry
    # this is basically just to check for double runs of the program, not for anything the user did.
    if func_file_lines[last_include+1:last_include+len(funcs)+1] == funcs:
        print("Signatures already in file. No operation needed.")
    else:
        # We add the function signatures right after the includes
        func_file_lines = func_file_lines[0:last_include+1] + funcs + func_file_lines[last_include+1:]
        with open(file_name, 'w') as file:
            file.writelines(func_file_lines)
        for r in funcs:
            print(r.replace("\n", ""))
        print("-------------------------------------------------")
        print("Successfully written {} function sigantures to {}".format(len(funcs), file_name))
def get_file_data(file_name):
    """Read ``file_name`` as a list of lines; prompt and exit when missing."""
    if not Path(file_name).is_file():
        print("File not found.")
        enter_to_exit()
    with open(file_name, 'r') as handle:
        return handle.readlines()
def compile_c(file_name):
    """Optionally compile ``file_name`` with gcc and report errors/warnings.

    Any compiler output line counts as an error/warning; on failure the
    script pauses and exits.
    """
    user_input = input("Compile? Y/N -- ")
    if user_input.lower() != 'y':
        return
    base = file_name.replace(".c", "")
    # Pass argv as a list instead of a formatted command string so file
    # names with spaces or shell metacharacters cannot break (or inject
    # into) the gcc invocation; this also works on POSIX, where a plain
    # string is treated as a single program name.
    proc = subprocess.Popen(['gcc', '-o', base + '.exe', base + '.c'],
                            stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    output = proc.stdout.read()
    # Changing to a proper string
    output = output.decode("utf-8")
    print(output)
    num_of_lines = len(output.split("\n")) - 1
    if num_of_lines > 0:
        # printing the amount of lines in the response.
        print("Looks like there were some ({}) errors.".format(num_of_lines))
        enter_to_exit()
    else:
        print("Compiled successfully with 0 errors/warnings..")
def run(file_name):
    """Offer to run the executable built from ``file_name`` and wait for it."""
    user_input = input("Run? Y/N -- ")
    if user_input.lower() == 'y':
        print("Running...")
        # argv list instead of a format string: robust to paths with spaces
        # and consistent across platforms.
        proc = subprocess.Popen([file_name.replace(".c", "") + '.exe'])
        proc.wait()
        print("\n")
# Script driver: ask for a C file, inject missing prototypes into it,
# then optionally compile and run the result.
user_file_name = ask_for_file()
file_lines = get_file_data(user_file_name)
add_headers(file_lines, user_file_name)
compile_c(user_file_name)
run(user_file_name)
enter_to_exit()
| StarcoderdataPython |
11350070 | #import ssubmit
import core
import sys
if __name__ == "__main__":
core.main('status', *sys.argv) | StarcoderdataPython |
3230454 | from client_sdk_python.module import (
Module,
)
import json
import rlp
from hexbytes import HexBytes
from client_sdk_python.utils.transactions import send_obj_transaction
class Debug(Module):
    """Wrapper around the node's ``debug_*`` RPC namespace."""
    need_analyze = True
    need_quota_gas = True
    def economicConfig(self):
        # The RPC returns a JSON string; hand back the decoded object.
        return json.loads(self.web3.manager.request_blocking("debug_economicConfig", []))
    def setValidatorList(self, node_list, pri_key, transaction_cfg=None):
        """Send a debug transaction replacing the validator set.

        node_list: hex-encoded node ids; pri_key: signing key;
        transaction_cfg: optional tx overrides (defaults to {"gas": 210000}).
        """
        # `None` sentinel avoids the shared mutable default-argument pitfall
        # (the original default dict could be mutated across calls).
        if transaction_cfg is None:
            transaction_cfg = {"gas": 210000}
        data_list = [bytes.fromhex(node_id) for node_id in node_list]
        # Payload: RLP list of [selector 1900, RLP-encoded node-id list],
        # hex-encoded for the transaction data field.
        data = HexBytes(rlp.encode([rlp.encode(int(1900)), rlp.encode(data_list)])).hex()
        return send_obj_transaction(self, data, self.web3.stakingAddress, pri_key, transaction_cfg)
    def getWaitSlashingNodeList(self):
        # An empty RPC payload means "no nodes pending slashing".
        result = self.web3.manager.request_blocking("debug_getWaitSlashingNodeList", [])
        if not result:
            return []
        return json.loads(result)
| StarcoderdataPython |
319485 | # -*- coding: utf-8 -*-#
#-------------------------------------------------------------------------------
# PROJECT_NAME: design_pattern
# Name: builder.py
# Author: 9824373
# Date: 2020-08-15 16:49
# Contact: <EMAIL>
# Version: V1.0
# Description: 建造模式
#-------------------------------------------------------------------------------
from abc import ABC, ABCMeta, abstractmethod
class product(object):
    """Accumulates the parts a builder produces and can display them."""
    def __init__(self):
        # Parts are appended by the concrete builders.
        self.parts = []
    def show(self):
        # Print every collected part, one repr per line.
        for item in self.parts:
            print(repr(item))
class builder(ABC):
    """Abstract builder interface: concrete builders append name, age and
    sex parts to their own `product` instance."""
    @abstractmethod
    def build_name(self):
        # Append the role/name part.
        pass
    @abstractmethod
    def build_age(self):
        # Append the age part.
        pass
    @abstractmethod
    def build_sex(self):
        # Append the sex part.
        pass
class undergraduate_builder(builder):
    """Concrete builder producing a 24-year-old female student."""
    def __init__(self):
        self.product = product()
    def build_name(self):
        self.product.parts.append('student')
    def build_age(self):
        self.product.parts.append(24)
    def build_sex(self):
        self.product.parts.append('female')
class farmer_builder(builder):
    """Concrete builder producing a 54-year-old male farmer."""
    def __init__(self):
        self.product = product()
    def build_name(self):
        self.product.parts.append('farmer')
    def build_age(self):
        self.product.parts.append(54)
    def build_sex(self):
        self.product.parts.append('male')
class director(object):
    """Runs the fixed build sequence against any builder implementation."""
    @classmethod
    def build(cls, concrete_builder):
        # The construction order (name, age, sex) is part of the contract.
        concrete_builder.build_name()
        concrete_builder.build_age()
        concrete_builder.build_sex()
if __name__ == '__main__':
    # Demo: the director drives each concrete builder, then the built
    # products are displayed.  NOTE(review): the `farmer_builder` instance
    # shadows the class of the same name from this point on.
    student_builder = undergraduate_builder()
    farmer_builder = farmer_builder()
    director.build(student_builder)
    director.build(farmer_builder)
    student_builder.product.show()
    farmer_builder.product.show()
| StarcoderdataPython |
6693492 | # -*- coding: utf-8 -*-
'''
Use three emotional dimensions - valence, arousal and dominance - to describe human perceptions of physical environments.
Interpretations of pleasure: Positive versus negative affective states (e.g. excitement, relaxation, love, and
tranquility versus cruelty, humiliation, disinterest, and boredom)
Interpretations of arousal: Level of mental alertness and physical activity. (e.g. sleep, inactivity, boredom, and
relaxation at the lower end versus wakefulness, bodily tension, strenuous
exercise, and concentration at the higher end).
Interpretations of dominance: Ranges from feelings of total lack control or influence on events and surroundings to
the opposite extreme of feeling influential and in control
'''
from __future__ import print_function
import os
import warnings
from engine.object_detection_branch.retina_net.single_img_inference import RetinaNet_single_img_detection
from engine.object_detection_branch.ssd_detector import single_shot_detector
from applications.emotic_utils import _obtain_single_model_VAD,prepare_input_data, _obtain_nb_classifiers, _obtain_ensembling_weights,\
_obtain_two_models_ensembling_VAD,_obtain_three_models_ensembling_VAD
from scipy.misc import imread
from matplotlib import pyplot as plt
from utils.generic_utils import crop, round_number
def single_img_VAD_inference(img_path,
                             object_detector_backend,
                             model_a_backend_name,
                             model_b_backend_name = None,
                             model_c_backend_name = None):
    """Estimate global valence/arousal/dominance (VAD) for a single image.

    Detects people with the chosen object detector, runs the EMOTIC VAD
    model(s) on every detected body crop, averages the per-person scores and
    saves the input image with the overlaid dominance score under `results/`.

    # Arguments
        img_path: Path to image file.
        object_detector_backend: Backend with which the objects will be
            detected. One of `SSD` or `RetinaNet`.
        model_a_backend_name / model_b_backend_name / model_c_backend_name:
            Body-backbone CNN names (`VGG16`, `VGG19` or `ResNet50`);
            supplying two or three names enables ensembling.
            Note that the EMOTIC model has already combined the body backbone
            features with `VGG16_Places365` features at training stage, but
            for simplicity reasons only the body backbone CNN name is
            adjustable.

    # Returns
        Three float values corresponding to `valence`, `arousal` and
        `dominance`, averaged over the detected persons (all zeros when no
        person is detected).

    # Raises
        ValueError: if `object_detector_backend` is not supported.
    """
    if not (object_detector_backend in {'SSD', 'RetinaNet'}):
        raise ValueError('The `object_detector_backend_name` argument should be either '
                         '`SSD` for Single-Shot MultiBox Detector or `RetinaNet` for RetinaNet dense detector. ')
    (head, tail) = os.path.split(img_path)
    filename_only = os.path.splitext(tail)[0]
    nb_classifiers, classifiers_names = _obtain_nb_classifiers(model_a_name=model_a_backend_name,
                                                               model_b_name=model_b_backend_name,
                                                               model_c_name=model_c_backend_name)
    # Output path: results/<image name>_<classifier combination>.png
    save_as = 'results/'+filename_only + '_' + classifiers_names + '.png'
    # Load the single model, or the 2-/3-model ensemble plus its weights.
    if nb_classifiers == 1:
        model_a = _obtain_single_model_VAD(model_a_backend_name)
    elif nb_classifiers == 2:
        w_model_a, w_model_b = _obtain_ensembling_weights(nb_classifiers=nb_classifiers,
                                                          model_a_name=model_a_backend_name,
                                                          model_b_name=model_b_backend_name,
                                                          model_c_name=model_c_backend_name)
        model_a, model_b = _obtain_two_models_ensembling_VAD(model_a_name=model_a_backend_name,
                                                             model_b_name=model_b_backend_name)
    elif nb_classifiers == 3:
        w_model_a, w_model_b, w_model_c = _obtain_ensembling_weights(nb_classifiers=nb_classifiers,
                                                                     model_a_name=model_a_backend_name,
                                                                     model_b_name=model_b_backend_name,
                                                                     model_c_name=model_c_backend_name)
        model_a, model_b, model_c = _obtain_three_models_ensembling_VAD(model_a_name=model_a_backend_name,
                                                                        model_b_name=model_b_backend_name,
                                                                        model_c_name=model_c_backend_name)
    numpy_img_path = imread(img_path)
    # ~Object detection branch~
    if object_detector_backend == 'SSD':
        coordinates, persons = single_shot_detector(img_path=img_path, imshow=False)
    elif object_detector_backend == 'RetinaNet':
        coordinates, persons = RetinaNet_single_img_detection(img_path=img_path, imshow=False)
    # configure colours for bounding box and text
    bounding_box_colour_rgbvar = (53, 42, 146)
    bounding_box_colour_rgbvar2 = [x / 255.0 for x in bounding_box_colour_rgbvar]
    text_colour_rgbvar = (214, 86, 100)
    text_colour_rgbvar2 = [x / 255.0 for x in text_colour_rgbvar]
    if persons != 0:
        print('--IMAGE INFERENCE FOR |%d| PERSON(S) FOUND:' % persons)
        plt.figure(figsize=(10, 12))
        plt.imshow(numpy_img_path)
        current_axis = plt.gca()
        counter = 1
        valence_sum = 0
        arousal_sum = 0
        dominance_sum = 0
        for box in coordinates:
            # checks if the number of persons have been reached in order to stop the for loop.
            # if counter > persons:
            #     break
            # box[0] == 0 marks an empty/placeholder detection slot.
            if box[0] != 0:
                print('[INFO] Person #%d' % counter)
                # Crop the detected body region to a temp file that
                # prepare_input_data consumes alongside the full image.
                crop(image_path=img_path, coords=box, saved_location='body_img.jpg')
                x1, x2 = prepare_input_data(body_path = 'body_img.jpg',
                                            image_path = img_path)
                if nb_classifiers == 1:
                    preds = model_a.predict([x1, x2])
                elif nb_classifiers == 2:
                    # obtain predictions
                    preds_model_a = model_a.predict([x1, x2])
                    preds_model_b = model_b.predict([x1, x2])
                    if w_model_a is None and w_model_b is None:
                        # No explicit weights: plain average of the ensemble.
                        preds = 0.50 * (preds_model_a + preds_model_b)
                    else:
                        preds = w_model_a * preds_model_a + w_model_b * preds_model_b
                elif nb_classifiers == 3:
                    # obtain predictions
                    preds_model_a = model_a.predict([x1, x2])
                    preds_model_b = model_b.predict([x1, x2])
                    preds_model_c = model_c.predict([x1, x2])
                    if w_model_a is None and w_model_b is None and w_model_c is None:
                        # No explicit weights: plain average of the ensemble.
                        preds = 0.33 * (preds_model_a + preds_model_b + preds_model_c)
                    else:
                        preds = w_model_a * preds_model_a + w_model_b * preds_model_b + w_model_c * preds_model_c
                # Uncomment to round predicted values
                # valence = round_number(preds[0][0])
                # arousal = round_number(preds[0][1])
                # dominance = round_number(preds[0][2])
                valence = preds[0][0]
                arousal = preds[0][1]
                dominance = preds[0][2]
                print(' Valence (V) -- how pleasant the emotions are: ', valence)
                print(' Arousal (A) -- unrest level of the person(s): ', arousal)
                print('Dominance (D) -- control level of the situation: ', dominance)
                valence_sum += valence
                arousal_sum += arousal
                dominance_sum += dominance
                # current_axis.add_patch(
                #     plt.Rectangle((box[0], box[1]), box[2] - box[0], box[3] - box[1],
                #                   color=text_colour_rgbvar2,
                #                   fill=False,
                #                   linewidth=3.5))
                counter += 1
        # NOTE(review): the averages divide by the detector's person count
        # even when some boxes were skipped above (box[0] == 0) -- confirm
        # that is intended.
        global_valence = valence_sum/persons
        global_arousal = arousal_sum/persons
        global_dominance = dominance_sum/persons
        print ('\n')
        print('--GLOBAL EMOTIONAL TRAITS:')
        print(" Valence (V) -- how pleasant the emotions are: %.2f" % global_valence)
        print(" Arousal (A) -- unrest level of the person(s): %.2f" % global_arousal)
        print("Dominance (D) -- control level of the situation: %.2f" % global_dominance)
        # print(' Valence (V) -- how pleasant the emotions are: ', global_valence)
        # print(' Arousal (A) -- unrest level of the person(s): ', global_arousal)
        # print('Dominance (D) -- control level of the situation: ', global_dominance)
        #
        # overlayed_text = 'Global emotional traits:' + '\n' '(V): ' + str(round(global_valence,2)) + '\n' '(A): ' + str(round(global_arousal,2)) + '\n' '(D): ' + \
        #                  str(round(global_dominance,2))
        overlayed_text = 'DOMINANCE: ' + \
                         str(round(global_dominance,2))
        current_axis.text(5, -10, overlayed_text, size='x-large', color='white',
                          bbox={'facecolor': bounding_box_colour_rgbvar2, 'alpha': 1.0})
        plt.axis('off')
        plt.savefig(save_as)
        plt.show()
        # Clean up the temporary body-crop file created per person.
        os.remove("body_img.jpg")
    else:
        warnings.warn('No global emotional traits were identified: '
                      'there was no person detected in the image.')
        global_valence = 0
        global_arousal = 0
        global_dominance = 0
    return global_valence, global_arousal, global_dominance
def single_img_VAD_inference_return_only(img_path,
                                         object_detector_backend,
                                         model_a_backend_name,
                                         model_b_backend_name=None,
                                         model_c_backend_name=None):
    """Estimate global valence and dominance for a single image (no plotting).

    Same pipeline as `single_img_VAD_inference` but without any figure output
    or console reporting; the arousal dimension is intentionally skipped.

    # Arguments
        img_path: Path to image file.
        object_detector_backend: Backend with which the objects will be
            detected. One of `SSD` or `RetinaNet`.
        model_a_backend_name / model_b_backend_name / model_c_backend_name:
            Body-backbone CNN names (`VGG16`, `VGG19` or `ResNet50`);
            supplying two or three names enables ensembling.
            Note that the EMOTIC model has already combined the body backbone
            features with `VGG16_Places365` features at training stage, but
            for simplicity reasons only the body backbone CNN name is
            adjustable.

    # Returns
        Two float values corresponding to `valence` and `dominance`, averaged
        over the detected persons (both zero when no person is detected).

    # Raises
        ValueError: if `object_detector_backend` is not supported.
    """
    if not (object_detector_backend in {'SSD', 'RetinaNet'}):
        raise ValueError('The `object_detector_backend_name` argument should be either '
                         '`SSD` for Single-Shot MultiBox Detector or `RetinaNet` for RetinaNet dense detector. ')
    nb_classifiers, classifiers_names = _obtain_nb_classifiers(model_a_name=model_a_backend_name,
                                                               model_b_name=model_b_backend_name,
                                                               model_c_name=model_c_backend_name)
    # Load the single model, or the 2-/3-model ensemble plus its weights.
    if nb_classifiers == 1:
        model_a = _obtain_single_model_VAD(model_a_backend_name)
    elif nb_classifiers == 2:
        w_model_a, w_model_b = _obtain_ensembling_weights(nb_classifiers=nb_classifiers,
                                                          model_a_name=model_a_backend_name,
                                                          model_b_name=model_b_backend_name,
                                                          model_c_name=model_c_backend_name)
        model_a, model_b = _obtain_two_models_ensembling_VAD(model_a_name=model_a_backend_name,
                                                             model_b_name=model_b_backend_name)
    elif nb_classifiers == 3:
        w_model_a, w_model_b, w_model_c = _obtain_ensembling_weights(nb_classifiers=nb_classifiers,
                                                                     model_a_name=model_a_backend_name,
                                                                     model_b_name=model_b_backend_name,
                                                                     model_c_name=model_c_backend_name)
        model_a, model_b, model_c = _obtain_three_models_ensembling_VAD(model_a_name=model_a_backend_name,
                                                                        model_b_name=model_b_backend_name,
                                                                        model_c_name=model_c_backend_name)
    # Uncomment for extra verbosity
    # print('[INFO] EMOTIC VAD models have been loaded')
    # numpy_img_path = imread(img_path)
    # ~Object detection branch~
    if object_detector_backend == 'SSD':
        coordinates, persons = single_shot_detector(img_path=img_path, imshow=False)
    elif object_detector_backend == 'RetinaNet':
        coordinates, persons = RetinaNet_single_img_detection(img_path=img_path, imshow=False)
    # Uncomment for extra verbosity
    # print('[INFO] Objects in image have been detected')
    if persons != 0:
        # Uncomment for extra verbosity
        # print('[INFO] Carrying out continuous emotion recognition in VAD space for %d person(s) found: ' % persons)
        counter = 1
        dominance_sum = 0
        valence_sum = 0
        for box in coordinates:
            # checks if the number of persons have been reached in order to stop the for loop.
            # if counter > persons:
            #     break
            # box[0] == 0 marks an empty/placeholder detection slot.
            if box[0] != 0:
                # Uncomment for extra verbosity
                # print('[INFO] Person #%d' % counter)
                crop(image_path=img_path, coords=box, saved_location='body_img.jpg')
                x1, x2 = prepare_input_data(body_path = 'body_img.jpg',
                                            image_path = img_path)
                if nb_classifiers == 1:
                    preds = model_a.predict([x1, x2])
                elif nb_classifiers == 2:
                    # obtain predictions
                    preds_model_a = model_a.predict([x1, x2])
                    preds_model_b = model_b.predict([x1, x2])
                    if w_model_a is None and w_model_b is None:
                        # No explicit weights: plain average of the ensemble.
                        preds = 0.50 * (preds_model_a + preds_model_b)
                    else:
                        preds = w_model_a * preds_model_a + w_model_b * preds_model_b
                elif nb_classifiers == 3:
                    # obtain predictions
                    preds_model_a = model_a.predict([x1, x2])
                    preds_model_b = model_b.predict([x1, x2])
                    preds_model_c = model_c.predict([x1, x2])
                    if w_model_a is None and w_model_b is None and w_model_c is None:
                        # No explicit weights: plain average of the ensemble.
                        preds = 0.33 * (preds_model_a + preds_model_b + preds_model_c)
                    else:
                        preds = w_model_a * preds_model_a + w_model_b * preds_model_b + w_model_c * preds_model_c
                # Uncomment to round predicted values
                # valence = round_number(preds[0][0])
                # arousal = round_number(preds[0][1])
                # dominance = round_number(preds[0][2])
                valence = preds[0][0]
                # arousal = preds[0][1]
                dominance = preds[0][2]
                # Uncomment for extra verbosity
                # print('    Valence (V): ', valence)
                # print('    Arousal (A): ', arousal)
                # print('Dominance (D): ', dominance)
                valence_sum += valence
                # arousal_sum += arousal
                dominance_sum += dominance
                counter += 1
        # NOTE(review): averages divide by the detector's person count even
        # when some boxes were skipped above (box[0] == 0) -- confirm intended.
        global_valence = valence_sum/persons
        # global_arousal = arousal_sum/persons
        global_dominance = dominance_sum/persons
        # Uncomment for extra verbosity
        # print ('\n')
        # print('[INFO] Global emotional traits::')
        # print('    Valence (V) -- how pleasant the emotions are: ', global_valence)
        # print('    Arousal (A) -- unrest level of the person(s): ', global_arousal)
        # print('Dominance (D) -- control level of the situation: ', global_dominance)
        # print('\n')
        # Clean up the temporary body-crop file created per person.
        os.remove("body_img.jpg")
    else:
        print("[WARNING] No global emotional traits were identified -- no `people` found in input image `", img_path, '`')
        global_valence = 0
        # global_arousal = 0
        global_dominance = 0
    return global_valence, global_dominance
def single_img_VAD_inference_with_bounding_boxes(img_path,
                                                 object_detector_backend,
                                                 model_a_backend_name,
                                                 model_b_backend_name=None,
                                                 model_c_backend_name=None):
    """Performs single image VAD inference and saves an annotated image.

    Detects people in the image, predicts valence/arousal/dominance (VAD)
    for every detected person using one, two or three ensembled CNN
    backends, and saves the original image with the overlaid recognised
    humans' bounding boxes and their VAD values under `results/`.

    # Arguments
        img_path: Path to image file.
        object_detector_backend: Backend with which the objects will be
            detected. One of `SSD` or `RetinaNet`.
        model_a_backend_name: One of `VGG16`, `VGG19` or `ResNet50`.
            Note that the EMOTIC model has already combined the body
            backbone features with `VGG16_Places365` features at training
            stage, but for simplicity reasons only the body backbone CNN
            name is adjustable.
        model_b_backend_name: optional second backend for ensembling.
        model_c_backend_name: optional third backend for ensembling.

    # Returns
        Mean `valence`, `arousal` and `dominance` over all detected
        persons (all zero when no person is found).
        NOTE(review): despite the original docstring these are floats,
        not integers.

    # Raises
        ValueError: if `object_detector_backend` is not supported.
    """
    if not (object_detector_backend in {'SSD', 'RetinaNet'}):
        raise ValueError('The `object_detector_backend_name` argument should be either '
                         '`SSD` for Single-Shot MultiBox Detector or `RetinaNet` for RetinaNet dense detector. ')
    # Output path is derived from the input image name plus the
    # classifier combination: results/<img>_<classifiers>.png
    (head, tail) = os.path.split(img_path)  # `head` is unused
    filename_only = os.path.splitext(tail)[0]
    nb_classifiers, classifiers_names = _obtain_nb_classifiers(model_a_name=model_a_backend_name,
                                                               model_b_name=model_b_backend_name,
                                                               model_c_name=model_c_backend_name)
    save_as = 'results/'+filename_only + '_' + classifiers_names + '.png'
    # Load the requested model(s); ensembles also load per-model weights
    # (which may be None, meaning "use an unweighted mean").
    if nb_classifiers == 1:
        model_a = _obtain_single_model_VAD(model_a_backend_name)
    elif nb_classifiers == 2:
        w_model_a, w_model_b = _obtain_ensembling_weights(nb_classifiers=nb_classifiers,
                                                          model_a_name=model_a_backend_name,
                                                          model_b_name=model_b_backend_name,
                                                          model_c_name=model_c_backend_name)
        model_a, model_b = _obtain_two_models_ensembling_VAD(model_a_name=model_a_backend_name,
                                                             model_b_name=model_b_backend_name)
    elif nb_classifiers == 3:
        w_model_a, w_model_b, w_model_c = _obtain_ensembling_weights(nb_classifiers=nb_classifiers,
                                                                     model_a_name=model_a_backend_name,
                                                                     model_b_name=model_b_backend_name,
                                                                     model_c_name=model_c_backend_name)
        model_a, model_b, model_c = _obtain_three_models_ensembling_VAD(model_a_name=model_a_backend_name,
                                                                        model_b_name=model_b_backend_name,
                                                                        model_c_name=model_c_backend_name)
    numpy_img_path = imread(img_path)
    # ~Object detection branch~
    if object_detector_backend == 'SSD':
        coordinates, persons = single_shot_detector(img_path=img_path, imshow=False)
    elif object_detector_backend == 'RetinaNet':
        coordinates, persons = RetinaNet_single_img_detection(img_path=img_path, imshow=False)
    # configure colours for bounding box and text
    bounding_box_colour_rgbvar = (53, 42, 146)
    bounding_box_colour_rgbvar2 = [x / 255.0 for x in bounding_box_colour_rgbvar]
    text_colour_rgbvar = (214, 86, 100)
    text_colour_rgbvar2 = [x / 255.0 for x in text_colour_rgbvar]
    if persons != 0:
        print('--IMAGE INFERENCE FOR |%d| PERSON(S) FOUND:' % persons)
        plt.figure(figsize=(10, 12))
        plt.imshow(numpy_img_path)
        current_axis = plt.gca()
        counter = 1
        valence_sum = 0
        arousal_sum = 0
        dominance_sum = 0
        for box in coordinates:
            # Checks if the number of persons has been reached, in order
            # to stop the for loop:
            # if counter > persons:
            #     break
            # NOTE(review): a zeroed first coordinate appears to mark an
            # empty detection slot -- confirm against the detectors.
            if box[0] != 0:
                print('[INFO] Person #%d' % counter)
                # Crop the detected body and build the two-stream input
                # (body crop + whole image).
                crop(image_path=img_path, coords=box, saved_location='body_img.jpg')
                x1, x2 = prepare_input_data(body_path = 'body_img.jpg',
                                            image_path = img_path)
                if nb_classifiers == 1:
                    preds = model_a.predict([x1, x2])
                elif nb_classifiers == 2:
                    # obtain predictions
                    preds_model_a = model_a.predict([x1, x2])
                    preds_model_b = model_b.predict([x1, x2])
                    if w_model_a is None and w_model_b is None:
                        # Unweighted mean of the two model predictions.
                        preds = 0.50 * (preds_model_a + preds_model_b)
                    else:
                        preds = w_model_a * preds_model_a + w_model_b * preds_model_b
                elif nb_classifiers == 3:
                    # obtain predictions
                    preds_model_a = model_a.predict([x1, x2])
                    preds_model_b = model_b.predict([x1, x2])
                    preds_model_c = model_c.predict([x1, x2])
                    if w_model_a is None and w_model_b is None and w_model_c is None:
                        # Unweighted mean of the three model predictions.
                        preds = 0.33 * (preds_model_a + preds_model_b + preds_model_c)
                    else:
                        preds = w_model_a * preds_model_a + w_model_b * preds_model_b + w_model_c * preds_model_c
                # Uncomment to round predicted values
                # valence = round_number(preds[0][0])
                # arousal = round_number(preds[0][1])
                # dominance = round_number(preds[0][2])
                valence = preds[0][0]
                arousal = preds[0][1]
                dominance = preds[0][2]
                print(' Valence (V) -- how pleasant the emotions are: ', valence)
                print(' Arousal (A) -- unrest level of the person(s): ', arousal)
                print('Dominance (D) -- control level of the situation: ', dominance)
                valence_sum += valence
                arousal_sum += arousal
                dominance_sum += dominance
                # Draw this person's bounding box and overlay their VAD
                # values next to it.
                current_axis.add_patch(
                    plt.Rectangle((box[0], box[1]), box[2] - box[0], box[3] - box[1],
                                  color=text_colour_rgbvar2,
                                  fill=False,
                                  linewidth=3.5))
                people_VAD_overlayed_text = '(V): ' + str(round(valence, 2)) + '\n' '(A): ' \
                                            + str(round(arousal, 2)) + '\n' '(D): ' \
                                            + str(round(dominance, 2))
                current_axis.text(box[0]+5, box[1]-10, people_VAD_overlayed_text, size='x-large', color='white',
                                  bbox={'facecolor': bounding_box_colour_rgbvar2, 'alpha': 1.0})
                counter += 1
        # "Global emotional traits": mean VAD over all detected persons.
        global_valence = valence_sum/persons
        global_arousal = arousal_sum/persons
        global_dominance = dominance_sum/persons
        print ('\n')
        print('--GLOBAL EMOTIONAL TRAITS:')
        print(" Valence (V) -- how pleasant the emotions are: %.2f" % global_valence)
        print(" Arousal (A) -- unrest level of the person(s): %.2f" % global_arousal)
        print("Dominance (D) -- control level of the situation: %.2f" % global_dominance)
        # print(' Valence (V) -- how pleasant the emotions are: ', global_valence)
        # print(' Arousal (A) -- unrest level of the person(s): ', global_arousal)
        # print('Dominance (D) -- control level of the situation: ', global_dominance)
        # `overlayed_text` is only used by the commented-out global
        # annotation call below.
        overlayed_text = '(V): ' + str(round(global_valence,2)) + '\n' '(A): ' + str(round(global_arousal,2)) + '\n' '(D): ' + \
            str(round(global_dominance,2))
        # current_axis.text(0, 0, overlayed_text, size='x-large', color='white',
        #                   bbox={'facecolor': bounding_box_colour_rgbvar2, 'alpha': 1.0})
        plt.axis('off')
        plt.savefig(save_as)
        plt.show()
        # Remove the temporary body crop written by `crop` above.
        os.remove("body_img.jpg")
    else:
        warnings.warn('No global emotional traits were identified: '
                      'there was no person detected in the image.')
        global_valence = 0
        global_arousal = 0
        global_dominance = 0
    return global_valence, global_arousal, global_dominance
if __name__ == "__main__":
    # Example run: three-backend ensemble over a sample image with the
    # RetinaNet person detector.
    # NOTE(review): this calls `single_img_VAD_inference` (defined earlier
    # in this file), not the bounding-box variant above.
    img_path = '/home/sandbox/Desktop/Two-class-HRV/ChildLabour/test/no_child_labour/no_child_labour_0015.jpg'
    model_a_backend_name = 'VGG19'
    model_b_backend_name = 'VGG16'
    model_c_backend_name = 'ResNet50'
    valence, arousal, dominance = single_img_VAD_inference(img_path = img_path,
                                                           object_detector_backend='RetinaNet',
                                                           model_a_backend_name = model_a_backend_name,
                                                           model_b_backend_name=model_b_backend_name,
                                                           model_c_backend_name=model_c_backend_name,
                                                           )
3524368 | <gh_stars>1-10
# This files contains your custom actions which can be used to run
# custom Python code.
#
# See this guide on how to implement these action:
# https://rasa.com/docs/rasa/core/actions/#custom-actions/
# This is a simple example for a custom action which utters "Hello World!"
from typing import Any, Text, Dict, List
import requests
from rasa_sdk import Action, Tracker
from rasa_sdk.executor import CollectingDispatcher
from rasa_sdk.events import SlotSet, FollowupAction
from rasa_sdk.forms import FormAction
import urllib
import json
# ENDPOINTS = {
# "base": "https://data.medicare.gov/resource/{}.json",
# "xubh-q36u": {
# "city_query": "?city={}",
# "zip_code_query": "?zip_code={}",
# "id_query": "?provider_id={}"
# },
# "b27b-2uc7": {
# "city_query": "?provider_city={}",
# "zip_code_query": "?provider_zip_code={}",
# "id_query": "?federal_provider_number={}"
# },
# "9wzi-peqs": {
# "city_query": "?city={}",
# "zip_code_query": "?zip={}",
# "id_query": "?provider_number={}"
# }
# }
# Static lookup of Medicare provider IDs to hospital display names.
# NOTE(review): "<NAME>" looks like a redacted placeholder from the
# dataset -- restore the real hospital name before use.
hospitals = {
    "110070": "<NAME>"
}
class ActionFindAndShowTimeZone(Action):
    """Custom Rasa action: look up the current time for the `city` slot via
    the Amdoren time-zone API and utter the result back to the user."""

    def name(self) -> Text:
        # Action identifier referenced from the Rasa domain file.
        return "action_find_and_show_time_zone"

    def run(self, dispatcher: CollectingDispatcher,
            tracker: Tracker,
            domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
        city = tracker.get_slot("city")
        # NOTE(review): the API key is hard-coded in source -- move it to
        # configuration/secret storage rather than version control.
        url = "https://www.amdoren.com/api/timezone.php?api_key=QbQVAKsgdVP6KtUVxngHQPScRAaVHt&loc={}".format(
            city)
        data = requests.get(url)
        json_data = data.json()
        # NOTE(review): if the API returns an error payload without a
        # "time" key this raises KeyError, so the `is None` branch below
        # may be unreachable -- confirm the API contract.
        timezone = json_data["time"]
        if timezone is None:
            output = "Could not find the time zone for {}".format(city)
        else:
            output = "The current time in {} is {} derived from {}".format(
                city, timezone, json_data)
        dispatcher.utter_message(text=output)
        # No slot-set or follow-up events are emitted.
        return []
# class FacilityForm(FormAction):
# def name(self) -> Text:
# return "timezone_form"
# @staticmethod
# def required_slots(tracker:Tracker) -> List[Text]:
# return["city"]
# def slot_mappings(self) -> Dict[Text, Any]:
# return{"city": self.from_entity(entity="city",
# intent=[])}
# def submit(self,
# dispatcher: CollectingDispatcher,
# tracker: Tracker,
# domain: Dict[Text, Any]
# ) -> List[Dict]:
| StarcoderdataPython |
191766 | <filename>fv3config/config/__init__.py
from .namelist import (
config_to_namelist,
config_from_namelist,
)
from .rundir import write_run_directory
from .alter import enable_restart, set_run_duration
from .derive import get_n_processes, get_run_duration, get_timestep
from .nudging import get_nudging_assets, enable_nudging
from .diag_table import (
DiagFileConfig,
DiagFieldConfig,
DiagTable,
Packing,
FileFormat,
)
from ._serialization import load, dump
def get_default_config():
    """Deprecated stub: this function was removed from the public API.

    Raises:
        NotImplementedError: always, to point callers at the removal.
    """
    message = "get_default_config has been removed"
    raise NotImplementedError(message)
| StarcoderdataPython |
9657277 | from ..core import Session, RequestError, const
import traceback
import json
import pandas as pd
class League(object):
    """Client helpers for the Riot "league" API endpoints.

    Each public method issues a single request through the supplied
    `session` and returns the response flattened into a
    `pandas.DataFrame`.  Request failures are printed (matching the
    original behaviour) and result in `None` being handed to the
    normaliser.
    """

    version = const.VERSIONS['league']

    @classmethod
    def _request(cls, session, url, url_params, params):
        """Issue one API request, printing (not raising) any error.

        Returns the decoded response, or None when the request failed.
        """
        response = None
        try:
            response = session.request(
                url=url,
                url_params=url_params,
                params=params
            )
        except RequestError as req_err:
            print(req_err)
        except Exception as e:
            # Broad catch kept from the original: failures are reported
            # on stdout and a None response is returned.
            print(e)
        return response

    @staticmethod
    def _entries_table(response):
        """Flatten a league payload into a DataFrame of its `entries`,
        carrying `leagueId` and `tier` along as metadata columns."""
        # NOTE(review): `pd.io.json.json_normalize` is deprecated in
        # modern pandas in favour of `pd.json_normalize`; kept for
        # compatibility with the pandas version this project targets.
        return pd.io.json.json_normalize(
            data=response,
            record_path=['entries'],
            meta=['leagueId', 'tier']
        )

    @classmethod
    def get_challenger_league(cls, session, queue, params=None):
        """Return the challenger league for `queue` as a DataFrame."""
        response = cls._request(
            session,
            url=const.URLS_LEAGUE['challenger league'],
            url_params={'version': cls.version, 'queue': queue},
            # None-default replaces the original mutable `params={}`.
            params={} if params is None else params
        )
        return cls._entries_table(response)

    @classmethod
    def get_master_league(cls, session, queue, params=None):
        """Return the master league for `queue` as a DataFrame."""
        response = cls._request(
            session,
            url=const.URLS_LEAGUE['master league'],
            url_params={'version': cls.version, 'queue': queue},
            params={} if params is None else params
        )
        return cls._entries_table(response)

    @classmethod
    def get_grandmaster_league(cls, session, queue, params=None):
        """Return the grandmaster league for `queue` as a DataFrame."""
        response = cls._request(
            session,
            url=const.URLS_LEAGUE['grandmaster league'],
            url_params={'version': cls.version, 'queue': queue},
            params={} if params is None else params
        )
        return cls._entries_table(response)

    @classmethod
    def get_league(cls, session, league_id, params=None):
        """Return the league identified by `league_id` as a DataFrame."""
        response = cls._request(
            session,
            url=const.URLS_LEAGUE['league'],
            url_params={'version': cls.version, 'league_id': league_id},
            params={} if params is None else params
        )
        return cls._entries_table(response)

    @classmethod
    def get_league_positions(cls, session, summoner_id, params=None):
        """Return the league positions of `summoner_id` as a DataFrame.

        Unlike the other endpoints this payload is flattened as-is
        (no `entries` record path), matching the original behaviour.
        """
        response = cls._request(
            session,
            url=const.URLS_LEAGUE['league positions'],
            url_params={'version': cls.version, 'summoner_id': summoner_id},
            params={} if params is None else params
        )
        return pd.io.json.json_normalize(
            data=response
        )
| StarcoderdataPython |
4906424 | <reponame>ROBACON/mobspy<filename>example_models/application_models/donor_receptor.py
import sys, os
from mobspy import *
"""
Here we have a model with a Phage, Donor and Receptor with resources being considered
Both the Donor and Receptor can reproduce with resources
The Donor can produce Phages with resources
Only the Receptor can be infected by the phages
"""
# Species declaration: the three base species this model composes.
Resource, Mortal, Infectible = BaseSpecies(3)
# Resource definition: two concrete resources, each starting at 100 counts.
AA, Glu = New(Resource, 2)
AA(100), Glu(100)
# Donor and Phage creation; every Mortal dies at rate 0.1.
Mortal >> Zero [0.1]
Donor, Phage = New(Mortal, 2)
Donor(100)
# Both resources can react with the Donor due to inheritance from Resource;
# amino acids (AA) support faster duplication than glucose.
dup_rate = lambda _,resource: 0.2 if resource.is_a(AA) else 0.1
Donor + Resource >> 2*Donor [dup_rate]
# The Donor also produces phages while consuming resources.
Donor + Resource >> Donor + Resource + Phage [0.1]
# Infectibility spontaneously escalates from low to high.
Infectible.low_inf >> Infectible.high_inf [0.1]
Receptor = Mortal*Infectible
# Highly infectible receptors are infected twice as fast.
inf_rate = lambda receptor: 0.2 if receptor.high_inf else 0.1
Receptor.not_infected + Phage >> Receptor.early_infection [inf_rate]
Receptor.early_infection >> Receptor.late_infection [0.1]
# We separate the reproduction reactions, because the Receptor gives birth
# to a low_inf (not yet escalated) offspring.
Receptor + Resource >> Receptor.low_inf + Receptor [dup_rate]
MySim = Simulation(Donor | Receptor | Phage | AA | Glu)
print(MySim.compile())
| StarcoderdataPython |
8043004 | <reponame>petewasher/performance-comparisons<gh_stars>0
#! /usr/bin/python
import timeit
import msgpack
import umsgpack
import json
# Shared benchmark fixture: a representative JSON-like object packed once
# with the C-accelerated (msgpack) and pure-Python (umsgpack) encoders.
my_object = {"array":[1,2,3],"boolean":True,"null":None,"number":123,"object":{"a":"b","c":"d","e":"f"},"string":"Hello World"}
packed = msgpack.packb(my_object)
upacked = umsgpack.packb(my_object)
def test_a(packed):
    """Benchmark body: decode with msgpack, arrays returned as lists."""
    # Result is intentionally discarded; the original bound it to an
    # unused local.
    msgpack.unpackb(packed, use_list=True)
def test_b(packed):
    """Benchmark body: decode with msgpack, arrays returned as tuples."""
    # Result is intentionally discarded; the original bound it to an
    # unused local.
    msgpack.unpackb(packed, use_list=False)
def test_c(packed):
    """Benchmark body: decode with the pure-Python umsgpack decoder."""
    # Result is intentionally discarded; the original bound it to an
    # unused local.
    umsgpack.unpackb(packed)
if __name__ == '__main__':
    # Sanity-check that all decoders round-trip the fixture identically
    # before timing them.
    assert msgpack.unpackb(packed, use_list=True) == my_object
    assert umsgpack.unpackb(upacked) == my_object
    assert umsgpack.unpackb(packed) == my_object
    # Not identical output: use_list=False yields tuples internally, which
    # results in non-symmetrical output, so it is excluded from the check.
    #assert msgpack.unpackb(packed, use_list=False) == my_object
    print "Functional comparison succeeded. Checking t_10^6 operations..."
    # timeit runs each decoder 10^6 times (Python 2 script: print statement).
    print (timeit.timeit('test_a(packed)', setup="from __main__ import test_a, packed"))
    print (timeit.timeit('test_b(packed)', setup="from __main__ import test_b, packed"))
    print (timeit.timeit('test_c(upacked)', setup="from __main__ import test_c, upacked"))
| StarcoderdataPython |
1695222 | # flake8: noqa: W293
import logging
import textwrap
from typing import Optional
from irrd.utils import email
from .handler import ChangeSubmissionHandler
logger = logging.getLogger(__name__)
def handle_email_submission(email_txt: str) -> Optional[ChangeSubmissionHandler]:
    """Parse a raw update e-mail, run it through ChangeSubmissionHandler
    and e-mail the submitter a report.

    Returns the handler on success, or None when the message could not be
    parsed or carries no From address.  All failures are logged at this
    boundary; exceptions never propagate to the caller.
    """
    handler = None
    try:
        msg = email.EmailParser(email_txt)
        request_meta = {
            'Message-ID': msg.message_id,
            'From': msg.message_from,
            'Date': msg.message_date,
            'Subject': msg.message_subject,
        }
    except Exception as exc:
        # Top-level boundary: log the unparseable message and give up.
        logger.critical(f'An exception occurred while attempting to parse the following update e-mail: {email_txt}\n'
                        f'--- traceback for {exc} follows:', exc_info=exc)
        return None

    # Without a From address there is nobody to reply to.
    if not msg.message_from:
        logger.critical(f'No from address was found while attempting to parse the following update e-mail - '
                        f'update not processed: {email_txt}\n')
        return None

    try:
        if not msg.body:
            # No text/plain part: ask the submitter to resend plain text.
            logger.warning(f'Unable to extract message body from e-mail {msg.message_id} from {msg.message_from}')
            subject = f'FAILED: {msg.message_subject}'
            reply_content = textwrap.dedent(f"""
            Unfortunately, your message with ID {msg.message_id}
            could not be processed, as no text/plain part could be found.
            Please try to resend your message as plain text email.
            """)
        else:
            # Process the submission; the handler builds the report sent
            # back to the submitter.
            handler = ChangeSubmissionHandler(msg.body, msg.pgp_fingerprint, request_meta)
            logger.info(f'Processed e-mail {msg.message_id} from {msg.message_from}: {handler.status()}')
            logger.debug(f'Report for e-mail {msg.message_id} from {msg.message_from}: {handler.submitter_report()}')
            subject = f'{handler.status()}: {msg.message_subject}'
            reply_content = handler.submitter_report()
    except Exception as exc:
        # Processing failed: report an internal error to the submitter.
        logger.critical(f'An exception occurred while attempting to process the following update: {email_txt}\n'
                        f'--- traceback for {exc} follows:', exc_info=exc)
        subject = f'ERROR: {msg.message_subject}'
        reply_content = textwrap.dedent(f"""
        Unfortunately, your message with ID {msg.message_id}
        could not be processed, due to an internal error.
        """)

    try:
        # Reply to the submitter; on success also notify configured targets.
        email.send_email(msg.message_from, subject, reply_content)
        if handler:
            handler.send_notification_target_reports()
    except Exception as exc:
        logger.critical(f'An exception occurred while attempting to send a reply to an update: '
                        f'{subject}\n{reply_content}\n --- traceback for {exc} follows:', exc_info=exc)

    return handler
| StarcoderdataPython |
9772689 | <gh_stars>0
# <NAME>
# ADS UNIFIP
# Estrutura de Repetição
# 25/03/2020
'''
Exercise 32 - Compute the factorial of an integer supplied by the user.
E.g. 5! = 5.4.3.2.1 = 120.  The output should follow the example below:
    Fatorial de: 5
    5! = 5 . 4 . 3 . 2 . 1 = 120
'''
print('=' * 40)
print('{:=^40}'.format(" 'CALCULE O FATORIAL' "))
print('=' * 40, '\n')
# Read the number whose factorial will be computed (the prompt is in
# Portuguese: "Digite um número" = "Enter a number").
num = int(input('Digite um número: '))
total = 1
print(f'Fatorial de: {num}')
print(f'{num}! = ', end=' ')
# Multiply num * (num-1) * ... * 2, printing each factor followed by a dot.
# NOTE(review): the printed separators ("5.4.3.2.") do not exactly match
# the " . " spacing of the exercise statement -- confirm intent.
for c in range(num, 1, -1):
    total *= c
    print(f'{c}', end = '.')
# Final factor 1 and the accumulated result.
print(f'1 = {total}')
| StarcoderdataPython |
3545204 | from rest_framework import viewsets
from user import serializers, models, filters
class UserView(viewsets.ModelViewSet):
    """REST endpoint exposing full CRUD for the User model.

    All list/retrieve/create/update/delete behaviour is inherited from
    `ModelViewSet`; this class only wires the queryset, serializer and
    filter set together.
    """
    queryset = models.User.objects.all()
    serializer_class = serializers.UserSerializer
    filter_class = filters.UserFilter
| StarcoderdataPython |
312767 | #!/usr/bin/env python
"""
Wrapper to ROS subscriber.
Author: <NAME>
Date: 05/18
"""
import rospy
class ROSSubscriber(object):
    """Thin wrapper around `rospy.Subscriber` that logs incoming messages."""

    def __init__(self, _topic, _message_type):
        """
        ROSSubscriber constructor.

        :param _topic: string, ROS topic to subscribe to
        :param _message_type: message class expected on the topic
        """
        self.topic = _topic
        self.message_type = _message_type
        # Registers `self.callback` to run for every message published
        # on the topic.
        self.sub = rospy.Subscriber(self.topic, self.message_type, self.callback)

    def callback(self, data=None):
        """
        Called when a new message arrives on the topic.

        :param data: the received message (currently unused; only the
            arrival is logged)
        :return: None
        """
        rospy.loginfo("Received message on topic %s", self.topic)
| StarcoderdataPython |
1964823 | <filename>slides/code/interlude_02_2.py
from datetime import datetime, timedelta
from pathlib import Path
# Collect the entries of /tmp modified within the last day.
DELTA_MAX = timedelta(days=1)  # loop-invariant: hoisted out of the loop
now = datetime.now()           # single reference time -> consistent cutoff
files: list[Path] = [
    p
    for p in Path("/tmp").glob("*")
    if now - datetime.fromtimestamp(p.stat().st_mtime) < DELTA_MAX
]
print(files)
| StarcoderdataPython |
307272 | <reponame>politbuero-kampagnen/onegov-cloud<gh_stars>0
import hashlib
from onegov.activity.models import Activity, Attendee, Booking, Occasion
from onegov.user import User
from sqlalchemy import func
class Scoring(object):
    """ Provides scoring based on a number of criteria.

    A criteria is a callable which takes a booking and returns a score.
    The final score is the sum of all criteria scores.

    """

    def __init__(self, criteria=None):
        # PreferMotivated is the baseline criterium applied when no
        # criteria are given explicitly.
        self.criteria = criteria or [PreferMotivated()]

    def __call__(self, booking):
        # The booking's total score is the plain sum over all criteria.
        return sum(criterium(booking) for criterium in self.criteria)

    @classmethod
    def from_settings(cls, settings, session):
        """Build a Scoring instance from a period's settings dict."""
        scoring = cls()

        # always prefer groups
        scoring.criteria.append(PreferGroups.from_session(session))

        if settings.get('prefer_in_age_bracket'):
            scoring.criteria.append(
                PreferInAgeBracket.from_session(session))

        if settings.get('prefer_organiser'):
            scoring.criteria.append(
                PreferOrganiserChildren.from_session(session))

        if settings.get('prefer_admins'):
            scoring.criteria.append(
                PreferAdminChildren.from_session(session))

        return scoring

    @property
    def settings(self):
        # Inverse of `from_settings`: derive the settings dict from the
        # configured criteria.  Groups and motivation are implicit and
        # therefore not reported here.
        classes = {c.__class__ for c in self.criteria}
        settings = {}

        if PreferInAgeBracket in classes:
            settings['prefer_in_age_bracket'] = True

        if PreferOrganiserChildren in classes:
            settings['prefer_organiser'] = True

        if PreferAdminChildren in classes:
            settings['prefer_admins'] = True

        return settings
class PreferMotivated(object):
    """Criterium that rewards bookings by their attendee-assigned priority.

    A "motivated" booking is simply one with a higher priority (an
    attendee favours a booking they are excited about), so the score is
    the booking's priority itself.
    """

    @classmethod
    def from_session(cls, session):
        # No database state is needed; `session` only satisfies the
        # common criterium-factory signature.
        return cls()

    def __call__(self, booking):
        score = booking.priority
        return score
class PreferInAgeBracket(object):
    """ Scores bookings whose attendees fall into the age-bracket of the
    occasion higher.

    If the attendee falls into the age-bracket, the score is 1.0. Each year
    difference results in a penalty of 0.1, until 0.0 is reached.

    """

    def __init__(self, get_age_range, get_attendee_age):
        # Both collaborators are injected so tests can pass plain
        # callables instead of database-backed lookups.
        self.get_age_range = get_age_range
        self.get_attendee_age = get_attendee_age

    @classmethod
    def from_session(cls, session):
        # Lazy caches: built on the first lookup, then reused by all
        # subsequent calls of the closures below.
        # NOTE(review): `occasions` is filtered by the *first* booking's
        # period_id and reused afterwards -- assumes one instance is only
        # ever used within a single period; confirm with callers.
        attendees = None
        occasions = None

        def get_age_range(booking):
            nonlocal occasions, session

            if occasions is None:
                occasions = {
                    o.id: o.age
                    for o in session.query(Occasion.id, Occasion.age)
                    .filter(Occasion.period_id == booking.period_id)}

            # The -1 suggests `age.upper` is an exclusive bound -- confirm
            # against the Occasion model.
            return (
                occasions[booking.occasion_id].lower,
                occasions[booking.occasion_id].upper - 1
            )

        def get_attendee_age(booking):
            nonlocal attendees, session

            if attendees is None:
                attendees = {a.id: a.age for a in session.query(
                    Attendee.id, Attendee.age)}

            return attendees[booking.attendee_id]

        return cls(get_age_range, get_attendee_age)

    def __call__(self, booking):
        min_age, max_age = self.get_age_range(booking)
        attendee_age = self.get_attendee_age(booking)

        if min_age <= attendee_age and attendee_age <= max_age:
            return 1.0
        else:
            # 0.1 penalty per year outside the bracket, floored at 0.0.
            difference = min(
                abs(min_age - attendee_age),
                abs(max_age - attendee_age)
            )

            return 1.0 - min(1.0, difference / 10.0)
class PreferOrganiserChildren(object):
    """ Scores bookings of children higher if their parents are organisers.

    This is basically an incentive to become an organiser. A child whose
    parent is an organiser gets a score of 1.0, if the parent is not an
    organiser a score 0.0 is returned.

    """

    def __init__(self, get_is_organiser_child):
        self.get_is_organiser_child = get_is_organiser_child

    @classmethod
    def from_session(cls, session):
        # Lazily computed set of usernames that organise an activity with
        # an occasion in the booking's period; built on first use.
        organisers = None

        def get_is_organiser_child(booking):
            nonlocal organisers

            if organisers is None:
                organisers = {
                    a.username
                    for a in session.query(Activity.username)
                    .filter(Activity.id.in_(
                        session.query(Occasion.activity_id)
                        .filter(Occasion.period_id == booking.period_id)
                        .subquery()
                    ))
                }

            return booking.username in organisers

        return cls(get_is_organiser_child)

    def __call__(self, booking):
        # 1.0 for organisers' children, 0.0 otherwise (and/or idiom).
        return self.get_is_organiser_child(booking) and 1.0 or 0.0
class PreferAdminChildren(object):
    """ Scores bookings of children higher if their parents are admins. """

    def __init__(self, get_is_association_child):
        self.get_is_association_child = get_is_association_child

    @classmethod
    def from_session(cls, session):
        # Lazily computed set of active admin usernames; built on first
        # use, then reused for all subsequent bookings.
        members = None

        def get_is_association_child(booking):
            nonlocal members

            if members is None:
                # `== True` is SQLAlchemy comparison syntax, not a Python
                # truthiness test.
                members = {
                    u.username for u in session.query(User)
                    .filter(User.role == 'admin')
                    .filter(User.active == True)
                }

            return booking.username in members

        return cls(get_is_association_child)

    def __call__(self, booking):
        # 1.0 for admins' children, 0.0 otherwise (and/or idiom).
        return self.get_is_association_child(booking) and 1.0 or 0.0
class PreferGroups(object):
    """ Scores group bookings higher than other bookings. Groups get a boost
    by size:

    - 2 people: 1.0
    - 3 people: 0.8
    - 4 people: 0.6
    - more people: 0.5

    This preference gives an extra boost to unprioritised bookings, to
    somewhat level out bookings in groups that used no star (otherwise a
    group might be split up because someone didn't star the booking).

    Additionally a unique boost between 0.010000000 to 0.099999999 is given
    to each group depending on the group name. This should ensure that
    competing groups generally do not have the same score. So an occasion
    will generally prefer the members of one group over members of another
    group.

    """

    def __init__(self, get_group_score):
        self.get_group_score = get_group_score

    @classmethod
    def from_session(cls, session):
        # Lazily computed map of group_code -> score for all groups with
        # more than one member in the booking's period.
        group_scores = None

        def unique_score_modifier(group_code):
            # Deterministic per-group tie-breaker in [0.01, 0.099999999],
            # derived from the first 8 hex digits of the name's SHA-1.
            digest = hashlib.sha1(group_code.encode('utf-8')).hexdigest()[:8]
            number = int(digest, 16)

            return float('0.0' + str(number)[:8])

        def get_group_score(booking):
            nonlocal group_scores

            if group_scores is None:
                query = session.query(Booking).with_entities(
                    Booking.group_code,
                    func.count(Booking.group_code).label('count')
                ).filter(
                    Booking.group_code != None,
                    Booking.period_id == booking.period_id
                ).group_by(
                    Booking.group_code
                ).having(
                    func.count(Booking.group_code) > 1
                )

                # 1.0 for pairs, -0.2 per extra member, floored at 0.5,
                # plus the unique per-group modifier.
                group_scores = {
                    r.group_code:
                    max(.5, 1.0 - 0.2 * (r.count - 2))
                    + unique_score_modifier(r.group_code)
                    for r in query
                }

            return group_scores.get(booking.group_code, 0)

        # FIX: the original returned the bare `get_group_score` closure,
        # which bypassed `__call__` (and with it the unprioritised-booking
        # offset documented above).  Wrap it like every sibling criterium.
        return cls(get_group_score)

    def __call__(self, booking):
        # Extra boost for unprioritised (unstarred) bookings, see docstring.
        offset = 0 if booking.priority else 1
        return self.get_group_score(booking) + offset
| StarcoderdataPython |
11236958 | <reponame>ensoft/leap
# -----------------------------------------------------------------
# stackplot.py - Creates a stackplot from labelled coordinate data.
# June - September 2018 - <NAME>, Hrutvik
# -----------------------------------------------------------------
"""
Class that interacts with the flamegraph tool.
Implements the GenericDiaplay interface to display an interactive matplotlib
stackplot figure.
"""
__all__ = ("StackPlot", )
import logging
from typing import NamedTuple
import matplotlib.pyplot as plt
import numpy as np
from marple.common import (
config,
consts,
util
)
from marple.display.interface.generic_display import GenericDisplay
logger = logging.getLogger(__name__)
class StackPlot(GenericDisplay):
"""
The class representing stackplots.
"""
class DisplayOptions(NamedTuple):
"""
- top_processes: the number of processes to be displayed in the
stackplot
"""
top_processes: int
@util.log(logger)
def __init__(self, data):
"""
Constructor for the StackPlot.
:param data:
A `data_io.PointData` object that encapsulated the collected data
we want to display using the stackplot.
"""
# Initialise the base class
super().__init__(data)
top_processes = config.get_option_from_section(
consts.DisplayOptions.STACKPLOT.value, "top", typ="int")
self.display_options = self.DisplayOptions(top_processes)
# Read the data into a dict
datapoints = {}
for point in data.datum_generator:
if point.x not in datapoints:
datapoints[point.x] = []
datapoints[point.x].append((point.y, point.info))
# Collapse same labels at same x
self._collapse_labels(datapoints)
# Set of unique labels that will be displayed
seen_labels = set()
# Dict of x-coord to other points not in the top n at that x
other = {}
for x in datapoints:
# Sort tuples at each time step by memory and take top elements
data_descending = sorted(datapoints[x],
key=lambda z: z[0],
reverse=True)
# Only keep first n at each time step
datapoints[x] = data_descending[0:self.display_options
.top_processes]
# Sum the rest of the values separately, as "other"
if x not in other:
try:
other[x] = np.sum(z[0] for z in
data_descending[self.display_options
.top_processes:])
except IndexError as ie:
raise IndexError("Not enough data to display stackplot "
"with {} rows, use smaller number. {}"
.format(self.display_options
.top_processes,
ie.args))
# Keep track of the labels that are in use in top n
seen_labels = seen_labels.union(set(z[1] for z in datapoints[x]))
# Insert the missing datapoints
self._add_missing_datapoints(datapoints, seen_labels)
# Sort again
for x in datapoints:
datapoints[x] = sorted(datapoints[x],
key=lambda z: z[1],
reverse=True)
y_values_list = []
# @@@ separate other and tuple_list into different for loops
for index in range(len(seen_labels)+1):
values = []
for x, tuple_list in datapoints.items():
if index == 0:
values.append(other[x])
else:
values.append(tuple_list[index-1][0])
y_values_list.append(values)
labels_list = ["other"]
for _, tuple_list in datapoints.items():
for(_, label) in tuple_list:
labels_list.append(label)
break
# Create the data to be plotted
self.x_values = sorted(time for time in datapoints)
self.y_values = np.stack(y_values_list)
self.labels = labels_list
@staticmethod
def _collapse_labels(datapoints):
"""
Collapses unique labels at each x-coordinate.
Takes a dict representing a 2d graph with labels and makes the labels
unique for each x coordinate by adding the y values for the same
label. MODIFIES THE DICT PASSED AS AN ARGUMENT.
:param datapoints:
A dict with x-coordinates as keys and tuples of (y,label) as values.
"""
# e.g. in: x1 -> (y1, label1), (y2, label2), (y3, label1)
# out: x1 -> (y1+y3, label1), (y2, label2)
for x in datapoints:
# use dict to make labels at each x unique and add y's
points = {}
for (y, label) in datapoints[x]:
if label not in points:
points[label] = y
else:
points[label] += y
# Convert back to list of tuples and modify the input dict
datapoints[x] = [(y, label) for label, y in points.items()]
@staticmethod
def _add_missing_datapoints(datapoints, seen_labels):
"""
Adds datapoints to the graph to make it plottable.
Stackplot can only be plotted if there are the same number of (y,
label) for each x, so add (0.0, label) where necessary, so that all
seen labels exist at each x. MODIFIES THE DICT PASSED AS AN ARGUMENT.
:param datapoints:
A dict with x-coordinates as keys and tuples of (y,label) as values.
:param seen_labels:
The set of labels that need to exist at each x.
"""
for x, data_tuple_list in datapoints.items():
labels_at_key = set(tup[1] for tup in data_tuple_list)
for _label in seen_labels - labels_at_key:
datapoints[x].append((0.0, _label))
@util.log(logger)
@util.Override(GenericDisplay)
def show(self):
ax = plt.subplot(1, 1, 1)
try:
# Create the plot, ordering by time coordinates
ax.stackplot(self.x_values, self.y_values, labels=self.labels)
except KeyError as ke:
raise KeyError("Not enough information to create a stackplot. "
"Need at least two samples. {}".format(ke.args))
# Set legend, inverting the elements to have "other" at the bottom
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles[::-1], labels[::-1])
# @@@ Set labels passed as arguments (options)
ax.set_xlabel(self.data_options.x_label + ' / ' +
self.data_options.x_units)
ax.set_ylabel(self.data_options.y_label + ' / ' +
self.data_options.y_units)
plt.show()
| StarcoderdataPython |
9727036 | <filename>molecularsentry_match_mutant_position.py<gh_stars>0
import sys
import glob
file1= sys.argv[1] # list of the variants file
fh = open(file1.strip(),"r")
file2= sys.argv[2] # output from the epitope.js having epitopes-paratopes information from PDB
eph = open(file2.strip(),"r")
z=fh.readline()
import itertools
from itertools import groupby
#from collections import OrderedDict
import re
# function to split alpha_numberic characters example ["V141K"] to ["V","141","K"]
#def split_text(alnum):
# for i, j in groupby(alnum, str.isalpha):
# yield ''.join(j)
##updated for the deletion variants example ["V141-"] to ["V","141","-"]
def split_text(alnumspchr):
return filter(None, re.split(r'(\d+)', alnumspchr))
# Processing variants file
variants=[]
variants_pos=[]
for line in fh:
    line=line.strip()
    variants.append(line)
    # Each variant becomes [wild_aa, position, mutant_aa-or-symbol].
    variants_pos.append(list(split_text(line)))
#print(variants_pos)
# https://github.com/ncbi/icn3d/tree/master/icn3dnode
# https://github.com/ncbi/icn3d/blob/master/icn3dnode/epitope.js
# Processing pdb_epitopes_paratopes file from epitope.js command
epipara=[]
for line in eph:
    line=line.strip().split(",")
    # print(line)
    # Field 1 is "<pdb>_<chain>"; field 2 is the residue position.
    pdb=line[1].strip().split("_")
    epipara.append([pdb[0],pdb[1],line[2].strip()])
#print(epipara)
##updated to get unique epitope-paratope position
# NOTE(review): set() makes the ordering of unique_epipara nondeterministic
# across runs -- confirm downstream consumers do not rely on the order.
unique_epipara = [list(x) for x in set(tuple(x) for x in epipara)]
# matching amino acid postion for wild type and mutant type
match_pos_wild=[]
match_pos_mutant=[]
for i in variants_pos:
    a=int(i[1])
    #print(a)
    for j in unique_epipara:
        b=int(j[2])
        #print(b)
        if a==b:
            # Append [pdb, chain, pos, residue] wrapped in one extra list,
            # flattened below with chain.from_iterable.
            match_pos_mutant.append([j+list(i[2])])
            match_pos_wild.append([j+list(i[0])])
# flatten the nested lists
match_pos_wld=(list(itertools.chain.from_iterable(match_pos_wild)))
match_pos_mut=(list(itertools.chain.from_iterable(match_pos_mutant)))
def listToString(ls):
    """Join the items of *ls* with single spaces (interaction2.js input form)."""
    return " ".join(ls)
# Emit wild-type and mutant rows as alternating lines on stdout.
for w, m in zip(match_pos_wld, match_pos_mut):
    print((listToString(w)),(listToString(m)), sep="\n")
11312111 | # -*- coding: utf-8 -*-
from flask import Flask, request, render_template
import os
import sys
# Python 2 idiom: force the default string encoding to UTF-8.
reload(sys)
sys.setdefaultencoding('utf8')
app = Flask(__name__, static_folder='static', static_url_path='')
workspaces = {}
# Path to the build log shown on the index page; set by load_config().
logfile = ""
# task name -> task config dict; repo name -> list of [task, branch].
config_task = {}
config_repo = {}
@app.route('/')
def index():
    """Render the landing page with the current contents of the log file."""
    # Context manager closes the handle promptly; the original left it
    # to the garbage collector.
    with open(logfile) as f:
        log_info = f.read()
    return render_template(
        'index.html', title="Index", log_info=log_info)
@app.route('/task', methods=["Get", "POST"])
def task():
    """Render the detail page for a build task.

    Returns a 404 response for an unknown task name; the original returned
    None, which makes Flask raise a 500 error.
    """
    task = request.args.get('name')
    if task not in config_task:
        return "task not found", 404
    build_info = ""
    change_log = ""
    workspace = config_task[task]["workspace"]
    # Build status / changelog are written by the build job; either file may
    # not exist yet, in which case the page shows an empty section.
    if os.path.isfile(workspace + "/build.txt"):
        with open(workspace + "/build.txt") as f:
            build_info = f.read()
    if os.path.isfile(workspace + "/changelog.txt"):
        with open(workspace + "/changelog.txt") as f:
            change_log = f.read()
    repo_lists = config_task[task]["repo_list"]
    return render_template(
        'task.html', title=task, build_info=build_info,
        change_log=change_log, task=task, repo_lists=repo_lists)
@app.route('/repo', methods=["Get", "POST"])
def repo():
    """Render the list of tasks that build from a given repository.

    Returns a 404 response for an unknown repo name; the original returned
    None, which makes Flask raise a 500 error.
    """
    repo = request.args.get('name')
    if repo not in config_repo:
        return "repo not found", 404
    task_lists = config_repo[repo]
    return render_template(
        'repo.html', title=repo, task_lists=task_lists)
def load_config(config):
    """Populate the module-level task/repo maps and Flask settings from *config*."""
    global config_task, config_repo, logfile
    logfile = config["logfile"]
    app.config["push_server"] = config["push_server"]
    app.config["tasks"] = []
    for task_cfg in config["task"]:
        name = task_cfg["name"]
        config_task[name] = task_cfg
        app.config["tasks"].append(name)
        # Record the reverse mapping: which tasks build from each repo/branch.
        for repo_name, branch in task_cfg["repo_list"]:
            config_repo.setdefault(repo_name, []).append([name, branch])
def run(config):
    # Entry point: load configuration, then serve on all interfaces, port 5000.
    load_config(config)
    app.run(debug=True, port=5000, host='0.0.0.0')
| StarcoderdataPython |
11364119 | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for working with jinja2 templates."""
import functools
import logging
import os
import jinja2
def _LoadTemplate(template_subdir, template_name):
template_path = os.path.join(
os.path.dirname(__file__), '../templates', template_subdir, template_name)
logging.info('Loading template: %s/%s', template_subdir, template_name)
return open(template_path).read() if os.path.isfile(template_path) else None
def _RenderTemplate(template_subdir, template_name, **context):
    """Load a template file and render it to unicode.

    Args:
      template_subdir: Subdirectory in gae/templates holding the template.
      template_name: File name of the template.
      **context: Optional key/value pairs rendered into the template.

    Returns:
      The rendered template as a unicode string.

    Raises:
      jinja2.TemplateNotFound: if the template file doesn't exist.
    """
    def load_by_name(name):
        # jinja2.FunctionLoader calls this with just the template name.
        return _LoadTemplate(template_subdir, name)

    # Build the Environment; [[ ]] delimiters avoid clashing with other
    # templating layers, and StrictUndefined surfaces missing variables.
    env = jinja2.Environment(
        loader=jinja2.FunctionLoader(load_by_name),
        autoescape=True,
        extensions=['jinja2.ext.autoescape'],
        finalize=lambda value: value or '',
        variable_start_string='[[',
        variable_end_string=']]',
        undefined=jinja2.StrictUndefined)
    return env.get_template(template_name).render(**context)
def RenderWebTemplate(template_name, **context):
    """Render a template from the 'web' templates directory."""
    return _RenderTemplate('web', template_name, **context)
def RenderEmailTemplate(template_name, **context):
    """Render a template from the 'email' templates directory."""
    return _RenderTemplate('email', template_name, **context)
| StarcoderdataPython |
11354804 | from django.dispatch import receiver
from django.db.models.signals import post_save, pre_delete
from django.contrib.sessions.models import Session
from .models import CustomUser
from primaseru.models import StudentProfile, FatherStudentProfile, MotherStudentProfile, StudentGuardianProfile, PhotoProfile, MajorStudent, StudentFile
from dashboard import models as dashboard_model
@receiver(post_save, sender=CustomUser)
def create_profile(sender, instance, created, **kwargs):
    """On first save of a non-staff user, create all related profile rows."""
    if not created or instance.is_staff:
        return
    related_models = (
        # StudentProfile intentionally omitted (disabled upstream).
        FatherStudentProfile,
        MotherStudentProfile,
        StudentGuardianProfile,
        PhotoProfile,
        MajorStudent,
        StudentFile,
    )
    for model in related_models:
        model.objects.create(student=instance)
    dashboard_model.StudentStatus.objects.create(student=instance)
    print("Created Instance")
@receiver(post_save, sender=CustomUser)
def save_profile(sender, instance, **kwargs):
    """Persist every related profile row whenever a non-staff user is saved."""
    if instance.is_staff:
        return
    for related in (
        # instance.studentprofile intentionally skipped (disabled upstream).
        instance.fatherstudentprofile,
        instance.motherstudentprofile,
        instance.studentguardianprofile,
        instance.photoprofile,
        instance.majorstudent,
        instance.studentfile,
        instance.studentstatus,
    ):
        related.save()
    print("Instance Saved")
@receiver(pre_delete, sender=Session)
def house_keeping(sender, instance, **kwargs):
    # Placeholder hook for session-deletion cleanup; currently a no-op.
    pass
| StarcoderdataPython |
6667980 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayCommerceEducateStudentIdentityQueryResponse(AlipayResponse):
    """Response wrapper for the student-identity query API."""

    def __init__(self):
        super(AlipayCommerceEducateStudentIdentityQueryResponse, self).__init__()
        self._college_name = None
        self._college_online_tag = None

    @property
    def college_name(self):
        return self._college_name

    @college_name.setter
    def college_name(self, value):
        self._college_name = value

    @property
    def college_online_tag(self):
        return self._college_online_tag

    @college_online_tag.setter
    def college_online_tag(self, value):
        self._college_online_tag = value

    def parse_response_content(self, response_content):
        response = super(AlipayCommerceEducateStudentIdentityQueryResponse, self).parse_response_content(response_content)
        # Copy only fields present in the payload; absent keys stay None.
        for field in ('college_name', 'college_online_tag'):
            if field in response:
                setattr(self, field, response[field])
| StarcoderdataPython |
# Maps learner-name strings to learner classes; looked up by config key.
REGISTRY = {}
from .q_learner import QLearner
from .cql_learner import CQLLearner
REGISTRY["simple"] = QLearner
REGISTRY["cql"] = CQLLearner | StarcoderdataPython |
6689657 | """
29. Many-to-many and many-to-one relationships to the same table
Make sure to set ``related_name`` if you use relationships to the same table.
"""
from django.db import models
class User(models.Model):
    # Minimal user model used by the relationship doctests below.
    username = models.CharField(max_length=20)
class Issue(models.Model):
    # Has both an m2m (cc) and a FK (client) to the same User table;
    # related_name is required to disambiguate the reverse accessors.
    num = models.IntegerField()
    cc = models.ManyToManyField(User, blank=True, related_name='test_issue_cc')
    client = models.ForeignKey(User, related_name='test_issue_client')
    # Python 2-era Django: __unicode__ (not __str__) supplies the repr.
    def __unicode__(self):
        return unicode(self.num)
    class Meta:
        ordering = ('num',)
# Doctest suite run by the legacy Django test runner via the __test__ hook.
__test__ = {'API_TESTS':"""
>>> Issue.objects.all()
[]
>>> r = User(username='russell')
>>> r.save()
>>> g = User(username='gustav')
>>> g.save()
>>> i = Issue(num=1)
>>> i.client = r
>>> i.save()
>>> i2 = Issue(num=2)
>>> i2.client = r
>>> i2.save()
>>> i2.cc.add(r)
>>> i3 = Issue(num=3)
>>> i3.client = g
>>> i3.save()
>>> i3.cc.add(r)
>>> from django.db.models.query import Q
>>> Issue.objects.filter(client=r.id)
[<Issue: 1>, <Issue: 2>]
>>> Issue.objects.filter(client=g.id)
[<Issue: 3>]
>>> Issue.objects.filter(cc__id__exact=g.id)
[]
>>> Issue.objects.filter(cc__id__exact=r.id)
[<Issue: 2>, <Issue: 3>]
# These queries combine results from the m2m and the m2o relationships.
# They're three ways of saying the same thing.
>>> Issue.objects.filter(Q(cc__id__exact=r.id) | Q(client=r.id))
[<Issue: 1>, <Issue: 2>, <Issue: 3>]
>>> Issue.objects.filter(cc__id__exact=r.id) | Issue.objects.filter(client=r.id)
[<Issue: 1>, <Issue: 2>, <Issue: 3>]
>>> Issue.objects.filter(Q(client=r.id) | Q(cc__id__exact=r.id))
[<Issue: 1>, <Issue: 2>, <Issue: 3>]
"""}
11310871 | <gh_stars>0
from django.db import models
from datetime import date
# Create your models here.
from django.urls import reverse #Used to generate URLs by reversing the URL patterns
from datetime import date
from django.contrib.auth.models import User
class Review(models.Model):
    """Model representing a review comment on a media item."""
    # Free-text body of the review.
    description = models.CharField(max_length=500, help_text="Enter a comment...")
    # The reviewed item; currently only Book is supported as a target.
    mediaItem = models.ForeignKey('Book', on_delete=models.SET_NULL, null=True)
    # Author of the review; deleting the user deletes their reviews.
    user = models.ForeignKey('auth.User', on_delete=models.CASCADE, null=True)
    #created = models.DateTimeField(auto_now_add=True)
    def __str__(self):
        """String representation: the review's text."""
        return self.description
    def get_absolute_url(self):
        """
        Returns the url to access a detail record for this review.
        """
        return reverse('review-detail', args=[str(self.id)])
    #class Meta:
    #    ordering = ('created',)
class Reply(models.Model):
    """Model representing a reply to a review."""
    # Free-text body of the reply.
    description = models.CharField(max_length=500, help_text="Enter a comment...")
    # The review this reply is attached to.
    reply_to = models.ForeignKey('Review', on_delete=models.SET_NULL, null=True)
    user = models.ForeignKey('auth.User', on_delete=models.CASCADE, null=True)
    #created = models.DateTimeField(auto_now_add=True)
    def __str__(self):
        """String representation: the reply's text."""
        return self.description
    def get_absolute_url(self):
        """
        Returns the url to access the detail record of the parent review.
        """
        return reverse('review-detail', args=[str(self.id)])
    #class Meta:
    #    ordering = ('created',)
class Genre(models.Model):
    """
    Model representing a book genre (e.g. Science Fiction, Non Fiction).
    """
    name = models.CharField(max_length=200, help_text="Enter a book genre (e.g. Science Fiction, French Poetry etc.)")
    def __str__(self):
        """
        String for representing the Model object (in Admin site etc.)
        """
        return self.name
class Book(models.Model):
    """
    Model representing a book (but not a specific copy of a book).
    """
    title = models.CharField(max_length=200)
    # Foreign Key used because book can only have one author, but authors can
    # have multiple books. Author as a string rather than object because it
    # hasn't been declared yet in the file.
    author = models.ForeignKey('Author', on_delete=models.SET_NULL, null=True)
    summary = models.TextField(max_length=1000, help_text='Enter a brief description of the book')
    isbn = models.CharField('ISBN',max_length=13, help_text='13 Character <a href="https://www.isbn-international.org/content/what-isbn">ISBN number</a>')
    # ManyToManyField used because genre can contain many books. Books can
    # cover many genres. Genre class has already been defined so we can
    # specify the object above.
    genre = models.ManyToManyField('Genre', help_text='Select a genre for this book')
    def __str__(self):
        """
        String for representing the Model object.
        """
        return self.title
    def get_absolute_url(self):
        """
        Returns the url to access a detail record for this book.
        """
        return reverse('book-detail', args=[str(self.id)])
class Music (models.Model):
    """Model representing a song."""
    song_title = models.CharField(max_length=200)
    # Performing artist; Author is used here as a generic "person" model.
    artist = models.ForeignKey('Author', on_delete=models.CASCADE, null=True)
    # Track length; units not stated in the source (presumably minutes) --
    # TODO confirm.
    length = models.FloatField(default=2.0)
    def __str__(self):
        """
        String for representing the Model object.
        """
        return self.song_title
    def get_absolute_url(self):
        """
        Returns the url to access a detail record for this song.
        """
        return reverse('music-detail', args=[str(self.id)])
class Visual (models.Model):
    # Stub model: only records its creator; no other fields defined yet.
    creator = models.ForeignKey('Author', on_delete=models.CASCADE)
class Author(models.Model):
    """
    Model representing an author (also used as artist/creator/director/writer).
    """
    first_name = models.CharField(max_length=100)
    last_name = models.CharField(max_length=100)
    date_of_birth = models.DateField(null=True, blank=True)
    date_of_death = models.DateField('Died', null=True, blank=True)
    class Meta:
        # Sort alphabetically, surname first.
        ordering = ["last_name","first_name"]
    def get_absolute_url(self):
        """
        Returns the url to access a particular author instance.
        """
        return reverse('author-detail', args=[str(self.id)])
    def __str__(self):
        """
        String for representing the Model object: "Last, First".
        """
        return '{0}, {1}'.format(self.last_name,self.first_name)
class Television(models.Model):
    """Model representing a TV show with one creator and one or more genres."""
    title = models.CharField(max_length=200)
    creator = models.ForeignKey('Author', on_delete=models.SET_NULL, null=True)
    seasons = models.IntegerField(default = 1)
    # NOTE(review): help_text says "book" -- looks like a copy/paste slip;
    # left unchanged to avoid altering admin-visible text.
    summary = models.TextField(max_length=1000, help_text='Enter a brief description of the book')
    genre = models.ManyToManyField('Genre')
    # Pass the callable (not its result) so the default is evaluated per-row
    # at save time; the original called date.today() once at import time,
    # freezing the date for the process lifetime and in migrations.
    first_air_date = models.DateField(default=date.today)
    def __str__(self):
        """
        String for representing the Model object.
        """
        return self.title
    def get_absolute_url(self):
        """
        Returns the url to access a particular show instance.
        """
        return reverse('television-detail', args=[str(self.id)])
class Movies(models.Model):
    """Model representing a film with separate director and writer credits."""
    title = models.CharField(max_length=200)
    # Distinct related_names keep the two reverse FKs to Author apart.
    director = models.ForeignKey('Author', on_delete = models.CASCADE, related_name="director")
    writer = models.ForeignKey('Author', on_delete = models.CASCADE, related_name="writer")
    # NOTE(review): help_text says "book" -- looks like a copy/paste slip;
    # left unchanged to avoid altering admin-visible text.
    summary = models.TextField(max_length=1000, help_text='Enter a brief description of the book')
    genre = models.ManyToManyField('Genre')
    # Runtime; units not stated in the source (presumably minutes) -- TODO confirm.
    duration = models.IntegerField(default=30)
    # Pass the callable so "today" is evaluated when a row is created; the
    # original called date.today() once at import time, freezing the date.
    release_date = models.DateField(default=date.today)
    def __str__(self):
        """
        String for representing the Model object.
        """
        return self.title
| StarcoderdataPython |
1614306 | <filename>tests/python/gaia-unit-tests/gaia_unit_test/reporters/tbpl.py
from base import Base
class TBPLLogger(Base):
    """Reporter that forwards mocha-style test events to a TBPL logger."""
    def __init__(self, *args, **kwargs):
        # Extract our private 'logger' kwarg before delegating to Base,
        # which does not expect it.
        self.logger = kwargs['logger']
        del kwargs['logger']
        Base.__init__(self, *args, **kwargs)
    def on_pass(self, data):
        # A single test passed.
        self.logger.testPass(data['fullTitle'])
    def on_fail(self, data):
        # A single test failed; append the error message when available.
        msg = data['fullTitle']
        if 'err' in data:
            if 'message' in data['err']:
                msg += " | %s" % data['err']['message']
        self.logger.testFail(msg)
        # Emit the stack trace, indented, as a separate info record.
        if 'err' in data and 'stack' in data['err']:
            self.logger.info('stack trace:\n%s' % '\n'.join(' %s' % x for x in data['err']['stack'].split('\n')))
    def on_suite(self, data):
        # A suite started.
        self.logger.testStart(data['title'])
    def on_suite_end(self, data):
        # A suite finished.
        self.logger.testEnd(data['title'])
    def on_end(self, data):
        # Final summary; pass/fail counters are accumulated by Base.
        self.logger.info('suite results (pass/fail): %d/%d' %
                         (self.passes, self.failures))
| StarcoderdataPython |
1973389 | <filename>src/xp_elems.py
# Yahoo Finance frequently changes class names, use 'contains' for fuzzy matches
XP_ELEMS = {
    # Section anchors within the conversation widget.
    "top_react" : "//span[text()[contains(.,'Top Reactions')]]",
    "newest" : "//span[text()[contains(.,'Newest Reactions')]]",
    "old_time_stamp" : "//li//div//div//span//span[text()[contains(.,'day')]]",
    "show_more" : "//span[text()[contains(.,'Show more')]]",
    # Page header: ticker title, current index value and daily movement.
    "title" : "//h1[@class='D(ib) Fz(18px)']",
    "index" : "//span[contains(@class, 'Trsdu(0.3s) Fw(b)')]",
    "movement" : "//span[contains(@class, 'Trsdu(0.3s) Fw(500) Pstart(10px) Fz(24px)')]",
    # Comment list container and per-comment sub-elements (relative XPaths).
    "comment_list" : "//ul[contains(@class, 'comments-list')]",
    "comment_block" : "//ul//li[contains(@class, 'comment')]",
    "comment_user" : ".//button[contains(@aria-label, 'See reaction history')]",
    "time_stamp" : ".//div//div//span[contains(@class, 'Fz(12px)')]//span",
    "comment_text" : ".//div[contains(@class, 'Wow(bw)')]",
    "comment_urls" : ".//a[contains(@href, 'http')]",
    "comment_media" : ".//source",
    # Reaction buttons used to read vote counts.
    "thumbup" : ".//button[contains(@aria-label, 'Thumbs Up')]",
    "thumbdown" : ".//button[contains(@aria-label, 'Thumbs Down')]"
}
| StarcoderdataPython |
3486619 | #!/usr/bin/env python3
# -*- coding: utf-8 -*
#/// DEPENDENCIES
import typing, os, aiofiles
import PIL #Image Conversion
import ffmpy3 as FIL #Sound and Video Conversion
import discord, pandas as pd
from discord.ext import commands
from discord.ext.commands import Bot, MissingPermissions, has_permissions
from chk.enbl import enbl
from chk.has import *
import asyncio
import random
from util import ez
##///---------------------///##
##/// BOT COMMANDS ///##
##///---------------------///##
@commands.command(
    aliases = ["change", "converter"],
    help = 'oth',
    brief = 'Converts a file to a {fmt} file! Supports 316 formats!',
    usage = ';]convert {fmt} <attach a file or have an embedable link>',
    description = '''\
FMT [TEXT] - The format to convert *to* eg "pdf"
'''
)
@commands.check(enbl)
@commands.check(has_attachment)
async def convert(ctx, fmt):
    """Convert the message's attachment to *fmt* and post the result.

    Images use Pillow, audio/video use ffmpeg, office documents use
    LibreOffice. Results larger than Discord's 8 MB upload limit are
    pushed to a public GitHub hosting repo instead.
    """
    import ast  # local import: only used to parse the formats table
    msg = await ctx.send('```md\n#] INITIALIZING```')
    att = ctx.message.attachments[0]
    # The formats file is a Python dict literal; literal_eval parses it
    # safely (the original used eval(), which executes arbitrary code).
    with open("msc/formats.json") as formats_fh:
        formats = ast.literal_eval(formats_fh.read())
    img_ext = formats["image"]
    txt_ext = formats["text"]
    aud_ext = formats["audio"]
    vid_ext = formats["video"]
    ffm_ext = formats["ffmpeg"]
    xls_ext = formats["sheet"]
    doc_ext = formats["doc"]
    ppt_ext = formats["slide"]
    ebk_ext = formats["ebook"]
    drw_ext = formats["draw"]
    mth_ext = formats["math"]
    ffmpeg_formats = [img_ext, aud_ext, vid_ext, ffm_ext]
    office_formats = [xls_ext, doc_ext, ppt_ext, ebk_ext, drw_ext, mth_ext]
    if att.size > 16777216:  # 16 MB input cap
        return await msg.edit(content = '```diff\n-] ATTACHMENT TOO LARGE [16MB MAX]```')
    new = fmt
    old = att.filename.split('.')[-1]
    root = "/home/priz/Desktop/PRIZM/msc/RAW"
    # Normalize common extension aliases before comparing.
    if old == 'jpg':
        old = 'jpeg'
    if old == 'tif':
        old = 'tiff'
    if new == 'tif':
        new = 'tiff'
    old, new = old.lower(), new.lower()
    await msg.edit(content = "```md\n#] DOWNLOADING```")
    await att.save("msc/RAW." + old)
    await msg.edit(content = "```md\n#] CONVERTING```")
    if old in img_ext and new in img_ext:
        img = PIL.Image.open("msc/RAW." + old)
        # Formats without alpha support need an RGB conversion first.
        if new not in ["png", "gif", "bmp", "tiff", "j2k"]:
            img = img.convert("RGB")
        img.save("msc/RAW." + new)
    elif old in txt_ext and new in txt_ext:
        # Plain text "conversion" is just a rename; upload and finish here.
        os.rename("msc/RAW." + old, "msc/RAW." + new)
        await msg.edit(content = "```md\n#] UPLOADING TO DISCORD```")
        await ctx.send("```md\n#] CONVERTED ;]```", file = discord.File('msc/RAW.' + new))
        await msg.delete()
        return os.remove("msc/RAW." + new)
    elif any(old in group and new in group for group in ffmpeg_formats):
        proc = FIL.FFmpeg(
            inputs = {f"{root}.{old}": None},
            outputs = {f"{root}.{new}": None}
        )
        await proc.run_async()
        await proc.wait()
    elif any(old in group and new in group for group in office_formats):
        # soffice writes the result next to the input; the original then
        # renamed the file onto itself (a no-op), which has been dropped.
        await ez.proc(f"soffice --headless --convert-to {new} {root}.{old}")
    else:
        await msg.edit(
            content = f'''```md
#] CONVERSION FROM {old} TO {new} UNSUPPORTED\
> I only support images, videos, text files, and office documents for now ;[
> Make sure your conversion is within the same category...
```''')
        return os.remove('msc/RAW.' + old)
    if os.stat("msc/RAW." + new).st_size < 8388608:  # Discord's 8 MB limit
        await msg.edit(content = "```md\n#] UPLOADING TO DISCORD```")
        await ctx.send("```md\n#] CONVERTED ;]```", file = discord.File('msc/RAW.' + new))
    else:
        await msg.edit(content = """```md
#] UPLOADING TO GITHUB
> This file is too large to upload directly to discord""")
        new_name = "".join(random.choice("ABCDEF1234567890") for x in range(16))
        new_name += "." + new
        # Copy the converted file into the hosting repo, closing both handles
        # (the original left both files open).
        with open("msc/RAW." + new, "rb") as src, \
             open("/home/priz/prizm-hosting/" + new_name, "wb") as dst:
            dst.write(src.read())
        git = "/home/priz/prizm-hosting/"
        # The original omitted the 'add' subcommand, so the following commit
        # had nothing staged.
        await ez.proc(f"git -C {git}.git add {new_name}")
        await ez.proc(f"git -C {git}.git commit -m 'new file: " + new_name + "'")
        await ez.proc(f"git -C {git}.git push")
        # The original interpolated an undefined name {name} here (NameError);
        # the uploaded filename is new_name.
        await ctx.send(f"""```md
#] CONVERTED ;]``````diff
-] This file IS available to everybody that has this URL for 24 hours
=] Sorry about that mate```
**LINK:** https://github.com/VoxelPrismatic/prizm-hosting/blob/master/{new_name}""")
    await msg.delete()
    return os.remove("msc/RAW." + new), os.remove('msc/RAW.' + old)
##///---------------------///##
##/// OTHER STUFF ///##
##///---------------------///##
def setup(bot):
    # discord.py extension hook: register the convert command on load.
    print('+COM')
    bot.add_command(convert)
    print('GOOD')
def teardown(bot):
    # discord.py extension hook: unregister the convert command on unload.
    print('-COM')
    bot.remove_command('convert')
    print('GOOD')
| StarcoderdataPython |
9743109 | # https://leetcode.com/problems/binary-tree-maximum-path-sum
# https://leetcode.com/problems/binary-tree-maximum-path-sum/
# Definition for a binary tree node.
class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        # Node payload and optional left/right child links.
        self.val = val
        self.left = left
        self.right = right
class Solution:
    """LeetCode 124: maximum path sum in a binary tree.

    A path may start and end at any nodes. self.max tracks the best path
    seen so far; calc_sum returns the best *downward* path ending at the
    given node (what a parent may extend).
    """

    def __init__(self):
        self.max = -float("inf")

    def calc_sum(self, node):
        """Return the max-sum downward path starting at *node*.

        Side effect: updates self.max with the best path passing through
        *node* (possibly spanning both children). Collapses the original's
        four near-identical leaf/one-child/two-child branches into one
        clamped-gain formulation, which computes identical values.
        """
        # Gain from each subtree, clamped at 0 (a negative branch is dropped).
        left_gain = max(self.calc_sum(node.left), 0) if node.left else 0
        right_gain = max(self.calc_sum(node.right), 0) if node.right else 0
        # Best path *through* this node may use both children...
        self.max = max(self.max, node.val + left_gain + right_gain)
        # ...but the value handed to the parent can extend only one side.
        return node.val + max(left_gain, right_gain)

    def maxPathSum(self, root):
        """Return the maximum path sum of the tree rooted at *root*."""
        # Reset so a Solution instance can be reused across calls; the
        # original accumulated state between calls.
        self.max = -float("inf")
        self.calc_sum(root)
        return self.max
| StarcoderdataPython |
81590 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Update cbpf
------------
"""
from hdx.data.resource import Resource
from hdx.utilities.dictandlist import write_list_to_csv
from src.helpers import cannonize_name
def update_cbpf(base_url, downloader, poolfundabbrv, today, valid_names, replace_values, resource_updates):
    """Fetch CBPF project/location data and write a per-admin1 cashflow CSV."""
    # Allocation years run on a fiscal boundary: Jan-Mar still reports the
    # previous year.
    year = today.year
    if today.month <= 3:
        year -= 1
    summary = downloader.download(
        '%sProjectSummary?poolfundAbbrv=%s' % (base_url, poolfundabbrv)).json()['value']
    # Project code -> (budget, direct cost, support cost) for this year.
    transactions = {}
    for proj in summary:
        if proj['AllocationYear'] != year:
            continue
        transactions[proj['ChfProjectCode']] = (
            proj['Budget'], proj['TotalDirectCost'], proj['TotalSupportCost'])
    locs = downloader.download(
        '%sLocation?poolfundAbbrv=%s' % (base_url, poolfundabbrv)).json()['value']
    # Admin1 name -> accumulated (budget, direct, support), each project's
    # figures apportioned by the location's percentage share.
    totals = {}
    for loc in locs:
        if loc['AllocationYear'] != year:
            continue
        code = loc['ChfProjectCode']
        admin1 = cannonize_name(loc['AdminLocation1'], valid_names, replace_values)
        share = float(loc['Percentage']) / 100.0
        budget, direct, support = transactions[code]
        prev_b, prev_d, prev_s = totals.get(admin1, (0.0, 0.0, 0.0))
        totals[admin1] = (prev_b + budget * share,
                          prev_d + direct * share,
                          prev_s + support * share)
    rows = [['#adm1+name', '#cashflow+type', '#cashflow+value']]
    for admin1 in sorted(totals):
        budget, direct, support = totals[admin1]
        rows.append([admin1, 'Budget', round(budget)])
        rows.append([admin1, 'Direct Cost', round(direct)])
        rows.append([admin1, 'Support Cost', round(support)])
    write_list_to_csv(rows, resource_updates['cbpf']['path'], headers=['Admin Location', 'Cashflow Type', 'Cashflow Value'])
| StarcoderdataPython |
8160812 | from django.shortcuts import render
from django.db.models import Q
from api.v1.tools.paginator import customPagination
# serializers imports
from .serializers import NotificationSerializer
# rest_frameworks imports
from rest_framework.views import APIView
from rest_framework import status
from rest_framework.permissions import IsAuthenticated
# model imports
from notification.models import Notification
# custom response format
from api.v1.ResponseFormat import responseFormat
class NotificationList(APIView):
    """Paginated notification feed.

    Admins may filter by target user and/or include broadcast notifications;
    regular users see their own active notifications plus broadcasts.
    """
    permission_classes = [IsAuthenticated]

    def get(self, request, format=None):
        user_type = request.GET.get('user', 'user')
        user_id = request.GET.get('user_id', False)
        # Query params arrive as strings; compare against 'true' explicitly.
        # The original treated any non-empty value -- even 'false' -- as
        # truthy in the combined branch.
        public = request.GET.get('public', False) == 'true'
        page_number = request.GET.get('page_number', 1)
        page_size = request.GET.get('page_size', 5)
        if request.user.is_admin == True and user_type == "admin":
            if user_id and public:
                queryset = Notification.objects.all()
            elif user_id:
                queryset = Notification.objects.filter(user=user_id)
            elif public:
                # NOTE(review): user__isnull=False selects notifications that
                # HAVE a user, while the non-admin branch treats user-less
                # rows as the broadcast/"public" ones -- confirm intent.
                queryset = Notification.objects.filter(user__isnull=False)
            else:
                # The original left queryset unbound here (NameError on the
                # pagination call); default to everything for admins.
                queryset = Notification.objects.all()
        else:
            query = (Q(user=request.user) | Q(user__isnull=True)) & Q(active=True)
            queryset = Notification.objects.filter(query)
        data = customPagination(queryset=queryset, page_size=page_size,
                                page_number=page_number,
                                Serializers=NotificationSerializer)
        return responseFormat(data=data, status_code=status.HTTP_200_OK)
| StarcoderdataPython |
11214550 | <filename>networking_cisco/plugins/cisco/cfg_agent/device_status.py
# Copyright 2014 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import socket
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import timeutils
import networking_cisco.plugins.cisco.common.cisco_constants as cc
from neutron.agent.linux import utils as linux_utils
from networking_cisco._i18n import _
import pprint
LOG = logging.getLogger(__name__)
# Monitoring knobs, registered under the [cfg_agent] oslo.config group.
STATUS_OPTS = [
    cfg.IntOpt('device_connection_timeout', default=30,
               help=_("Time in seconds for connecting to a hosting device")),
    cfg.IntOpt('hosting_device_dead_timeout', default=300,
               help=_("The time in seconds until a backlogged hosting device "
                      "is presumed dead. This value should be set up high "
                      "enough to recover from a period of connectivity loss "
                      "or high load when the device may not be responding.")),
]
cfg.CONF.register_opts(STATUS_OPTS, "cfg_agent")
def _is_pingable(ip):
    """Return True if *ip* answers ICMP echo requests.

    Sends 5 pings, 0.2 s apart with a 1 s per-packet timeout, via the
    system ping binary; a non-zero exit status means unreachable.

    :param ip: IP to check
    :return: bool - True or False depending on pingability.
    """
    cmd = ['ping', '-c', '5', '-W', '1', '-i', '0.2', ip]
    try:
        linux_utils.execute(cmd, check_exit_code=True)
    except RuntimeError:
        LOG.warning("Cannot ping ip address: %s", ip)
        return False
    return True
def _can_connect(ip, port):
"""Checks if a TCP port at IP address is possible to connect to"""
cs = socket.socket()
try:
cs.connect((ip, port))
cs.close()
return True
except socket.error:
return False
class DeviceStatus(object):
"""Device status and backlog processing."""
_instance = None
def __new__(cls):
    # Classic singleton: every instantiation returns the same object.
    if not cls._instance:
        cls._instance = super(DeviceStatus, cls).__new__(cls)
    return cls._instance
def __init__(self):
    # hosting-device id -> {'hd': <hosting device dict>} for devices being
    # monitored. NOTE: __init__ runs on every DeviceStatus() call even
    # though __new__ returns a singleton, so this resets the backlog.
    self.hosting_devices_backlog = {}
    # When True, reachable devices are also kept in the backlog so they
    # continue to be monitored (heartbeat mode).
    self.enable_heartbeat = False
def backlog_hosting_device(self, hosting_device):
    """Add *hosting_device* to the monitoring backlog if not already present."""
    hd_id = hosting_device['id']
    hd_mgmt_ip = hosting_device['management_ip_address']
    # Normalize 'created_at' to a datetime (it may arrive as a string).
    if not isinstance(hosting_device['created_at'], datetime.datetime):
        hosting_device['created_at'] = datetime.datetime.strptime(
            hosting_device['created_at'], '%Y-%m-%d %H:%M:%S')
    if hd_id in self.hosting_devices_backlog:
        return
    if 'hd_state' not in hosting_device:
        hosting_device['hd_state'] = hosting_device['status']
    # Don't start the dead-timeout clock before the device has had time
    # to finish booting.
    hosting_device['backlog_insertion_ts'] = max(
        timeutils.utcnow(),
        hosting_device['created_at'] +
        datetime.timedelta(seconds=hosting_device['booting_time']))
    self.hosting_devices_backlog[hd_id] = {'hd': hosting_device}
    LOG.debug("Hosting device: %(hd_id)s @ %(ip)s is now added "
              "to backlog", {'hd_id': hd_id, 'ip': hd_mgmt_ip})
def backlog_hosting_devices(self, hosting_devices):
    # Backlog each device individually; delegates to backlog_hosting_device.
    for hosting_device in hosting_devices:
        self.backlog_hosting_device(hosting_device)
def get_backlogged_hosting_devices(self):
    # Returns a live view (dict_keys) of backlogged hosting-device ids.
    return self.hosting_devices_backlog.keys()
def get_backlogged_hosting_devices_info(self):
    """Return info dicts for the hosting devices currently deemed dead.

    Filters on the cc.HD_DEAD constant instead of the literal 'Dead' so the
    filter cannot drift from the state values written by the monitor (the
    rest of this class compares against cc.HD_DEAD).
    """
    return self.get_monitored_hosting_devices_info(
        hd_state_filter=cc.HD_DEAD)
def get_dead_hosting_devices_info(self):
    """
    Get a list of hosting devices that have been marked dead
    :return: List of dead hosting device ids
    """
    return [entry['hd']['id']
            for entry in self.hosting_devices_backlog.values()
            if entry['hd']['hd_state'] == cc.HD_DEAD]
def get_monitored_hosting_devices_info(self, hd_state_filter=None):
    """Return summary dicts for every hosting device monitored by this agent.

    If *hd_state_filter* is given, only devices whose last known state
    matches it are included.
    """
    dead_wait = datetime.timedelta(
        seconds=cfg.CONF.cfg_agent.hosting_device_dead_timeout)
    info = []
    for entry in self.hosting_devices_backlog.values():
        hd = entry['hd']
        # Guard clause replaces the original display_hd flag dance.
        if hd_state_filter is not None and hd['hd_state'] != hd_state_filter:
            continue
        created_time = hd['created_at']
        backlogged_at = hd['backlog_insertion_ts']
        booted_at = created_time + datetime.timedelta(seconds=hd['booting_time'])
        info.append({'host id': hd['id'],
                     'hd_state': hd['hd_state'],
                     'created at': str(created_time),
                     'backlogged at': str(backlogged_at),
                     'estimate booted at': str(booted_at),
                     'considered dead at': str(backlogged_at + dead_wait)})
    return info
def is_hosting_device_reachable(self, hosting_device):
    """Check the hosting device which hosts this resource is reachable.
    If the resource is not reachable, it is added to the backlog.
    * heartbeat revision
    We want to enqueue all hosting-devices into the backlog for
    monitoring purposes
    adds key/value pairs to hd (aka hosting_device dictionary)
    _is_pingable : if it returns true,
    hd['hd_state']='Active'
    _is_pingable : if it returns false,
    hd['hd_state']='Unknown'
    :param hosting_device : dict of the hosting device
    :returns: True if device is reachable, else False
    """
    ret_val = False
    hd = hosting_device
    hd_id = hosting_device['id']
    hd_mgmt_ip = hosting_device['management_ip_address']
    # A device already declared dead stays non-reachable; recovery is
    # handled by the periodic backlog check, not here.
    dead_hd_list = self.get_dead_hosting_devices_info()
    if hd_id in dead_hd_list:
        LOG.debug("Hosting device: %(hd_id)s@%(ip)s is already marked as"
                  " Dead. It is assigned as non-reachable",
                  {'hd_id': hd_id, 'ip': hd_mgmt_ip})
        return False
    # Modifying the 'created_at' to a date time object if it is not
    if not isinstance(hd['created_at'], datetime.datetime):
        hd['created_at'] = datetime.datetime.strptime(hd['created_at'],
                                                      '%Y-%m-%d %H:%M:%S')
    if _is_pingable(hd_mgmt_ip):
        LOG.debug("Hosting device: %(hd_id)s@%(ip)s is reachable.",
                  {'hd_id': hd_id, 'ip': hd_mgmt_ip})
        hd['hd_state'] = cc.HD_ACTIVE
        ret_val = True
    else:
        LOG.debug("Hosting device: %(hd_id)s@%(ip)s is NOT reachable.",
                  {'hd_id': hd_id, 'ip': hd_mgmt_ip})
        hd['hd_state'] = cc.HD_NOT_RESPONDING
        ret_val = False
    # Backlog unreachable devices always; reachable ones only when
    # heartbeat monitoring is enabled.
    if self.enable_heartbeat is True or ret_val is False:
        self.backlog_hosting_device(hd)
    return ret_val
def check_backlogged_hosting_devices(self, driver_mgr):
"""Checks the status of backlogged hosting devices.
Skips newly spun up instances during their booting time as specified
in the boot time parameter.
Each hosting-device tracked has a key, hd_state, that represents the
last known state for the hosting device. Valid values for hd_state
are ['Active', 'Unknown', 'Dead']
Each time check_backlogged_hosting_devices is invoked, a ping-test
is performed to determine the current state. If the current state
differs, hd_state is updated.
The hd_state transitions/actions are represented by the following
table.
+------------+---------------------+----------------+----------------+
| current / | Active | Unknown | Dead |
| last state | | | |
+============+=====================+================+================+
| Active | Device is reachable.| Device was | Dead device |
| | No state change | temporarily | recovered. |
| | | unreachable. | Trigger resync |
+------------+---------------------+----------------+----------------+
| Unknown | Device connectivity | Device | Not a valid |
| | test failed. Set | connectivity | state |
| | backlog timestamp | test failed. | transition. |
| | and wait for dead | Dead timeout | |
| | timeout to occur. | has not | |
| | | occurred yet. | |
+------------+---------------------+----------------+----------------+
| Dead | Not a valid state | Dead timeout | Device is |
| | transition. | for device has | still dead. |
| | | elapsed. | No state |
| | | Notify plugin | change. |
+------------+---------------------+----------------+----------------+
:returns: A dict of the format:
::
{"reachable": [<hd_id>,..],
"dead": [<hd_id>,..],
"revived": [<hd_id>,..]}
* reachable - a list of hosting devices that are now reachable
* dead - a list of hosting devices deemed dead
* revived - a list of hosting devices (dead to active)
"""
response_dict = {'reachable': [], 'revived': [], 'dead': []}
LOG.debug("Current Backlogged hosting devices: \n%s\n",
self.hosting_devices_backlog.keys())
for hd_id in self.hosting_devices_backlog.keys():
hd = self.hosting_devices_backlog[hd_id]['hd']
if not timeutils.is_older_than(hd['created_at'],
hd['booting_time']):
LOG.info("Hosting device: %(hd_id)s @ %(ip)s hasn't "
"passed minimum boot time. Skipping it. ",
{'hd_id': hd_id, 'ip': hd['management_ip_address']})
continue
LOG.info("Checking hosting device: %(hd_id)s @ %(ip)s for "
"reachability.", {'hd_id': hd_id,
'ip': hd['management_ip_address']})
hd_state = hd['hd_state']
if _is_pingable(hd['management_ip_address']):
if hd_state == cc.HD_NOT_RESPONDING:
LOG.debug("hosting devices revived & reachable, %s" %
(pprint.pformat(hd)))
hd['hd_state'] = cc.HD_ACTIVE
# hosting device state
response_dict['reachable'].append(hd_id)
elif hd_state == cc.HD_DEAD:
# test if management port is available
if _can_connect(hd['management_ip_address'],
hd['protocol_port']) is True:
LOG.debug("Dead hosting devices revived %s" %
(pprint.pformat(hd)))
hd['hd_state'] = cc.HD_ACTIVE
response_dict['revived'].append(hd_id)
else:
LOG.debug("Cannot connect to management port %(p)d on "
"hosting device with ip %(ip)s",
{'p': hd['protocol_port'],
'ip': hd['management_ip_address']})
else:
LOG.debug("No-op."
"_is_pingable is True and current"
" hd['hd_state']=%s" % hd_state)
LOG.info("Hosting device: %(hd_id)s @ %(ip)s is now "
"reachable. Adding it to response",
{'hd_id': hd_id, 'ip': hd['management_ip_address']})
else:
LOG.info("Hosting device: %(hd_id)s %(hd_state)s"
" @ %(ip)s not reachable ",
{'hd_id': hd_id,
'hd_state': hd['hd_state'],
'ip': hd['management_ip_address']})
if hd_state == cc.HD_ACTIVE:
LOG.debug("hosting device lost connectivity, %s" %
(pprint.pformat(hd)))
hd['backlog_insertion_ts'] = timeutils.utcnow()
hd['hd_state'] = cc.HD_NOT_RESPONDING
elif hd_state == cc.HD_NOT_RESPONDING:
if timeutils.is_older_than(
hd['backlog_insertion_ts'],
cfg.CONF.cfg_agent.hosting_device_dead_timeout):
# current hd_state is now dead, previous state: Unknown
hd['hd_state'] = cc.HD_DEAD
LOG.debug("Hosting device: %(hd_id)s @ %(ip)s hasn't "
"been reachable for the "
"last %(time)d seconds. "
"Marking it dead.",
{'hd_id': hd_id,
'ip': hd['management_ip_address'],
'time': cfg.CONF.cfg_agent.
hosting_device_dead_timeout})
response_dict['dead'].append(hd_id)
LOG.debug("Response: %s", response_dict)
return response_dict
| StarcoderdataPython |
9769553 | from functools import lru_cache
import numpy as np
__all__ = ['my_sum', 'factorial', 'trig_function']
def my_sum(iterable):
    """Return the sum of the items in *iterable*, starting from 0."""
    total = 0
    for item in iterable:
        total = total + item
    return total
@lru_cache(maxsize=None)  # Note: -> @cache in python >= 3.9
def factorial(n):
    """Return n! for a non-negative integer n, memoised across calls.

    :param n: non-negative integer
    :returns: n factorial (1 when n == 0)
    :raises ValueError: if n is negative (previously this recursed until
        RecursionError)
    """
    if n < 0:
        raise ValueError("factorial() not defined for negative values")
    return n * factorial(n - 1) if n else 1
def trig_function(iterable):
    """Return the element-wise sine of *iterable* (numpy semantics)."""
    result = np.sin(iterable)
    return result
251710 | <reponame>invisprints/flyai_match
# -*- coding: utf-8 -*
from flyai.processor.base import Base
from flyai.processor.download import check_download
from path import DATA_PATH
from PIL import Image
from flyai.processor.base import Base
import cv2
from path import DATA_PATH
import os
import numpy as np
from albumentations import (
OneOf, Compose, Resize, RandomCrop, Flip, RandomRotate90, HueSaturationValue, GaussNoise,
Rotate, Blur, Normalize, CenterCrop, RandomBrightnessContrast, Rotate, RGBShift, HorizontalFlip,
ShiftScaleRotate, CoarseDropout
)
# Input geometry: images are resized to img_size, then cropped to crop_size.
img_size = (256, 256)
crop_size = (224, 224)
# Per-channel RGB normalisation statistics (the standard ImageNet values).
means = np.array([0.485, 0.456, 0.406])
stds = np.array([0.229, 0.224, 0.225])
# Training pipeline: random crop plus stochastic augmentation
# (flip, brightness/contrast jitter, small shift/scale/rotate, cutout).
train_aug = Compose([
    Resize(img_size[0], img_size[1]), RandomCrop(crop_size[0], crop_size[1]),
    HorizontalFlip(p=0.5), RandomBrightnessContrast(0.3, 0.3), ShiftScaleRotate(0.1), CoarseDropout(8, 32, 32),
    Normalize(means, stds)
])
# Evaluation pipeline: deterministic center crop and normalisation only.
valid_aug = Compose([
    Resize(img_size[0], img_size[1]), CenterCrop(crop_size[0], crop_size[1]),
    Normalize(means, stds)
])
# Copy processor.py from the sample project over this file to replace it.
class Processor(Base):
    """flyai data processor: turns CSV rows (image paths / labels) into
    model-ready arrays.

    Training samples go through ``train_aug`` (random crop + augmentation),
    evaluation samples through ``valid_aug`` (deterministic center crop).
    Images are returned channel-first (C, H, W), normalised with
    ``means``/``stds``.
    """

    def __init__(self):
        # Model input shape as [height, width, channels].
        # Fix: previously [crop_size, crop_size, 3], i.e. a list containing
        # two (224, 224) tuples rather than the intended H/W integers.
        self.img_shape = [crop_size[0], crop_size[1], 3]

    def input_x(self, image_path):
        """Load and augment one training image.

        Called repeatedly by dataset.next_train_batch() and
        dataset.next_validation_batch(); *image_path* is the x column of a
        CSV row, relative to DATA_PATH.

        :returns: float ndarray of shape (3, H, W), channel first.
        """
        img = cv2.imread(os.path.join(DATA_PATH, image_path))
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # cv2 loads BGR
        img = train_aug(image=img)['image']
        img = img.transpose((2, 0, 1))  # HWC -> CHW (channel first)
        return img

    def output_x(self, image_path):
        """Load and deterministically preprocess one image for evaluation.

        Downloads the file into DATA_PATH first when it is not present
        locally, then applies the validation pipeline.
        """
        path = check_download(image_path, DATA_PATH)
        img = cv2.imread(path)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        img = valid_aug(image=img)['image']
        img = img.transpose((2, 0, 1))  # channel first
        return img

    def input_y(self, labels):
        """Return the label of one CSV row unchanged.

        The model consumes raw class indices directly; a one-hot encoding
        was deliberately not applied here.
        """
        return labels

    def output_y(self, data):
        """Return the raw model output; invoked via dataset.to_categorys."""
        return data
3392318 | <gh_stars>0
import logging
import telegram
from telegram import Bot
from telegram.ext import Updater, CommandHandler
# Root logging configuration: timestamped INFO-level output for the bot.
logging.basicConfig(
    format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", level=logging.INFO
)
logger = logging.getLogger(__name__)
API_TOKEN = None  # TODO: fill in the Telegram Bot API token before running
# Bot client shared by the handlers below; a None token will fail at startup.
ibot=Bot(token=API_TOKEN)
# ############################### Main ####################################
def check_isMember(update, context):
    """/check command handler: report whether a user belongs to a group.

    Queries the Bot API for the configured user's membership in the
    configured group and logs the result. Telegram answers with a
    BadRequest when the user is not a member of the group.

    :param update: incoming telegram.Update (unused, required by the API)
    :param context: handler context (unused, required by the API)
    """
    group_id = None  # TODO: target group chat id
    usr_id = None  # TODO: Telegram user id of the member to check
    try:
        res = ibot.getChatMember(chat_id=group_id, user_id=usr_id)
        # Fix: use the module logger instead of print so output is
        # consistent with the logging.basicConfig set up above.
        logger.info(res)
    except telegram.error.BadRequest:
        logger.info("The User isn't a member of the group.")
def main():
    """Wire the /check handler into the dispatcher and poll for updates."""
    updater = Updater(bot=ibot, use_context=True)
    dispatcher = updater.dispatcher
    dispatcher.add_handler(CommandHandler("check", check_isMember))
    updater.start_polling()
    logger.info("checking")
    updater.idle()
# Script entry point: start the bot only when executed directly.
if __name__ == "__main__":
    main()
| StarcoderdataPython |
6488175 | import unittest
import uuid
from time import sleep
from forge_sdk import rpc
from event_chain.application import controllers
from event_chain.application import models
class SimpeFlowTest(unittest.TestCase):
    """End-to-end smoke test of the event-chain flow.

    Alice creates an event; Mike buys two tickets for it. This is an
    integration test: it requires a running chain node reachable through
    forge_sdk.rpc, and it sleeps to let transactions commit.
    """

    def setUp(self):
        # Two fresh on-chain wallets per test: Alice is the event issuer,
        # Mike is the ticket buyer.
        self.alice = rpc.create_wallet(moniker='lily1', passphrase='<PASSWORD>')
        self.mike = rpc.create_wallet(moniker='rose1', passphrase='<PASSWORD>')

    def test_all(self):
        """Create an event, verify its state, buy tickets, verify ownership."""
        general_factory_params = {
            'title': 'Jay Chou Concerts',
            'price': 10,
            'limit': 20,
            'start_time': '2019/09/27 9pm',
            'end_time': '2019/09/27 11pm',
            'location': 'Las Vegas Hall',
            'img_url': 'http://img.com',
            # Unique description so repeated runs create distinct assets.
            'description': 'test_description:' + str(uuid.uuid1()),
        }
        event_address = controllers.create_event_general(
            wallet=self.alice.wallet,
            **general_factory_params)
        print('alice', self.alice.wallet.address)
        # Wait for the event-creation transaction to be committed on chain.
        sleep(5)
        event = models.EventState(rpc.get_single_asset_state(event_address))
        print('issuer', event.issuer)
        assert event.issuer == self.alice.wallet.address
        assert event.limit == 20
        assert event.title == 'Jay Chou Concerts'
        tickets = controllers.buy_tickets_general(event_address, 2,
                                                  self.mike.wallet)
        print('tickets', tickets)
        assert len(tickets) == 2
        # Wait for the ticket-purchase transactions to be committed.
        sleep(5)
        for ticket in tickets:
            res = rpc.get_single_asset_state(ticket)
            # Tickets are issued by Alice but now owned by Mike.
            assert res.issuer == self.alice.wallet.address
            assert res.owner == self.mike.wallet.address
| StarcoderdataPython |
8135157 | #!/usr/bin/env pipenv-python
import sys
import os
import getopt
import base64
import ujson
import foolaunch
def usage(*args):
    """Print an optional error message and the usage summary to stderr,
    then terminate the process with exit status 1.

    :param args: when non-empty, args[0] is shown as an error message
        prefixed with the program name.
    """
    prog = os.path.basename(sys.argv[0])
    if args:
        print("{0}: {1}".format(prog, args[0]), file=sys.stderr)
    summary = [
        "usage: {0} [option]* <cfg>".format(prog),
        "Options and arguments:",
        " -p, --profile <arg> : aws credentials profile",
        " -r, --region <arg> : aws region",
        " --image-filters <arg> : ami filters",
        " -t, --instance-type <arg> : ec2 instance type",
        " --placement <arg> : ec2 availability zone",
        " --subnet <arg> : vpc subnet name",
        " --key <arg> : ec2 key pair name",
        " --instance-profile <arg> : iam instance profile name",
        " --security-groups <arg> : ec2 security group names (comma separated)",
        " --tags <arg> : instance tags as JSON string",
        " --root-volume-size <arg> : root volume size in GB",
        " --load-balancers <arg> : load balancer names (comma separated)",
        " --user-data-file <arg> : file containing instance user data",
        " --spot, --no-spot : use spot pricing (or not)",
        " --dry-run : dry run",
        " --name <arg> : ec2 instance name",
        " -n, --count <arg> : number of instances to launch",
        " --price <arg> : max price",
    ]
    print("\n".join(summary), file=sys.stderr)
    sys.exit(1)
def parse_command_line(cfg):
    """Parse sys.argv into the launch session *cfg*.

    ``cfg.apply(<name>)`` is called with the required positional
    configuration name, then one attribute is set on *cfg* per option.

    :param cfg: session/config object exposing ``apply`` and writable
        option attributes.
    :raises SystemExit: via usage() on malformed options or a missing
        configuration name.
    """
    try:
        opts, args = getopt.getopt(sys.argv[1:], "p:r:t:n:", [
            "profile=",
            "region=",
            "image-filters=",
            "instance-type=",
            "placement=",
            "subnet=",
            "key=",
            "instance-profile=",
            "security-groups=",
            "tags=",
            "root-volume-size=",
            "load-balancers=",
            "user-data-file=",
            "spot",
            "no-spot",
            "dry-run",
            "name=",
            "count=",
            "price="
        ])
    except getopt.GetoptError as err:
        # Fix: surface getopt's own diagnostic (e.g. which option was bad)
        # instead of discarding it for a generic "bad option".
        usage(str(err))
    if len(args) < 1:
        usage("missing configuration name")
    cfg.apply(args[0])
    for opt, arg in opts:
        if opt in ('-p', '--profile'):
            cfg.profile = arg
        elif opt in ('-r', '--region'):
            cfg.region = arg
        elif opt == '--image-filters':
            cfg.image_filters = ujson.loads(arg)
        elif opt in ('-t', '--instance-type'):
            cfg.instance_type = arg
        elif opt == '--placement':
            cfg.placement = arg
        elif opt == '--subnet':
            cfg.subnet = arg
        elif opt == '--key':
            cfg.key = arg
        elif opt == '--instance-profile':
            cfg.instance_profile = arg
        elif opt == '--security-groups':
            cfg.security_groups = arg.split(',')
        elif opt == '--tags':
            cfg.tags = ujson.loads(arg)
        elif opt == '--root-volume-size':
            cfg.root_volume_size = int(arg)
        elif opt == '--load-balancers':
            cfg.load_balancers = arg.split(',')
        elif opt == '--user-data-file':
            with open(arg, 'rb') as in_file:
                cfg.user_data = in_file.read()
        elif opt == '--spot':
            cfg.spot = True
        elif opt == '--no-spot':
            cfg.spot = False
        elif opt == '--dry-run':
            cfg.dry_run = True
        elif opt == '--name':
            cfg.name = arg
        elif opt in ('-n', '--count'):
            cfg.count = int(arg)
        elif opt == '--price':
            cfg.price = float(arg)
        else:
            # Unreachable: getopt only yields options from the lists above.
            # Fix: raise instead of `assert False` (asserts vanish under -O).
            raise AssertionError("unhandled option: " + opt)
def main():
    """Build a launch session from the command line and execute it.

    Any failure is reported through usage(), which exits with status 1.
    """
    try:
        session = foolaunch.Session()
        parse_command_line(session)
        session.launch()
    except Exception as exc:
        usage(exc)
# Script entry point: launch only when executed directly, not on import.
if __name__ == "__main__":
    main()
| StarcoderdataPython |
4833050 | <filename>rendezvous/LineBot/admin.py<gh_stars>0
from django.contrib import admin
from .models import Reservation, Location, User, Announcement, Report
# Expose each LineBot model in the Django admin interface.
# (Same registrations as before, expressed as a single loop.)
for _model in (Reservation, Location, User, Announcement, Report):
    admin.site.register(_model)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.