text stringlengths 38 1.54M |
|---|
import torch

# Number of sample points and the sampling interval bounds.
n = 1000
mini = -10
maxi = 10

# torch.rand is uniform on [0, 1); scaling by (mini - maxi) and shifting by
# maxi gives points uniform on (mini, maxi].
random_points = (mini - maxi) * torch.rand(n, 2) + maxi
PI = torch.tensor(3.14159265359)

# f(x1, x2) = sin(x1 + x2/pi), evaluated for all points at once.
# (Replaces the original per-point Python loop, which also built a `points`
# list that was identical to `random_points`.)
results = torch.sin(random_points[:, 0] + random_points[:, 1] / PI)

# One row per sample: [x1, x2, f(x1, x2)].
database = torch.column_stack((random_points, results))
# print(database)
torch.save(database, "mydataset.dat")
|
import sys
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
import newimagedlg
class MainWindow(QMainWindow):
    """Main window showing a single centred label, with a File/New action
    that replaces the label text."""

    def __init__(self, parent=None):
        super(MainWindow, self).__init__(parent)
        self.dirty = False      # tracks unsaved changes (not yet used elsewhere in this file)
        self.filename = None    # current document path (not yet used elsewhere in this file)

        self.messageLabel = QLabel()
        self.messageLabel.setAlignment(Qt.AlignCenter)
        self.messageLabel.setText('居中')
        self.setCentralWidget(self.messageLabel)

        fileNewAction = QAction(QIcon('images/filenew.png'), '&New', self)
        fileNewAction.setShortcut(QKeySequence.New)
        fileNewAction.setToolTip('Create a new message')
        # Connect the action's triggered signal to the printMessage slot.
        fileNewAction.triggered.connect(self.printMessage)

        self.fileMenu = self.menuBar().addMenu('&File')
        self.fileMenu.addAction(fileNewAction)
        # Removed: an unused `QMetaObject()` instance was created here (dead
        # code); the signal/slot connection above already handles the trigger.

    def printMessage(self):
        """Slot for the File/New action: update the central label."""
        text = 'File/New 菜单'
        self.messageLabel.setText(text)
# Bootstrap the Qt application, show the main window and enter the event loop.
app = QApplication(sys.argv)
app.setApplicationName('test QMetaObject')

window = MainWindow()
window.show()
app.exec_()
|
# 1 — simple greeting
print("My name is Anmol kalra")

# 2 — string concatenation
first = "Anmol"
last = "kalra"
print(first + last)

# 3 — read three integers and echo them back
x = int(input("enter value for x "))
y = int(input("enter value for y "))
z = int(input("enter value for z "))
print("x=", x, " y =", y, " z=", z)

# 4
print("let's get started")

# 5 — str.format with positional placeholders
s = "Acadview"
course = "Python"
fees = 5000
summary = "{} {} course fee is {}".format(s, course, fees)
print(summary)

# 6 — %-style formatting
name = "Tony Stark"
salary = 1000000
print("%s,%d" % (name, salary))
|
"""
Copyright 2019 Goldman Sachs.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
import copy
import datetime as dt
import logging
import weakref
from abc import ABCMeta
from concurrent.futures import Future, ThreadPoolExecutor
from threading import Lock
from typing import Iterable, Optional, Mapping, Tuple, Union
from gs_quant.api.risk import RiskApi
from gs_quant.base import Priceable, PricingKey, Scenario
from gs_quant.context_base import ContextBaseWithDefault, nullcontext
from gs_quant.datetime.date import business_day_offset, is_business_day
from gs_quant.risk import DataFrameWithInfo, ErrorValue, FloatWithInfo, MarketDataScenario, \
PricingDateAndMarketDataAsOf, \
ResolvedInstrumentValues, RiskMeasure, RiskPosition, RiskRequest, \
RiskRequestParameters, SeriesWithInfo
from gs_quant.risk.results import MultipleRiskMeasureFuture
from gs_quant.risk import CompositeScenario, StringWithInfo
from gs_quant.session import GsSession
from gs_quant.target.data import MarketDataCoordinate as __MarketDataCoordinate
_logger = logging.getLogger(__name__)
class PricingFuture(Future):
    """A Future bound to the PricingContext that will produce its result."""

    def __init__(self, pricing_context):
        super().__init__()
        self.__pricing_context = pricing_context

    def result(self, timeout=None):
        """Return the result of the call that the future represents.

        :param timeout: The number of seconds to wait for the result if the future isn't done.
            If None, then there is no limit on the wait time.

        Returns:
            The result of the call that the future represents.

        Raises:
            CancelledError: If the future was cancelled.
            TimeoutError: If the future didn't finish executing before the given timeout.
            Exception: If the call raised then that exception will be raised.
        """
        if self.done():
            return super().result(timeout=timeout)

        # Guard against a deadlock: waiting on a result from inside the very
        # pricing context that is still producing it can never complete.
        context = self.__pricing_context
        if PricingContext.current == context and context.is_entered:
            raise RuntimeError('Cannot evaluate results under the same pricing context being used to produce them')

        return super().result(timeout=timeout)
class MarketDataCoordinate(__MarketDataCoordinate):

    def __str__(self):
        # Render as mkt_type|mkt_asset|mkt_class|point_1_point_2|quoting_style,
        # substituting the empty string for any unset field.
        fields = (self.mkt_type,
                  self.mkt_asset,
                  self.mkt_class,
                  '_'.join(self.mkt_point or ()),
                  self.mkt_quoting_style)
        return "|".join(field or '' for field in fields)
class PricingCache(metaclass=ABCMeta):
    """
    Weakref cache for instrument calcs

    Results are keyed first by priceable (weakly, so entries disappear when
    their instrument is garbage-collected), then by risk measure, then by
    pricing key.  Project types in signatures are written as PEP 484 forward
    references (strings) since they are only needed at call time.
    """
    __cache = weakref.WeakKeyDictionary()

    @classmethod
    def clear(cls):
        """Empty the cache.

        Bug fix: the original implementation assigned a fresh
        WeakKeyDictionary to a *local* variable named ``__cache``, leaving the
        class-level cache untouched — clear() was a silent no-op.
        """
        cls.__cache.clear()

    @classmethod
    def missing_pricing_keys(cls,
                             priceable: 'Priceable',
                             risk_measure: 'RiskMeasure',
                             pricing_key: Optional['PricingKey'] = None) -> Tuple['PricingKey', ...]:
        """Return the subset of single-date pricing keys with no cached value."""
        pricing_key = pricing_key or PricingContext.current.pricing_key
        if priceable in cls.__cache and risk_measure in cls.__cache[priceable]:
            cached = cls.__cache[priceable][risk_measure]
            return tuple(k for k in pricing_key if k not in cached)
        else:
            # Nothing cached for this priceable/measure: every key is missing
            return pricing_key

    @classmethod
    def get(cls,
            priceable: 'Priceable',
            risk_measure: 'RiskMeasure',
            pricing_key: Optional['PricingKey'] = None,
            return_partial: bool = False) -> Optional[Union['DataFrameWithInfo', 'FloatWithInfo', 'SeriesWithInfo']]:
        """Return the cached result for priceable/measure under pricing_key, or None.

        :param return_partial: for multi-date keys, if True return a composed
            result even when only some of the dates are cached
        """
        if priceable not in cls.__cache or risk_measure not in cls.__cache[priceable]:
            return

        pricing_key = pricing_key or PricingContext.current.pricing_key
        cached = cls.__cache[priceable][risk_measure]

        if len(pricing_key.pricing_market_data_as_of) > 1:
            # Multi-date key (e.g. historical pricing): compose per-date values
            values = [cached[k] for k in pricing_key if k in cached]
            if values and (return_partial or len(values) == len(pricing_key.pricing_market_data_as_of)):
                return values[0].compose(values, pricing_key)
        else:
            return cached.get(pricing_key)

    @classmethod
    def put(cls,
            priceable: 'Priceable',
            risk_measure: 'RiskMeasure',
            result: Union['DataFrameWithInfo', 'FloatWithInfo', 'SeriesWithInfo'],
            pricing_key: Optional['PricingKey'] = None):
        """Cache a result, storing one entry per single-date pricing key."""
        pricing_key = pricing_key or PricingContext.current.pricing_key
        # Only typed result values are cacheable (errors are never cached)
        if isinstance(result, (DataFrameWithInfo, FloatWithInfo, SeriesWithInfo)):
            cls.__cache.setdefault(priceable, {}).setdefault(risk_measure, {}).update(
                {k: result.for_pricing_key(k) for k in pricing_key})

    @classmethod
    def drop(cls, priceable: 'Priceable'):
        """Remove all cached results for the given priceable."""
        if priceable in cls.__cache:
            cls.__cache.pop(priceable)
class PricingContext(ContextBaseWithDefault):
    """
    A context for controlling pricing and market data behaviour
    """

    def __init__(self,
                 pricing_date: Optional[dt.date] = None,
                 market_data_as_of: Optional[Union[dt.date, dt.datetime]] = None,
                 market_data_location: Optional[str] = None,
                 is_async: bool = False,
                 is_batch: bool = False,
                 use_cache: bool = False,
                 visible_to_gs: bool = False,
                 csa_term: Optional[str] = None,
                 poll_for_batch_results: Optional[bool] = False,
                 batch_results_timeout: Optional[int] = None
                 ):
        """
        The methods on this class should not be called directly. Instead, use the methods on the instruments,
        as per the examples

        :param pricing_date: the date for pricing calculations. Default is today
        :param market_data_as_of: the date/datetime for sourcing market data (defaults to 1 business day before
            pricing_date)
        :param market_data_location: the location for sourcing market data ('NYC', 'LDN' or 'HKG' (defaults to LDN)
        :param is_async: if True, return (a future) immediately. If False, block (defaults to False)
        :param is_batch: use for calculations expected to run longer than 3 mins, to avoid timeouts.
            It can be used with is_aync=True|False (defaults to False)
        :param use_cache: store results in the pricing cache (defaults to False)
        :param visible_to_gs: are the contents of risk requests visible to GS (defaults to False)
        :param csa_term: the csa under which the calculations are made. Default is local ccy ois index

        **Examples**

        To change the market data location of the default context:

        >>> from gs_quant.markets import PricingContext
        >>> import datetime as dt
        >>>
        >>> PricingContext.current = PricingContext(market_data_location='LDN')

        For a blocking, synchronous request:

        >>> from gs_quant.instrument import IRCap
        >>> cap = IRCap('5y', 'GBP')
        >>>
        >>> with PricingContext():
        >>>     price_f = cap.dollar_price()
        >>>
        >>> price = price_f.result()

        For an asynchronous request:

        >>> with PricingContext(is_async=True):
        >>>     price_f = cap.dollar_price()
        >>>
        >>> while not price_f.done:
        >>>     ...
        """
        super().__init__()

        if pricing_date is None:
            # Default to the most recent business day (today, rolled backwards
            # one day at a time until a business day is found)
            pricing_date = dt.date.today()
            while not is_business_day(pricing_date):
                pricing_date -= dt.timedelta(days=1)

        self.__pricing_date = pricing_date
        self.__csa_term = csa_term
        self.__market_data_as_of = market_data_as_of
        # Do not use self.__class__.current - it will cause a cycle
        self.__market_data_location = market_data_location or (
            self.__class__.path[0].market_data_location if self.__class__.path else 'LDN')
        self.__is_async = is_async
        self.__is_batch = is_batch
        self.__poll_for_batch_results = poll_for_batch_results
        self.__batch_results_timeout = batch_results_timeout
        # Pending work: provider -> position -> scenario -> set of risk measures
        self.__risk_measures_in_scenario_by_provider_and_position = {}
        # Outstanding futures: (scenario, risk measure) -> position -> future
        self.__futures = {}
        self.__use_cache = use_cache
        self.__visible_to_gs = visible_to_gs
        self.__positions_by_provider = {}
        # Protects the two dicts above across the request/result threads
        self.__lock = Lock()

    def _on_exit(self, exc_type, exc_val, exc_tb):
        # On clean exit, dispatch all calculations queued while entered
        if exc_val:
            raise exc_val
        else:
            self._calc()

    def _calc(self):
        # All queued work is drained into the *active* context's state, so
        # nested contexts contribute to the outermost entered context
        positions_by_provider = self.__active_context.__positions_by_provider
        session = GsSession.current
        batch_result = Future() if self.__is_batch else None
        batch_providers = set()
        batch_lock = Lock() if self.__is_batch else nullcontext()

        def handle_results(requests_to_results: Mapping[RiskRequest, dict]):
            # Deliver each request's results (or its error) to the futures
            for request_, result in requests_to_results.items():
                try:
                    self._handle_results(result, request_)
                except Exception as e:
                    try:
                        # Fall back to recording the handling error itself
                        self._handle_results(e, request_)
                    except Exception as he:
                        _logger.error('Error setting error result: ' + str(he))

        def run_requests(requests_: Iterable[RiskRequest], provider_: RiskApi):
            try:
                with session:
                    results = provider_.calc_multi(requests_)
                    if self.__is_batch:
                        # Batch mode: calc_multi returns result ids to poll
                        get_batch_results(dict(zip(results, requests_)), provider_)
                    else:
                        handle_results(dict(zip(requests_, results)))
            except Exception as e:
                # Fail every request in this group with the same error
                handle_results({r: e for r in requests_})

        def get_batch_results(ids_to_requests: Mapping[str, RiskRequest], provider_: RiskApi):
            def get_results():
                try:
                    with session:
                        return provider_.get_results(ids_to_requests,
                                                     self.__poll_for_batch_results,
                                                     timeout=self.__batch_results_timeout)
                except Exception as be:
                    return {r: be for r in ids_to_requests.values()}

            def set_results(results: Mapping[RiskRequest, Union[Exception, dict]]):
                handle_results(results)
                with batch_lock:
                    # Check if we're the last provide and signal done if so
                    batch_providers.remove(provider_)
                    if not batch_providers:
                        batch_result.set_result(True)

            if self.__is_async:
                # Poll for batch results on a background thread
                batch_result_pool = ThreadPoolExecutor(1)
                batch_result_pool.submit(get_results).add_done_callback(lambda f: set_results(f.result()))
                batch_result_pool.shutdown(wait=False)
            else:
                set_results(get_results())

        with self.__lock:
            # Group requests by risk_measures, positions, scenario - so we can create unique RiskRequest objects
            # Determine how many we will need
            while self.__risk_measures_in_scenario_by_provider_and_position:
                provider, risk_measures_by_scenario =\
                    self.__risk_measures_in_scenario_by_provider_and_position.popitem()
                for position, scenario_to_risk_measures in risk_measures_by_scenario.items():
                    for scenario, risk_measures in scenario_to_risk_measures.items():
                        # Sort measures so identical sets produce identical keys
                        risk_measures = tuple(sorted(risk_measures, key=lambda m: m.name or m.measure_type.value))
                        positions_by_provider.setdefault(provider, {}).setdefault((scenario, risk_measures), [])\
                            .append(position)

        # Dispatch outside the lock: result handling re-acquires it
        if self.__positions_by_provider:
            num_providers = len(self.__positions_by_provider)
            request_pool = ThreadPoolExecutor(num_providers) if num_providers > 1 or self.__is_async else None
            batch_providers = set(self.__positions_by_provider.keys())

            while self.__positions_by_provider:
                provider, positions_by_scenario_and_risk_measures = self.__positions_by_provider.popitem()
                requests = [
                    RiskRequest(
                        tuple(positions),
                        risk_measures,
                        parameters=self.__parameters,
                        wait_for_results=not self.__is_batch,
                        pricing_location=self.__market_data_location,
                        scenario=scenario,
                        pricing_and_market_data_as_of=self._pricing_market_data_as_of,
                        request_visible_to_gs=self.__visible_to_gs
                    )
                    for (scenario, risk_measures), positions in positions_by_scenario_and_risk_measures.items()
                ]

                if request_pool:
                    request_pool.submit(run_requests, requests, provider)
                else:
                    run_requests(requests, provider)

            if request_pool:
                request_pool.shutdown(wait=not self.__is_async)

        if batch_result and not self.__is_async:
            # Synchronous batch: block until every provider has reported
            batch_result.result()

    def _handle_results(self, results: Union[Exception, dict], request: RiskRequest):
        error = None
        if isinstance(results, Exception):
            # Whole-request failure: every position gets an ErrorValue below
            error = str(results)
            results = {}
            _logger.error('Error while handling results: ' + error)

        with self.__lock:
            for risk_measure in request.measures:
                # Get each risk measure from from the request and the corresponding positions --> futures dict
                positions_for_measure = self.__futures[(request.scenario, risk_measure)]
                # Get the results for this measure
                position_results = results.pop(risk_measure, {})

                for position in request.positions:
                    # Set the result for this position to the returned value or an error if missing
                    result = position_results.get(position, ErrorValue(self.pricing_key, error=error))

                    if self.__use_cache and not isinstance(result, ErrorValue):
                        # Populate the cache
                        PricingCache.put(position.instrument, risk_measure, result)
                        # Retrieve from the cache - this is used by HistoricalPricingContext. We ensure the cache has
                        # all values (in case some had already been computed) then populate the result as the final step
                        result = PricingCache.get(position.instrument, risk_measure)

                    # Set the result for the future
                    positions_for_measure.pop(position).set_result(result)

                if not positions_for_measure:
                    # All positions for this (scenario, measure) delivered
                    self.__futures.pop((request.scenario, risk_measure))

    @property
    def __active_context(self):
        # The innermost entered context on the stack, or self if none
        return next((c for c in reversed(PricingContext.path) if c.is_entered), self)

    @property
    def __parameters(self) -> RiskRequestParameters:
        return RiskRequestParameters(csa_term=self.__csa_term, raw_results=True)

    @property
    def __scenario(self) -> Optional[MarketDataScenario]:
        # Compose all scenarios on the stack (outermost applied last)
        scenarios = Scenario.path
        if not scenarios:
            return None

        return MarketDataScenario(scenario=scenarios[0] if len(scenarios) == 1 else
                                  CompositeScenario(scenarios=tuple(reversed(scenarios))))

    @property
    def _pricing_market_data_as_of(self) -> Tuple[PricingDateAndMarketDataAsOf, ...]:
        # Single-element tuple; HistoricalPricingContext overrides with many
        return PricingDateAndMarketDataAsOf(self.pricing_date, self.market_data_as_of),

    @property
    def pricing_date(self) -> dt.date:
        """Pricing date"""
        return self.__pricing_date

    @property
    def market_data_as_of(self) -> Union[dt.date, dt.datetime]:
        """Market data as of"""
        if self.__market_data_as_of:
            return self.__market_data_as_of
        elif self.pricing_date == dt.date.today():
            # Default to the prior business day when pricing for today
            return business_day_offset(self.pricing_date, -1, roll='preceding')
        else:
            return self.pricing_date

    @property
    def market_data_location(self) -> str:
        """Market data location"""
        return self.__market_data_location

    @property
    def use_cache(self) -> bool:
        """Cache results"""
        return self.__use_cache

    @property
    def visible_to_gs(self) -> bool:
        """Request contents visible to GS"""
        return self.__visible_to_gs

    @property
    def pricing_key(self) -> PricingKey:
        """A key representing information about the pricing environment"""
        return PricingKey(
            self._pricing_market_data_as_of,
            self.__market_data_location,
            self.__parameters,
            self.__scenario)

    def calc(self, priceable: Priceable, risk_measure: Union[RiskMeasure, Iterable[RiskMeasure]])\
            -> Union[list, DataFrameWithInfo, ErrorValue, FloatWithInfo, Future, MultipleRiskMeasureFuture,
                     SeriesWithInfo]:
        """
        Calculate the risk measure for the priceable instrument. Do not use directly, use via instruments

        :param priceable: The priceable (e.g. instrument)
        :param risk_measure: The measure we wish to calculate
        :return: A float, Dataframe, Series or Future (depending on is_async or whether the context is entered)

        **Examples**

        >>> from gs_quant.instrument import IRSwap
        >>> from gs_quant.risk import IRDelta
        >>>
        >>> swap = IRSwap('Pay', '10y', 'USD', fixed_rate=0.01)
        >>> delta = swap.calc(IRDelta)
        """
        position = RiskPosition(priceable, priceable.get_quantity())
        multiple_measures = not isinstance(risk_measure, RiskMeasure)
        futures = {}

        # Queue the work on the active context, locking both contexts if different
        active_context_lock = self.__active_context.__lock if self.__active_context != self else nullcontext()
        with self.__lock, active_context_lock:
            for measure in risk_measure if multiple_measures else (risk_measure,):
                scenario = self.__scenario
                # Re-use an already-pending future for this measure/position
                measure_future = self.__active_context.__futures.get((scenario, measure), {}).get(position)

                if measure_future is None:
                    measure_future = PricingFuture(self.__active_context)
                    if self.__use_cache:
                        cached_result = PricingCache.get(priceable, risk_measure)
                        if cached_result:
                            measure_future.set_result(cached_result)

                    if not measure_future.done():
                        # Not cached: queue for the next _calc() dispatch
                        self.__risk_measures_in_scenario_by_provider_and_position.setdefault(
                            priceable.provider(), {}).setdefault(
                            position, {}).setdefault(scenario, set()).add(measure)
                        self.__active_context.__futures.setdefault((scenario, measure), {})[position] = measure_future

                futures[measure] = measure_future

        future = MultipleRiskMeasureFuture(futures, result_future=PricingFuture(self.__active_context))\
            if multiple_measures else futures[risk_measure]

        if not (self.is_entered or self.__is_async):
            # Synchronous usage: compute now (outside the lock) and block
            if not future.done():
                self._calc()

            return future.result()
        else:
            return future

    def resolve_fields(self, priceable: Priceable, in_place: bool) -> Optional[Union[Priceable, Future]]:
        """
        Resolve fields on the priceable which were not supplied. Do not use directly, use via instruments

        :param priceable: The priceable (e.g. instrument)
        :param in_place: Resolve in place or return a new Priceable

        **Examples**

        >>> from gs_quant.instrument import IRSwap
        >>>
        >>> swap = IRSwap('Pay', '10y', 'USD')
        >>> rate = swap.fixed_rate

        fixedRate is None

        >>> swap.resolve()
        >>> rate = swap.fixed_rate

        fixed_rate is now the solved value
        """
        resolution_key = self.pricing_key

        if priceable.resolution_key:
            # Already resolved: warn (in place) or return a copy (not in place)
            if in_place:
                if resolution_key != priceable.resolution_key:
                    _logger.warning(
                        'Calling resolve() on an instrument already resolved under a different PricingContext')

                return
            elif resolution_key == priceable.resolution_key:
                return copy.copy(priceable)

        def check_valid(result_):
            # Resolution failures come back as StringWithInfo / ErrorValue;
            # in that case log and fall back to the unresolved priceable
            if isinstance(result_, StringWithInfo):
                _logger.error('Failed to resolve instrument fields: ' + result_)
                return priceable
            if isinstance(result_, ErrorValue):
                _logger.error('Failed to resolve instrument fields: ' + result_.error)
                return priceable
            else:
                return result_

        result = self.calc(priceable, ResolvedInstrumentValues)

        if in_place:
            def handle_result(result_):
                result_ = check_valid(result_)
                if result_ is not priceable:
                    # Keep the unresolved original, then copy resolved values in
                    priceable.unresolved = copy.copy(priceable)
                    priceable.from_instance(result_)
                    priceable.resolution_key = result_.resolution_key

            if isinstance(result, Future):
                result.add_done_callback(lambda f: handle_result(f.result()))
            else:
                handle_result(result)
        else:
            if isinstance(result, Future):
                result.add_done_callback(lambda f: check_valid(f.result()))
                return result
            else:
                return check_valid(result)
class LivePricingContext(PricingContext):
    """A pricing context which sources live (intraday) market data."""

    def __init__(self,
                 market_data_location: Optional[str] = None,
                 is_async: bool = False,
                 is_batch: bool = False,
                 visible_to_gs: bool = False,
                 csa_term: Optional[str] = None,
                 poll_for_batch_results: Optional[bool] = False,
                 batch_results_timeout: Optional[int] = None
                 ):
        # TODO we use 23:59:59.999999 as a sentinel value to indicate live pricing for now. Fix this
        prev_business_day = business_day_offset(dt.date.today(), -1, roll='preceding')
        live_sentinel = dt.datetime(prev_business_day.year,
                                    prev_business_day.month,
                                    prev_business_day.day,
                                    23, 59, 59, 999999)
        super().__init__(
            pricing_date=dt.date.today(),
            market_data_as_of=live_sentinel,
            market_data_location=market_data_location,
            is_async=is_async,
            is_batch=is_batch,
            use_cache=False,
            visible_to_gs=visible_to_gs,
            csa_term=csa_term,
            poll_for_batch_results=poll_for_batch_results,
            batch_results_timeout=batch_results_timeout
        )
|
from django.urls import path, re_path
from . import views
app_name = 'beers'

# URL routes for the beers app; brewery_id is the numeric primary key.
urlpatterns = [
    # Brewery listing (app root).
    path('', views.brewery, name='brewery'),
    # Mark a brewery as a favorite.
    re_path(r'(?P<brewery_id>[0-9]+)/favorite/$', views.favorite, name='favorite'),
    # Detail page for a single brewery.
    re_path(r'(?P<brewery_id>[0-9]+)/$', views.brewery_details, name='brewery_details'),
]
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-11-07 14:57
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: adds a `foto` text field to the Comment model.

    dependencies = [
        ('myapp', '0011_auto_20161107_1428'),
    ]

    operations = [
        migrations.AddField(
            model_name='comment',
            name='foto',
            # default=3 was supplied interactively to populate existing rows;
            # preserve_default=False means the model keeps no default afterwards.
            field=models.TextField(default=3),
            preserve_default=False,
        ),
    ]
|
# python 3.7
# https://stepik.org/lesson/3364/step/11?unit=947
# Read integers from stdin and accumulate them until a 0 terminator is entered.
total = 0
value = int(input())
while value != 0:
    total += value
    value = int(input())
print(total)
|
from django.test import TestCase
from edc.core.bhp_content_type_map.classes import ContentTypeMapHelper
from edc.lab.lab_profile.classes import site_lab_profiles
from edc.lab.lab_profile.exceptions import AlreadyRegistered as AlreadyRegisteredLabProfile
from edc.subject.appointment_helper.models import BaseAppointmentMixin
from edc.subject.entry.models import Entry, LabEntry
from edc.testing.classes import TestLabProfile
from edc.testing.classes import TestVisitSchedule, TestAppConfiguration
from ..models import MembershipForm, ScheduleGroup, VisitDefinition
from ..classes import MembershipFormTuple, ScheduleGroupTuple
class TestVisitSchedule(TestCase):
    """Verify that building the test visit schedule creates the expected
    membership forms, schedule groups, visit definitions and entries."""

    def setUp(self):
        # Bug fix: this TestCase class shadows the module-level import of the
        # edc.testing TestVisitSchedule factory, so `TestVisitSchedule()` here
        # instantiated the TestCase itself. Re-import under an alias locally.
        from edc.testing.classes import TestVisitSchedule as TestVisitScheduleFactory
        # content_type_map_helper = ContentTypeMapHelper()
        # content_type_map_helper.populate()
        # content_type_map_helper.sync()
        try:
            site_lab_profiles.register(TestLabProfile())
        except AlreadyRegisteredLabProfile:
            pass
        TestAppConfiguration()
        self.test_visit_schedule = TestVisitScheduleFactory()
        self.test_visit_schedule.build()

    def test_build_membership_form(self):
        """Creates as many instances of membership_form as in the config."""
        self.assertEqual(MembershipForm.objects.count(), len(self.test_visit_schedule.membership_forms.values()))

    def test_build_schedule_group(self):
        """Creates as many instances of schedule_group as in the config."""
        self.assertEqual(ScheduleGroup.objects.count(), len(self.test_visit_schedule.schedule_groups.values()))

    def test_build_visit_definition(self):
        """Creates as many instances of visit_definition as in the config."""
        self.assertEqual(VisitDefinition.objects.count(), len(self.test_visit_schedule.visit_definitions.values()))

    def test_build_entry(self):
        """Creates as many instances of entry as in the config."""
        for visit_definition_name in self.test_visit_schedule.visit_definitions:
            self.assertEqual(Entry.objects.count(), len(self.test_visit_schedule.visit_definitions[visit_definition_name].get('entries')))
        self.assertGreater(Entry.objects.count(), 0)

    def test_build_lab_entry(self):
        """Creates as many instances of lab_entry as in the config."""
        for visit_definition_name in self.test_visit_schedule.visit_definitions:
            self.assertEqual(LabEntry.objects.count(), len(self.test_visit_schedule.visit_definitions[visit_definition_name].get('requisitions')))
        self.assertGreater(LabEntry.objects.count(), 0)

    def test_visit_definition_knows_membership_form(self):
        """Visit definition knows the MembershipForm and the model is a subclass of BaseAppointmentMixin"""
        for visit_definition_name in self.test_visit_schedule.visit_definitions:
            schedule_group_name = self.test_visit_schedule.visit_definitions.get(visit_definition_name).get('schedule_group')
            schedule_group = self.test_visit_schedule.schedule_groups.get(schedule_group_name)
            # the membership_form named tuple in dictionary test_visit_schedule.membership_forms.
            self.assertTrue(issubclass(self.test_visit_schedule.membership_forms.get(schedule_group.membership_form_name).__class__, MembershipFormTuple))
            # the model in the membership_form named tuple.
            self.assertTrue(issubclass(self.test_visit_schedule.membership_forms.get(schedule_group.membership_form_name).model, BaseAppointmentMixin))

    def test_visit_definition_knows_schedule_group(self):
        """Visit definition knows ScheduleGroup and it is a subclass of the named tuple."""
        for visit_definition_name in self.test_visit_schedule.visit_definitions:
            schedule_group_name = self.test_visit_schedule.visit_definitions.get(visit_definition_name).get('schedule_group')
            self.assertTrue(issubclass(self.test_visit_schedule.schedule_groups.get(schedule_group_name).__class__, ScheduleGroupTuple))

    def test_can_create_membership_form_model_instance(self):
        """Can create and instance of the membership form model."""
        for visit_definition_name in self.test_visit_schedule.visit_definitions:
            schedule_group_name = self.test_visit_schedule.visit_definitions[visit_definition_name].get('schedule_group')
            schedule_group = self.test_visit_schedule.schedule_groups.get(schedule_group_name)
            membership_form_model = self.test_visit_schedule.membership_forms[schedule_group.membership_form_name].model
            # i know this is a consent model in the test case
            from edc.testing.tests.factories import TestConsentWithMixinFactory
            self.assertEqual(membership_form_model, TestConsentWithMixinFactory.FACTORY_FOR)
            self.assertIsNotNone(TestConsentWithMixinFactory(gender='M'))
|
# -*- coding: utf-8 -*-
from django.shortcuts import render
from django.shortcuts import redirect
from django.shortcuts import HttpResponse
from app01 import models
import os
def upload(request):
    """Render the upload form (GET) or save the posted file (POST).

    The uploaded file is read from the 'fafafa' form field and written,
    chunk by chunk, to the local `upload/` directory under its original name.
    """
    if request.method == 'GET':
        return render(request, 'upload.html')
    elif request.method == 'POST':
        obj = request.FILES.get('fafafa')
        print(obj)  # debug: log the uploaded file object
        # Context manager ensures the handle is closed even if a chunk write
        # raises (the original open()/close() pair leaked the handle on error).
        with open(os.path.join('upload', obj.name), 'wb') as f:
            for chunk in obj.chunks():
                f.write(chunk)
        return HttpResponse('上传成功')
|
from db import db, ma
import hashlib
class CoworkerModel(db.Model):
    """SQLAlchemy model for a coworker account, backed by the `Coworker` table."""
    __tablename__ = 'Coworker'

    coworker_id = db.Column(db.Integer, primary_key=True)
    first_name = db.Column(db.String(45), nullable=False)
    last_name = db.Column(db.String(45), nullable=False)
    email = db.Column(db.String(45), unique=True, nullable=False)
    gender = db.Column(db.String(45), nullable=False)
    username = db.Column(db.String(45), unique=True, nullable=False)
    password = db.Column(db.String(45), nullable=False)
    address = db.Column(db.String(45), nullable=False)
    postal_area = db.Column(db.String(45), nullable=False)
    city = db.Column(db.String(45), nullable=False)
    state = db.Column(db.String(45), nullable=False)
    country = db.Column(db.String(45), nullable=False)

    def __init__(self, first_name,
                 last_name,
                 email,
                 gender,
                 username,
                 password,
                 address,
                 postal_area,
                 city,
                 state,
                 country):
        self.first_name = first_name
        self.last_name = last_name
        self.email = email
        self.gender = gender
        self.username = username
        # Store the hash, never the clear-text password
        self.password = self.hashPassword(password)
        self.address = address
        self.postal_area = postal_area
        self.city = city
        self.state = state
        self.country = country

    def save_to_db(self):
        """Insert this coworker as a new row (parameterized query)."""
        db.engine.execute("INSERT INTO `Coworker` (first_name, last_name, email, gender, username, password, address, postal_area, city, state, country) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)",
                          (self.first_name, self.last_name, self.email, self.gender, self.username, self.password, self.address, self.postal_area, self.city, self.state, self.country))

    @classmethod
    def delete_from_db(cls, coworker_id):
        """Delete the row with the given primary key."""
        # Bug fix: `(coworker_id)` is just a parenthesized scalar; the trailing
        # comma is required for the parameters to be a proper 1-tuple.
        db.engine.execute("DELETE FROM Coworker WHERE coworker_id=%s", (coworker_id,))

    @classmethod
    def get_all(cls):
        """Return a result proxy over all coworker rows."""
        return db.engine.execute("SELECT * FROM Coworker")

    @classmethod
    def find_by_id(cls, id):
        """Return the row with the given id, or None if not found."""
        return db.engine.execute("SELECT * FROM Coworker WHERE coworker_id=%s", (id,)).fetchone()

    @staticmethod
    def hashPassword(password):
        """Return the hex MD5 digest of the password.

        NOTE(security): MD5 is unsuitable for password storage (fast and
        unsalted). Migrate to a dedicated scheme such as hashlib.scrypt or
        bcrypt; kept here only for compatibility with existing stored hashes.
        """
        return hashlib.md5(password.encode("utf")).hexdigest()

    @classmethod
    def find_by_username(cls, username):
        """Return the row with the given username, or None if not found."""
        return db.engine.execute("SELECT * FROM Coworker WHERE username=%s", (username,)).fetchone()

    @classmethod
    def find_by_email(cls, email):
        """Return the row with the given email, or None if not found."""
        return db.engine.execute("SELECT * FROM Coworker WHERE email=%s", (email,)).fetchone()
class CoworkerSchema(ma.Schema):
    """Marshmallow schema for serializing CoworkerModel rows."""
    class Meta:
        # Bug fix: 'username' was listed twice in the original fields tuple.
        fields = ('coworker_id', 'username', 'first_name', 'last_name', 'email',
                  'gender', 'password', 'address', 'postal_area', 'city', 'state', 'country')
        # Never expose the password hash in serialized output.
        load_only = ('password',)
# if any breach occurs, use breach window (first to last value > 0)
# if no breach occurs, use +/- N-days
from hec.hecmath import TimeSeriesMath
from hec.hecmath import DSS
from hec.heclib.util import HecTime
# script entry point for TimeWindow modification.
# arguments:
# runTimeWindow - the runtime window after all the other time window modifications have been applied
# alternative - the TimeWindowAlternative
# computeOptions - the ComputeOptions passed to the TimeWindowMod plugin
#
# Return:
# the new runtime window. If nil is returned a compute error is raised
#
# buffer time window around breach for RAS
# buffer time window around breach for RAS
RAS_START_BUFFER = 3  # days
RAS_END_BUFFER = 1  # days


def timeWindowMod(runtimeWindow, alternative, computeOptions):
    """Adjust the runtime window around the breach period read from DSS.

    If any breach occurs, the window is widened to cover the first-to-last
    breach timestep plus the RAS start/end buffers; if no breach occurs, the
    start is aligned to the last valid ResSim timestep at or before the
    original start. Returns the (possibly modified) runtime window.
    """
    originalRTW = computeOptions.getRunTimeWindow()
    dssFile = DSS.open(computeOptions.getDssFilename(), originalRTW.getTimeWindowString())

    # pathname for breaches
    twmTSM = TimeSeriesMath(alternative.getTimeSeries())  # assumes this is the mapped input to TWM
    twmPath = twmTSM.getPath().split("/")  # use this for e/f parts
    breachPath = "/".join(["", "", "BREACHTRACKER-TIMESTEPS REMAINING", "TIMESTEPS REMAINING", "", twmPath[5], twmPath[6], ""])

    # find start and end of breach timeseries
    breaches = dssFile.read(breachPath)
    dssFile.done()
    breachTSC = breaches.getData()

    start, end = None, None
    rtwStart = runtimeWindow.getStartTime().value()
    newStart = HecTime()  # keep track of start time that is a valid ResSim timestep
    for t, v in zip(breachTSC.times, breachTSC.values):
        if v > 0:
            if start is None:  # first non-zero
                start = t
            end = t
        # update until original start time occurs, make sure this is prev. timestep in ResSim
        # avoids interpolated input on start timestep in RAS
        if t <= rtwStart:
            newStart.set(t)

    # no breach: just snap the start to a valid ResSim timestep
    if start is None:
        runtimeWindow.setStartTime(newStart)
        return runtimeWindow

    # compare and adjust if needed
    startTime = HecTime()
    startTime.set(start)
    startTime.subtractDays(RAS_START_BUFFER)  # move the start earlier to give RAS a little spin up time
    if startTime <= runtimeWindow.getStartTime():
        runtimeWindow.setStartTime(startTime)

    endTime = HecTime()
    endTime.set(end)
    endTime.addDays(RAS_END_BUFFER)  # buffer at end
    if endTime >= runtimeWindow.getEndTime():
        runtimeWindow.setEndTime(endTime)

    alternative.addComputeMessage("New time window set: %s" % runtimeWindow.getTimeWindowString())
    return runtimeWindow
from django.db import models
class destination(models.Model):
    # Travel destination listing.
    # NOTE(review): PEP 8 would name this `Destination`, but renaming the model
    # requires a migration and touches all referencing code — left as-is.
    name = models.CharField(max_length=100)
    img = models.ImageField(upload_to='pics')  # stored under MEDIA_ROOT/pics
    des = models.TextField()                   # free-text description
    price = models.IntegerField()
    offer = models.BooleanField(default=False)  # True when a discount applies
class info(models.Model):
    # Simple person record.
    # NOTE(review): PEP 8 would name this `Info` with lowercase field names;
    # renaming requires a migration, so left as-is.
    Name = models.CharField(max_length=100)
    Age = models.IntegerField()
    # NOTE(review): "Adderss" is a typo of "Address" — fixing it requires a
    # DB migration and updates to any referencing code.
    Adderss = models.CharField(max_length=100)
from Stats_Functions import std_dev, within_percentage
# Housefly wing lengths in millimeters
wing_lengths = [36, 37, 38, 38, 39, 39, 40, 40, 40, 40, 41, 41, 41, 41, 41, 41, 42, 42, 42, 42, 42, 42,
                42, 43, 43, 43, 43, 43, 43, 43, 43, 44, 44, 44, 44, 44, 44, 44, 44, 44, 45, 45, 45, 45,
                45, 45, 45, 45, 45, 45, 46, 46, 46, 46, 46, 46, 46, 46, 46, 46, 47, 47, 47, 47, 47, 47,
                47, 47, 47, 48, 48, 48, 48, 48, 48, 48, 48, 49, 49, 49, 49, 49, 49, 49, 50, 50, 50, 50,
                50, 50, 51, 51, 51, 51, 52, 52, 53, 53, 54, 55]

# Sample mean and standard deviation (std_dev is the project helper from
# Stats_Functions — presumably the population or sample SD; confirm there).
mean_wing_lengths = sum(wing_lengths) / len(wing_lengths)
std_dev_wing_lengths = std_dev(wing_lengths)

# For each point in wing_lengths, calculate the distance from the mean in number of standard deviations.
SD_count_wing_lengths = [(wl - mean_wing_lengths) / std_dev_wing_lengths for wl in wing_lengths]

# Calculate the proportion of the data that's within one standard deviation of the mean. Assign the result to within_one_percentage.
within_one_percentage = within_percentage(SD_count_wing_lengths, 1)
within_two_percentage = within_percentage(SD_count_wing_lengths, 2)
within_three_percentage = within_percentage(SD_count_wing_lengths, 3)

print('within_one_percentage:', within_one_percentage)
print('within_two_percentage:', within_two_percentage)
print('within_three_percentage:', within_three_percentage)
from IPython.display import Image
import matplotlib.pyplot as plt
import numpy as np
# Marker Styles: http://matplotlib.org/api/markers_api.html
def plot(data, x_row=0, x_label="Thresholds", x_tick_step=3, y_rows=[1], y_labels=["data"], y_markers=["o"],y_colors=["c"], cluster_plot_file="out.png", figure_size=(20,10) ):
x = np.array(data[:,x_row])
y_stack = np.row_stack(np.transpose(data[:,1:]))
fig = plt.figure(figsize=figure_size)
ax1 = fig.add_subplot(111)
for i,y_row_id in enumerate(y_rows):
ax1.plot(x, y_stack[y_row_id,:], label=y_labels[i], color=y_colors[i], marker=y_markers[i])
plt.xticks([step for i,step in enumerate(x) if i%x_tick_step==0])
plt.xlabel(x_label)
handles, labels = ax1.get_legend_handles_labels()
lgd = ax1.legend(handles, labels, loc='lower center')
ax1.grid('on')
plt.savefig(cluster_plot_file)
plt.close()
|
#! /usr/bin/env python
import sys
sys.path.append("../util")
import matplotlib.pyplot as plt
from netCDF4 import Dataset
import numpy as np
import os
import errno
# Local imports
from UtilityFcts import *
def Main():
LegendFSize=18
AxisFSize=22
TitleFSize=27
YLim=[0,6e12]
YTickLabelSize=20
XLim=[0,85.0]
fig,ax = CreateSuplotAxesSimple(1,1,10,15)
PlotFiles=CreateTSInputAnom("oce","Hudson")
PlotPISMTimeSeries(PlotFiles,"flux_crossSection",ax)
ax.fill_between([20,88],[-100,-100],[9e12,9e12],
color="grey", facecolor = 'grey',alpha=0.2)
ax.set_ylabel("Ice flux [m$^3$/yr]",fontsize=AxisFSize)
ax.set_xlim(XLim)
ax.set_ylim(YLim)
ax.legend(loc=1,prop={'size':LegendFSize})
ax.set_title("Hudson ice stream",fontweight="bold",fontsize=TitleFSize)
ax.set_xlabel("Time [kyrs]",fontsize=AxisFSize)
ax.tick_params(axis='both',labelsize=YTickLabelSize)
plt.tight_layout()
SavePlot("SIFig04","TSSurges_OceHudson")
if __name__ == '__main__':
Main()
|
from django.contrib import admin
from votes.models import Game, Vote
class GameAdmin(admin.ModelAdmin):
pass
admin.site.register(Game, GameAdmin)
class VoteAdmin(admin.ModelAdmin):
pass
admin.site.register(Vote, VoteAdmin)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/4/19 14:37
# @Author : LiuZhi
# @Site :
# @File : Extends.py
# @Software: PyCharm
class Animal(object):
def run(self):
print('Animal is running ...')
class Dog(Animal):
#pass
def run(self):
print('Dog is running...')
def eat(self):
print('Eating meat...')
class Cat(Animal):
#pass
def run(self):
print('Cat is running...')
dog = Dog()
dog.run()
cat = Cat()
cat.run()
a = list()
b = Animal()
c = Dog()
print(isinstance(a, list))
print(isinstance(b, Animal))
print(isinstance(c, Dog))
print(isinstance(c, Animal))
def run_twice(animal):
animal.run()
animal.run()
print(run_twice(Animal()))
print(run_twice(Dog()))
class Tortoise(Animal):
def run(self):
print('Tortoise is running slowly...')
print(run_twice(Tortoise()))
#开闭原则:对扩展开发,对修改封闭
class Timer(object):
def run(self):
print('Start...')
|
# Generated by Django 2.0.6 on 2018-12-19 00:38
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('userlogin', '0007_auto_20181218_0002'),
]
operations = [
migrations.CreateModel(
name='User_results',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('User_name', models.CharField(max_length=30)),
('question', models.CharField(max_length=100)),
('answer', models.CharField(max_length=100)),
('user_answer', models.CharField(max_length=100)),
('date', models.DateTimeField(auto_now_add=True)),
],
),
migrations.DeleteModel(
name='Results',
),
]
|
'''
Created on 16-Jun-2018
@author: srinivasan
'''
import itertools
import string
class SearchCriteriaException(Exception):
pass
class SearchCriteria:
@staticmethod
def rangeAtoZ():
return [i for i in string.ascii_uppercase]
@staticmethod
def rangeAAtoZZ():
return [str(v[0]) + str(v[1]) for v in
list(itertools.product([i for i in string.ascii_uppercase],
[i for i in string.ascii_uppercase]))]
@staticmethod
def rangeAAAtoZZZ():
return [str(v[0]) + str(v[1]) for v in
list(itertools.product(SearchCriteria.rangeAtoZ(),
SearchCriteria.rangeAAtoZZ()))]
@staticmethod
def rangeAAAAtoZZZZ():
return [str(v[0]) + str(v[1]) for v in
list(itertools.product(SearchCriteria.rangeAtoZ(),
SearchCriteria.rangeAAAtoZZZ()))]
@staticmethod
def rangeAAAAAtoZZZZZ():
return [str(v[0]) + str(v[1]) for v in
list(itertools.product(SearchCriteria.rangeAtoZ(),
SearchCriteria.rangeAAAAtoZZZZ()))]
@staticmethod
def rangeAAAAAAtoZZZZZZ():
return [str(v[0]) + str(v[1]) for v in
list(itertools.product(SearchCriteria.rangeAtoZ(),
SearchCriteria.rangeAAAAAtoZZZZZ()))]
@staticmethod
def rangeAAAAAAAtoZZZZZZZ():
return [str(v[0]) + str(v[1]) for v in
list(itertools.product(SearchCriteria.rangeAtoZ(),
SearchCriteria.rangeAAAAAAtoZZZZZZ()))]
@staticmethod
def rangeAAAAAAAAtoZZZZZZZZ():
return [str(v[0]) + str(v[1]) for v in
list(itertools.product(SearchCriteria.rangeAtoZ(),
SearchCriteria.rangeAAAAAAAtoZZZZZZZ()))]
@staticmethod
def rangeAAAAAAAAAtoZZZZZZZZZ():
return [str(v[0]) + str(v[1]) for v in
list(itertools.product(SearchCriteria.rangeAtoZ(),
SearchCriteria.rangeAAAAAAAAtoZZZZZZZZ()))]
@staticmethod
def rangeAAAAAAAAAAtoZZZZZZZZZZ():
return [str(v[0]) + str(v[1]) for v in
list(itertools.product(SearchCriteria.rangeAtoZ(),
SearchCriteria.rangeAAAAAAAAAtoZZZZZZZZZ()))]
@staticmethod
def rangeAAAAAAAAAAAtoZZZZZZZZZZZ():
return [str(v[0]) + str(v[1]) for v in
list(itertools.product(SearchCriteria.rangeAtoZ(),
SearchCriteria.rangeAAAAAAAAAAtoZZZZZZZZZZ()))]
@staticmethod
def rangeAAAAAAAAAAAAtoZZZZZZZZZZZZ():
return [str(v[0]) + str(v[1]) for v in
list(itertools.product(SearchCriteria.rangeAtoZ(),
SearchCriteria.rangeAAAAAAAAAAAtoZZZZZZZZZZZ()))]
@staticmethod
def strRange(start, end_or_len):
def strRange(start, end_or_len, sequence='ABCDEFGHIJKLMNOPQRSTUVWXYZ'):
start = start.upper()
if not end_or_len.isdigit():
end_or_len = end_or_len.upper()
seq_len = len(sequence)
start_int_list = [sequence.find(c) for c in start]
if isinstance(end_or_len, int):
end_int_list = list(start_int_list)
i = len(end_int_list) - 1
end_int_list[i] += end_or_len - 1
while end_int_list[i] >= seq_len:
j = end_int_list[i] // seq_len
end_int_list[i] = end_int_list[i] % seq_len
if i == 0:
end_int_list.insert(0, j - 1)
else:
i -= 1
end_int_list[i] += j
else:
end_int_list = [sequence.find(c) for c in end_or_len]
while len(start_int_list) < len(end_int_list) or\
(len(start_int_list) == len(end_int_list) and start_int_list <= end_int_list):
yield ''.join([sequence[i] for i in start_int_list])
i = len(start_int_list) - 1
start_int_list[i] += 1
while start_int_list[i] >= seq_len:
start_int_list[i] = 0
if i == 0:
start_int_list.insert(0, 0)
else:
i -= 1
start_int_list[i] += 1
vals = list(strRange(start, end_or_len))
if(len(vals) == 0):
raise SearchCriteriaException("String Range Search Criteria is wrong(start-{},end-{})".format(
start, end_or_len))
return vals
@staticmethod
def dateRange(start, end, freq='1D', formatter='%Y-%m-%d'):
import pandas as pd
date_rng = pd.date_range(str(start), str(end), freq=freq, closed=None)
vals = date_rng.format(formatter=lambda x: x.strftime(formatter))
if(len(vals) == 0):
raise SearchCriteriaException("Date Range Search Criteria is wrong(start-{},end-{})".format(
start, end))
if 'M' in freq:
if(len(vals) >= 2):
vals.insert(0, ''.join([vals.pop(0)[:-2],
str(start)[-2:]]))
vals.append(''.join([vals.pop(-1)[:-2],
str(end)[-2:]]))
return vals
@staticmethod
def numberRange(start, end, step=1, zeropadSize=None):
if not zeropadSize:
zeropadSize = len(str(end))
zeropad = '{0:0' + str(zeropadSize) + '}'
vals = [str(i) if zeropad == '' else '{}'.format(zeropad).format(i)
for i in range(int(start), int(end) + 1, int(step))]
if(len(vals) == 0):
raise SearchCriteriaException("Number Range Search Criteria is wrong(start-{},end-{})".format(
start, end))
return vals
|
print ("this program is made by Rishu Raj")
a = int (input("enter the length of 1st side of triangle : "))
b = int (input("enter the length of 2nd side of triangle : "))
c = int (input("enter the length of 3rd side of triangle : "))
if a==b==c :
print ("this triangle is equilateral")
if a==b or a==c or b==c :
print ("this is an isosceles triangle")
if a!=b and a!=c and b!=c :
print ("this is an scalane triangle")
|
for t in range(int(input())):
n = int(input())
H = [int(e) for e in input().split()]
if n%2 == 0:
print("no")
else:
if H[0] != 1:
print("no")
else:
flag = 0
for i in range(n//2):
if H[i] != H[n-i-1]:
flag = 1
break
if flag == 0:
for i in range(n//2):
if H[i] - H[i+1] != -1:
flag = 1
break
if flag == 1:
print("no")
else:
print("yes")
|
from werkzeug.security import generate_password_hash, check_password_hash
# from flask_login import UserMixin
from application import db, login_manager
from flask_login import UserMixin
class User(db.Model, UserMixin):
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(20), unique=True, nullable=False)
email = db.Column(db.String(120), unique=True, nullable=False)
password_hash = db.Column(db.String(128), nullable=False)
@property
def password(self):
raise AttributeError('password is not a readable attribute')
@password.setter
def password(self, password):
self.password_hash = generate_password_hash(password)
def verify_password(self, password):
return check_password_hash(self.password_hash, password)
def __repr__(self):
return f"User('{self.username}', '{self.email}')"
# class HealthFacility(db.Model):
# __tablename__ = 'health_facilites'
# id = db.Column(db.Integer, primary_key=True)
# name = db.Column(db.String(64), unique=True, nullable=False)
# facility_type = db.Column(db.String(20), nullable=False)
# location = db.Column(db.String(120))
# email = db.Column(db.String(120))
# class membership(db.Model):
# __tablename__ = 'members'
# id = db.Column(db.Integer, primary_key=True)
# member_name = db.Column(db.String(120), nullable=False)
# benefciary_name = db.Column(db.String(20), nullable=False)
# location = db.Column(db.String(120))
# email = db.Column(db.String(120))
# class expense(db.Model):
# __tablename__ = 'health_facilites'
# internal_id = db.Column(db.Integer, primary_key=True)
# member = db.Column(db.String(64), unique=True, nullable=False)
# facility_type = db.Column(db.String(20), nullable=False)
# location = db.Column(db.String(120))
# email = db.Column(db.String(120))
@login_manager.user_loader
def load_user(user_id):
return User.query.get(int(user_id))
|
# from keras.activations import *
import random
import numpy as np
from keras.losses import mean_squared_error as MSE
from keras.losses import mean_absolute_error as MAE
from keras.losses import mean_absolute_percentage_error as MAPE
import tensorflow as tf
from typing import List
from math import e
from math import tanh
from math import exp
from math import sin
from math import cos
"""# WANN
### Constants
"""
NOT_INPUT_LAYER_FEED_ERROR = '''
Tried to feed input vec to hidden or output layer.
Expected input layer.
'''
OUTPUT_INSERTION = '''
Insertion to the output layer
'''
ERROR = '''
Layer type not found
'''
losses = {'MSE': MSE, 'MAE': MAE, 'MAPE': MAPE}
def relu(x: float):
return max(0., x)
def linear(x: float):
return x if x >= 0 else 2*x
def gauss(x: float):
return e**(x**(-2))
def sigmoid(x: float):
return 1/(1+e**(-x))
random.seed(146)
func_dict = {
'relu': relu,
'lin': linear,
'sigm': sigmoid,
'tanh': tanh,
'exp': exp,
'gauss': gauss,
'sin': sin,
'cos': cos
}
func_list = list(func_dict.keys())
NN_DEBUG_MSG1 = '''
Layer = {}
nodes_id = {}
'''
NN_DEBUG_MSG2 = '''
children_idx:
'''
NN_DEBUG_MSG3 = '''
nodes_vals = {}
nodes_funcs = {}
'''
"""### Input"""
'''############ Code bellow is deprecated to use! ###############'''
'''############ May cause Undefined Behaviour! ###############'''
class InputNode:
def __init__(self, parents_idx, value=0.):
self.parents_idx = parents_idx
self.value = value
self.node_type = 'input'
self.function = 'id'
def receive(self, value):
self.value = value
def forward(self, weight):
return self.value * weight
def add_parent(self, parent_id):
self.parents_idx.append(parent_id)
class InputLayer:
def __init__(self, nodes_id):
self.nodes = nodes_id
self.level = 0
self.layer_type = 'input'
def receive(self, nodes_set, x):
for x, node_id in zip(x, self.nodes):
nodes_set[node_id].receive(x)
def forward(self, nodes_set, weight):
for nid in self.nodes:
val_to_pass = nodes_set[nid].forward(weight)
for pid in nodes_set[nid].parents_idx:
nodes_set[pid].receive(val_to_pass)
"""### Hidden"""
class HiddenNode:
def __init__(self, parents_idx, function_name=random.choice(func_list), value=0.):
self.parents_idx = parents_idx
self.value = value
self.function = func_dict[function_name]
self.node_type = 'hidden'
def receive(self, value):
self.value += value
def forward(self, weight):
self.value = self.function(self.value)
return self.value * weight
def add_parent(self, parent_id):
self.parents_idx.append(parent_id)
class HiddenLayer:
def __init__(self, nodes_id, level):
self.nodes = nodes_id
self.level = level
self.layer_type = 'hidden'
def forward(self, nodes_set, weight):
for nid in self.nodes:
val_to_pass = nodes_set[nid].forward(weight)
for pid in nodes_set[nid].parents_idx:
nodes_set[pid].receive(val_to_pass)
def insert_node(self, new_node_id):
self.nodes.append(new_node_id)
class OutputNode:
def __init__(self, value=0.):
self.value = value
self.function = 'id'
self.parents_idx = [None]
self.node_type = 'output'
def receive(self, value):
self.value += value
class OutputLayer:
def __init__(self, nodes_id):
self.nodes = nodes_id
self.level = -1
self.layer_type = 'output'
def forward(self, nodes_set):
out = []
for nid in self.nodes:
out.append(nodes_set[nid].value)
return out
'''############ Code above is deprecated to use! ###############'''
'''############ Use Node and Layer classes instead. ###############'''
class Node:
def __init__(self, children_idx: List[int], node_type: str, value=0., function_name=None):
self.children_idx = children_idx
self.value = value
self.node_type = node_type
if function_name is None:
function_name = random.choice(func_list)
self.function_name = function_name
self.function = func_dict[function_name] if node_type == 'hidden' else 'id'
def receive(self, value):
if self.node_type == 'input':
self.value = value
else:
self.value += value
def forward(self, weight):
if self.node_type == 'input':
res = self.value * weight
self.value = 0.
return res
if self.node_type == 'hidden':
res = self.function(self.value) * weight
self.value = 0.
return res
if self.node_type == 'output':
res = self.value
self.value = 0.
return res
raise ERROR
# TODO deprecated
def add_children(self, parent_id):
self.children_idx.append(parent_id)
class Layer:
def __init__(self, nodes_id: List[int], layer_type: str):
self.nodes = nodes_id
self.layer_type = layer_type
def feed(self, nodes_set: List[Node], x: List[float]):
if self.layer_type == 'input':
for x, node_id in zip(x, self.nodes):
nodes_set[node_id].receive(x)
else:
raise NOT_INPUT_LAYER_FEED_ERROR
def forward(self, nodes_set: List[Node], weight: float):
if self.layer_type != 'output':
vals = []
for nid in self.nodes:
val_to_pass = nodes_set[nid].forward(weight)
for cid in nodes_set[nid].children_idx:
nodes_set[cid].receive(val_to_pass)
vals.append(val_to_pass)
return vals
else:
out = []
for nid in self.nodes:
out.append(nodes_set[nid].forward(weight))
return out
"""### Neural Net ###"""
class NN:
def __init__(self, input_size: int, output_size: int):
self.nodes_set = []
self.input_size = input_size
self.output_size = output_size
# Creating input nodes
for _ in range(input_size):
out_idx = list(range(input_size, input_size + output_size))
cids = random.sample(out_idx,
k=random.randint(1, output_size))
# print('{} node\'s cids = {}'.format(len(self.nodes_set) + 1, cids))
self.nodes_set.append(Node(cids, node_type='input'))
# Creating output nodes:
for i in range(output_size):
self.nodes_set.append(Node([], node_type='output'))
# Add new layers
self.layers = [Layer(list(range(input_size)), layer_type='input'),
Layer(list(range(input_size, input_size + output_size)), layer_type='output')]
# For Genetic Alg
self.loss = 0
def find_layer_num(self, node_id):
for l in self.layers:
if node_id in l.nodes:
print('l =', self.layers.index(l), l)
return self.layers.index(l)
print('! Layer not found, node_id ={}'.format(node_id))
return 'error'
def rand_conn(self):
pid = random.randint(0, len(self.nodes_set) - 1)
while self.nodes_set[pid].node_type == 'output' or len(self.nodes_set[pid].children_idx) == 0:
pid = random.randint(0, len(self.nodes_set) - 1)
# print(len(self.nodes_set[pid].children_idx))
cid = random.choice(self.nodes_set[pid].children_idx)
return pid, cid
def cut_conn(self):
# Cut random connection
pid, cid = self.rand_conn()
# Create and add new node
new_node = Node([cid], node_type='hidden')
self.nodes_set.append(new_node)
new_node_id = len(self.nodes_set)-1
# Connect new node to parental node
# TODO -- genomes
self.nodes_set[pid].children_idx.remove(cid)
self.nodes_set[pid].children_idx.append(new_node_id)
# Find number of layers of parental and children node
cid_layer_num = self.find_layer_num(cid)
pid_layer_num = self.find_layer_num(pid)
print('cid layer =', cid_layer_num)
print('pid layer =', pid_layer_num)
# Add node or layer
if cid_layer_num - pid_layer_num == 1:
# If there is no layers between cid and pid layers
# Create new Layer and add it
new_layer = Layer([new_node_id], layer_type='hidden')
self.layers.insert(pid_layer_num + 1, new_layer)
print('Added new layer {} with node {} between {} and {}'.format(pid_layer_num + 1, new_node_id, pid, cid))
else:
# If there are another layer(s) between
# add new node randomly into one of the layers
layer_to_insert = random.randint(1, len(self.layers) - 2)
print(len(self.layers) - 2)
if self.layers[layer_to_insert].layer_type not in ['output', 'input']:
self.layers[layer_to_insert].nodes.append(new_node_id)
print('Added new node {} into {} layer'.format(new_node_id, layer_to_insert))
else:
print(OUTPUT_INSERTION)
# def add_conn(self, pid):
# pid = random.choice(len(self.nodes_set))
# cid = random.choice(len(self.nodes_set))
# while find_layer_num()
# def add_layer(self):
# new_node = HiddenNode([])
# new_layer = HiddenLayer()
def change_activation(self):
change_node_id = random.randint(self.input_size + self.output_size - 1, len(self.nodes_set) - 1)
function_name = random.choice(func_list)
if self.nodes_set[change_node_id].node_type == 'input' or self.nodes_set[change_node_id].node_type == 'output':
print('!FATAL ERROR: Trying to change input/output node activation function')
else:
print('Activation function of {} node changed from {} to {}'.format(change_node_id,
self.nodes_set[change_node_id].function_name,
function_name))
self.nodes_set[change_node_id].function = func_dict[function_name]
def mutate(self):
action = random.choice(['cut', 'change'])
if action == 'cut':
self.cut_conn()
if action == 'change':
self.change_activation()
def feed(self, x, weight=1.):
self.layers[0].feed(self.nodes_set, x)
res = []
for layer in self.layers:
res = layer.forward(self.nodes_set, weight)
# print(res)
return np.array(res)
def print(self):
for layer in self.layers:
# print(layer.nodes)
# print(layer)
print(NN_DEBUG_MSG1.format(layer.layer_type,
layer.nodes))
print(NN_DEBUG_MSG2, end='')
for i in layer.nodes:
print(' '*8, self.nodes_set[i].children_idx)
print(NN_DEBUG_MSG3.format([self.nodes_set[i].value for i in layer.nodes],
[self.nodes_set[i].function for i in layer.nodes]))
def pprint(self):
for layer in self.layers:
print(self.layers.index(layer), layer.nodes)
class NaiveGenetic:
def __init__(self, pop_size: int, epoch_num: int, x: np.array, y: np.array, loss_fn_name: str):
self.pop_size = pop_size
self.epochs = epoch_num
self.x = x
self.y = y
if loss_fn_name not in losses.keys():
print('Fatal Error: Incorrect Loss Function')
self.loss = losses[loss_fn_name]
self.population = []
if x.shape[0] != y.shape[0]:
print('!Fatal error: x dim={} =/= y dim={}'.format(x.shape[0], y.shape[0]))
self.lines_num = x.shape[0]
if len(x.shape) != 1:
self.input_size = x.shape[1]
else:
self.input_size = 1
if len(y.shape) != 1:
self.output_size = y.shape[1]
else:
self.output_size = 1
for i in range(self.pop_size):
self.population.append(NN(self.input_size, self.output_size))
self.chart = []
def run(self):
best = []
for epoch in range(self.epochs):
print('Epoch 1 started executing')
for nn in self.population:
for i in range(self.lines_num):
x_pred = nn.feed(self.x[i], 1)
nn.loss = self.loss(x_pred, self.y[i])
self.chart.append(nn)
print('Dataset ran')
self.chart = sorted(self.chart, key=lambda n: tf.dtypes.cast(n.loss, tf.float32))
best = self.chart[:self.pop_size//2]
# Mutation
for nn in best:
nn.mutate()
print('Epoch', epoch, 'loss =', best[0].loss)
return best
class WANN(NaiveGenetic):
def __init__(self, pop_size: int, epoch_num: int, x: np.ndarray, y:np.array):
self.pop_size = pop_size
self.epochs = epoch_num
class iWANN(WANN):
pass
|
import fractions
KEYSPACE = 256
def checkKeys(a, b):
if (fractions.gcd(a,KEYSPACE) != 1):
return False
else:
return True
def encrypt(data, keyA, keyB):
cipher = []
for element in data:
newPixel = []
for color in element:
newPixel.append((keyA * color + keyB) % KEYSPACE)
cipher.append(tuple(newPixel))
return cipher
def decrypt(data, keyA, keyB):
message = []
invA = modinv(keyA,KEYSPACE)
for element in data:
newPixel = []
for color in element:
newPixel.append((invA * (color - keyB)) % KEYSPACE)
message.append(tuple(newPixel))
return message
def egcd(a, b):
x,y, u,v = 0,1, 1,0
while a != 0:
q, r = b//a, b%a
m, n = x-u*q, y-v*q
b,a, x,y, u,v = a,r, u,v, m,n
gcd = b
return gcd, x, y
def modinv(a, m):
gcd, x, y = egcd(a, m)
if gcd != 1:
return None # modular inverse does not exist
else:
return x % m |
from tkinter import *
import win32com.client as wincl
speak = wincl.Dispatch("SAPI.spVoice")
def hi(): # speaking bits
ts = ent.get()
speak.Speak(ts)
root = Tk() # GUI bits
root.title("tktts")
but = Button(root, text="Speak!", command=hi)
lab = Label(root, text="What should i say?")
ent = Entry()
lab.pack()
ent.pack()
but.pack()
root.mainloop()
|
#!/usr/bin/python
# -*- coding:utf-8 -*-
import time
import datetime
# print "%s%s" % (time.strftime("%Y%m%d%H%M%S", time.localtime()),".log")
# print time.time()
# print time.localtime(time.time())
# print time.asctime( time.localtime(time.time()) )
# print time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
# print time.strptime("2016-08-11","%Y-%m-%d")
# print time.mktime(time.strptime("2016-08-11","%Y-%m-%d"))
# #calendar
# import calendar
# cal = calendar.month(2016,1)
# print cal
# import datetime
# print datetime.datetime.now().strftime("%Y%m%d%H%M%S%f")
now = time.time()
today = datetime.date.today() + datetime.timedelta(1)
print today
tomorrow = time.mktime(today.timetuple())
print int(tomorrow - now)
print datetime.datetime.now()
print int( time.mktime(today.timetuple()) - time.time() )
expire_day = datetime.date.today() + datetime.timedelta(days=7)
print expire_day
print int( time.mktime(expire_day.timetuple()) )
|
# Modules used are hashlib: for hashing and deque: for Queue. I did not want to write a new queue when the wheel was already invented
import hashlib
from collections import deque
# I am choosing the sha256 algorithm here as a global configurable. The intention is it can be changed at any time
hash_algorithm = hashlib.sha256
''' The Merkle node defines a node in the merkle tree. It generates a node which points to its primitive nodes(None if leaf nodes) along with the combined transaction hash. '''
class MerkleNode:
def __init__(self, transaction_hash, left, right):
self.transaction_hash = transaction_hash
self.left = left or None
self.right = right or None
def generate_hash(transaction):
# Generate the hash for a single transaction
return hash_algorithm(transaction.encode()).hexdigest()
def create_merkle_nodes(transactions):
# Use the hash values from the transactions to create merkle nodes.
merkle_nodes = []
for transaction in transactions:
merkle_nodes.append(MerkleNode(transaction, None, None))
return merkle_nodes
def generate_padding_node():
# The merkle tree operates on pairs. This function generates a padding node to make sure that the nodes are always even numbers.
hash = generate_hash('0')
return MerkleNode(hash, None, None)
# Generating the Merkle tree.
def generate_merkle_hash(transactions):
transaction_hash_array = []
for transaction in transactions:
transaction_hash_array.append(generate_hash(transaction))
merkle_nodes = deque(create_merkle_nodes(transaction_hash_array))
while len(merkle_nodes) > 1:
if len(merkle_nodes) % 2 != 0:
merkle_nodes.append(generate_padding_node())
length = len(merkle_nodes)
for nodeindex in range(0, (length//2)):
node1 = merkle_nodes.popleft()
node2 = merkle_nodes.popleft()
new_hash = generate_hash(node1.transaction_hash + node2.transaction_hash)
node = MerkleNode(new_hash, node1, node2)
merkle_nodes.append(node)
return merkle_nodes[0].transaction_hash
|
#!/usr/bin/python
"""
Preprocess datasets and generate npz files to be used for training testing.
It is recommended to first read datasets/preprocess/README.md
"""
import argparse
import config as cfg
from datasets.preprocess import coco_extract
parser = argparse.ArgumentParser()
parser.add_argument('--train_files', default=True, action='store_true', help='Extract files needed for training')
parser.add_argument('--eval_files', default=False, action='store_true', help='Extract files needed for evaluation')
if __name__ == '__main__':
args = parser.parse_args()
# define path to store extra files
out_path = cfg.DATASET_NPZ_PATH
openpose_path = cfg.OPENPOSE_PATH
if args.train_files:
# COCO dataset prepreocessing
coco_extract(cfg.COCO_ROOT, openpose_path, out_path)
if args.eval_files:
print(f'not implemented yet!')
# COCO dataset prepreocessing
coco_extract(cfg.COCO_ROOT, openpose_path, out_path)
|
#!/usr/bin/env python
import os
f = open('input/{}.txt'.format(os.path.splitext(os.path.basename(__file__))[0]),'r')
content = f.read()
f.close()
tf = 0
tb = None
i = 1
for c in content:
if(c == '('):
tf += 1
if(c == ')'):
tf -= 1
if(tf == -1 and not tb):
tb = i
i += 1
print("Floor {}".format(tf))
print("First basement visit {}".format(tb))
|
# 13. Даний корінь A1 непорожнього дерева. Подвоїти значення кожної вершини дерева.
class Node(object):
def __init__(self,left = None,right = None,data = None):
self.left = left
self.right = right
self.data = data
def treewidth(tree = None):
if tree == None:
return 0
if tree.data is not None:
tree.data *= 2
if tree.right != None:
treewidth(tree.right)
if tree.left != None:
treewidth(tree.left)
print (tree.data)
# --TEST DATA--
tree1 = Node(Node(Node(Node(),Node(),3),Node(Node(),Node(),3),2), Node(Node(),Node(),2),1)
print(treewidth(tree1)) |
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import gym
import matplotlib.pyplot as plt
import copy
import collections
import random
from torch.distributions import Categorical
# it is actually Dueling DDQN
# hyper-parameters
LR = 0.01
GAMMA = 0.9
op = np.finfo(np.float32).eps.item()
# device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
device = torch.device('cpu')
env = gym.make("CartPole-v1")
env = env.unwrapped
NUM_ACTIONS = env.action_space.n
NUM_STATES = env.observation_space.shape[0]
ENV_A_SHAPE = 0 if isinstance(env.action_space.sample(), int) else env.action_space.sample.shape
def setup_seed(seed):
env.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.cuda.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
torch.backends.cudnn.deterministic = True
# cudnn.benchmark = False
# cudnn.enabled = False
setup_seed(1234)
class Actor(nn.Module):
def __init__(self):
super().__init__()
self.fc1 = nn.Linear(NUM_STATES, 64)
self.fc2 = nn.Linear(64, 128)
self.fc3 = nn.Linear(128, NUM_ACTIONS)
def forward(self, state):
x = torch.tanh(self.fc1(state))
x = torch.tanh(self.fc2(x))
x = self.fc3(x)
policy = F.softmax(x, dim=1)
return policy
class Critic(nn.Module):
def __init__(self):
super(Critic, self).__init__()
self.fc1 = nn.Linear(NUM_STATES, 64)
self.fc2 = nn.Linear(64, 128)
self.fc3 = nn.Linear(128, 1)
def forward(self, state):
out = self.fc1(state)
out = torch.tanh(out)
out = torch.tanh(self.fc2(out))
out = self.fc3(out)
return out
class Agent(nn.Module):
def __init__(self):
super().__init__()
self.actor = Actor().to(device)
self.critic = Critic().to(device)
self.optimizer_Actor = torch.optim.Adam(self.actor.parameters(), lr = 0.001)
self.optimizer_Critic = torch.optim.Adam(self.critic.parameters(), lr = 0.01)
self.loss = nn.SmoothL1Loss()
def choose_action(self, state):
state = torch.Tensor(state).unsqueeze(0)
state = state.to(device)
action_prob = self.actor(state)
#action_prob [batch_sz, acion_dim]
action_prob = Categorical(action_prob)
action = action_prob.sample()
# action [batch_sz]
return action.cpu().item(), action_prob.log_prob(action)
def learn(self, state, next_state, reward, log_prob, I):
self.critic.train()
self.actor.train()
state = torch.Tensor(state).unsqueeze(0).to(device)
next_state = torch.Tensor(next_state).unsqueeze(0).to(device)
state = state.to(device)
next_state = next_state.to(device)
Vs = self.critic(state)
Vs_next = self.critic(next_state)
TD_error = reward + GAMMA*Vs_next - Vs
#updata critic network
loss_critic = self.loss(Vs, reward + GAMMA*Vs_next)
self.optimizer_Critic.zero_grad()
loss_critic.backward()
self.optimizer_Critic.step()
#update actor network
TD_error = TD_error.detach()
loss_actor = -log_prob * I * TD_error
self.optimizer_Actor.zero_grad()
loss_actor.backward()
self.optimizer_Actor.step()
def reward_func(env, x, x_dot, theta, theta_dot):
r1 = (env.x_threshold - abs(x)) / env.x_threshold - 0.5
r2 = (env.theta_threshold_radians - abs(theta)) / env.theta_threshold_radians - 0.5
reward = r1 + r2
return reward
def main():
Actor_Critic = Agent()
episodes = 500
max_time = 200
plt.ion()
fig, ax = plt.subplots()
rewards_list = []
flag = []
time_list = []
for i_episode in range(episodes):
state = env.reset()
ep_rewards = 0
I = 1
for t in range(max_time): # Don't infinite loop while learning
# env.render()
action, log_prob = Actor_Critic.choose_action(state)
next_state, reward, done, _ = env.step(action)
x, x_dot, theta, theta_dot = state
reward = reward_func(env, x, x_dot, theta, theta_dot)
ep_rewards += reward
Actor_Critic.learn(state, next_state, reward, log_prob, I)
I *= GAMMA
state = next_state
if done:
break
rewards_list.append(ep_rewards)
ax.plot(rewards_list, 'g-', label='total_loss')
plt.pause(0.001)
time_list.append(t+1)
if (t+1) == max_time:
flag.append(1)
else:
flag = []
if len(flag) == 10:
print('This Agent is well-trained')
break
if i_episode % 10 == 0:
print("episode: {}, the episode reward is {}, it has played {} times".\
format(i_episode, round(ep_rewards, 3), t+1))
plt.figure()
plt.plot(time_list)
plt.legend(['times it insists'])
if __name__ == '__main__':
main()
|
import json
import os
import sys
from typing import Optional
import click
from pelican_stat.collector import PelicanArticleDataCollector
from pelican_stat.plotter import PelicanDataPlotter
@click.group()
def main() -> None:
pass
@main.command()
@click.argument(
"pelican_conf_path", required=True, default="pelicanconf.py",
)
@click.argument(
"output_path", required=True, default="article_metadata.json",
)
def collect(pelican_conf_path: str, output_path: str) -> None:
"""Collect data from pelican project and export article metadata"""
if not os.path.exists(pelican_conf_path):
print(
f"Configuration file {pelican_conf_path} does not exists", file=sys.stderr
)
sys.exit(1)
data_collector = PelicanArticleDataCollector(pelican_conf_path)
data_collector.export(output_path)
@main.command()
@click.option(
    "--pelican-conf-path",
    help=(
        "path to pelican site configuration file (e.g., pelicanconf.py). "
        "note that either --pelican-conf-path or --articles-metadata-path "
        "needs to be provided"
    ),
)
@click.option(
    "--articles-metadata-path",
    help=(
        "path to collected articles metadata. "
        "note that either --pelican-conf-path or --articles-metadata-path "
        "needs to be provided"
    ),
)
@click.option(
    "--output-path",
    required=True,
    default="trend_plot.html",
    help="path for the output plot",
)
@click.option(
    "--year", required=False, help="plot only the data for certain year", type=int
)
@click.option(
    # Fix: help-text typo "categoyy" -> "category".
    "--groupby-category", required=False, help="group data by category", is_flag=True,
)
def plot(
    # Fix: parameters defaulting to None are Optional, not plain str.
    pelican_conf_path: Optional[str] = None,
    articles_metadata_path: Optional[str] = None,
    output_path: str = "trend_plot.html",
    year: Optional[int] = None,
    groupby_category: bool = False,
) -> None:
    """Draw trend plot based on the frequency of new posts.

    Exactly one of ``--pelican-conf-path`` (live collection) or
    ``--articles-metadata-path`` (previously exported JSON) must be given;
    exits with status 1 otherwise, or when the given path does not exist.
    """
    if not pelican_conf_path and not articles_metadata_path:
        print(
            "Neither pelican_conf_path nor articles_metadata_path is provided",
            file=sys.stderr,
        )
        sys.exit(1)
    if (pelican_conf_path and not os.path.exists(pelican_conf_path)) or (
        articles_metadata_path and not os.path.exists(articles_metadata_path)
    ):
        print(
            f"{pelican_conf_path or articles_metadata_path} does not exist",
            file=sys.stderr,
        )
        sys.exit(1)
    # Prefer live collection from the pelican config; fall back to a saved dump.
    if pelican_conf_path:
        data_collector = PelicanArticleDataCollector(pelican_conf_path)
        articles_info = data_collector.extract_articles_info()
    elif articles_metadata_path:
        with open(articles_metadata_path, "r") as metadata_file:
            articles_info = json.load(metadata_file)
    data_plotter = PelicanDataPlotter(articles_info)
    data_plotter.draw_trend_plot(
        output_path, year=year, groupby_category=groupby_category
    )
# Run the click CLI when executed as a script.
if __name__ == "__main__":
    main()
|
import os
from api import app, db
from datetime import datetime
from api.models import Todo
from ariadne import (
load_schema_from_path,
make_executable_schema,
graphql_sync,
snake_case_fallback_resolvers,
ObjectType,
)
from ariadne.constants import PLAYGROUND_HTML
from flask import request, jsonify
from api.queries import resolve_todos, resolve_todo
from api.mutations import (
resolve_create_todo,
resolve_mark_done,
resolve_delete_todo,
resolve_update_due_date,
)
# Bind each GraphQL field to its resolver function.
query = ObjectType("Query")
query.set_field("todos", resolve_todos)
query.set_field("todo", resolve_todo)
mutation = ObjectType("Mutation")
mutation.set_field("createTodo", resolve_create_todo)
mutation.set_field("markDone", resolve_mark_done)
mutation.set_field("deleteTodo", resolve_delete_todo)
mutation.set_field("updateDueDate", resolve_update_due_date)
# Build the executable schema from the SDL file plus resolver bindings;
# snake_case_fallback_resolvers maps camelCase fields onto snake_case attributes.
type_defs = load_schema_from_path("schema.graphql")
schema = make_executable_schema(
    type_defs, query, mutation, snake_case_fallback_resolvers
)
@app.route("/graphql", methods=["GET"])
def graphql_playground():
    # Serve the GraphQL Playground UI for interactive queries in a browser.
    return PLAYGROUND_HTML, 200
@app.route("/graphql", methods=["POST"])
def graphql_server():
    """Execute an incoming GraphQL request against the schema and return JSON."""
    payload = request.get_json()
    success, result = graphql_sync(
        schema, payload, context_value=request, debug=app.debug
    )
    return jsonify(result), (200 if success else 400)
def insert_data() -> None:
    """Seed the database with one example Todo if the table is empty.

    Creates the tables on first run, inserts a single row and commits.
    """
    # Fix: previously len(query(...).all()) == 0 loaded every row just to
    # test emptiness; first() fetches at most one row.
    if db.session.query(Todo).first() is None:
        db.create_all()
        today = datetime.today().date()
        todo = Todo(description="Run a marathon", due_date=today, completed=False)
        # NOTE(review): return value of to_dict() is discarded -- confirm the
        # call has a needed side effect, otherwise it can be removed.
        todo.to_dict()
        db.session.add(todo)
        db.session.commit()
        print("table created")
def main():
    # Seed the database once at startup (no-op if data already exists).
    insert_data()
if __name__ == "__main__":
    # PORT env var overrides the default development port 5001.
    port = int(os.environ.get("PORT", 5001))
    app.run(debug=True, host="0.0.0.0", port=port)
|
from django.shortcuts import render
from django.shortcuts import redirect
# Create your views here.
def index(req):
    # Render the landing page template.
    return render(req, 'index.html')
def not_a_rickroll(req):
    # Despite the name: redirects to Rick Astley's "Never Gonna Give You Up".
    return redirect('https://www.youtube.com/watch?v=dQw4w9WgXcQ')
# 7.07, exercise 3: add two more fruits to the set
meva = {"olma", "banan", "apelsin", "nok"}
meva |= {"shaftoli", "gilos"}
print(meva)
|
''' Creates clusters, text files and wordclouds from xkcd Twitter user descriptions '''
# NOTE(review): Python 2 script -- it uses a print statement below, the
# long-removed pandas drop_duplicates(cols=...) keyword, and the legacy
# module-level wordcloud API (process_text/fit_words/draw). It will not run
# under Python 3 or current library versions without porting.
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.preprocessing import Normalizer
from sklearn.cluster import KMeans
import pandas as pd
import numpy as np
import sys
import wordcloud
import os
data = pd.read_csv('xkcd_tweets.csv')
# Keep English tweets only and de-duplicate by user.
data = data[data['language'] == 'English'].drop_duplicates(cols='user_id')
dataset = data['descriptions'].dropna().values
# Vectorize
true_k = 17 # use NaturalLanguage.py to determine optimal K
vectorizer = TfidfVectorizer(max_df=1.0, max_features=None,
                                 stop_words='english', norm='l2', use_idf=True)
X = vectorizer.fit_transform(dataset)
# Reduce Dimensions with SVD
lsa = TruncatedSVD(25) # use NaturalLanguage.py to determine optimal rank
X = lsa.fit_transform(X)
# Re-normalize rows after the (non-norm-preserving) SVD projection.
X = Normalizer(copy=False).fit_transform(X)
# Cluster
km = KMeans(n_clusters=true_k, init='k-means++', max_iter=100, n_init=1, verbose=False)
km.fit(X)
# Create cluster outputs
output_dict = {'cluster': km.labels_, 'values': dataset}
output_df = pd.DataFrame(output_dict)
# Create text files: one file per cluster with its member descriptions.
for i in range(true_k):
    # Print cluster size and its percentage of the corpus (Python 2 print).
    print len(output_df[output_df.cluster == i]), round(100*len(output_df[output_df.cluster == i]) / float(len(output_df)), 2)
    cluster_text = output_df['values'][output_df.cluster == i].values
    temp = "cluster " + str(i) + ".txt"
    with open(temp, "w") as outfile:
        for j in cluster_text:
            outfile.write("%s\n" % j)
# Create wordclouds
for i in range(true_k):
    text = open('cluster ' + str(i) + '.txt').read()
    # Separate into a list of (word, frequency).
    words = wordcloud.process_text(text)
    # Compute the position of the words.
    elements = wordcloud.fit_words(words, font_path='/Library/Fonts/Arial Black.ttf', width=600, height=300)
    # Draw the positioned words to a PNG file.
    wordcloud.draw(elements, 'cluster ' + str(i) + '.png', font_path="/Library/Fonts/Arial Black.ttf", width=600, height=300)
|
# Allows the user to view the constructed HWP bacxground images. Shows all four
# HWP rotations associated with a single IPPA angle
#
import os
import sys
import glob
import numpy as np
from astropy.io import ascii
from astropy.table import Table as Table
from astropy.table import Column as Column
from astropy.convolution import convolve, convolve_fft, Gaussian2DKernel
from astropy.stats import gaussian_fwhm_to_sigma, sigma_clipped_stats
from photutils import detect_threshold, detect_sources
from scipy.ndimage.filters import median_filter, gaussian_filter
import matplotlib.pyplot as plt
from astropy.visualization import ZScaleInterval
# Add the AstroImage class
import astroimage as ai
# Add the header handler to the BaseImage class
from Mimir_header_handler import Mimir_header_handler
# Register the Mimir-specific header handler and select the instrument profile.
ai.reduced.ReducedScience.set_header_handler(Mimir_header_handler)
ai.set_instrument('mimir')
# This is the location where all pyPol data will be saved
pyPol_data = 'C:\\Users\\Jordan\\FITS_data\\Mimir_data\\pyPol_Reduced\\201611\\'
polarimetryDir = os.path.join(pyPol_data, 'Polarimetry')
stokesDir = os.path.join(polarimetryDir, 'stokesImages')
################################################################################
# Determine which parts of the fileIndex pertain to science images
fileList = glob.glob(os.path.join(stokesDir, '*I.fits'))
imgList = [ai.reduced.ReducedScience.read(f) for f in fileList]
imgList = np.array(imgList)
# Loop through each image and construct a list of pixel positions
# (one (yy, xx) coordinate grid per image, reused by the event handlers below).
xxList = []
yyList = []
for img in imgList:
    ny, nx = img.shape
    yy, xx = np.mgrid[0:ny, 0:nx]
    xxList.append(xx)
    yyList.append(yy)
#******************************************************************************
# Define the event handlers for clicking and keying on the image display
#******************************************************************************
def on_click(event):
    """Mouse handler: paint (left button) or erase (middle/right button) a
    circular mask region centered on the clicked pixel, then redraw."""
    global xxList, yyList, imgList, imgNum
    global fig, brushSize, ax, maskImg, axImg
    # Grab the position of the click
    x, y = event.xdata, event.ydata
    # Retrieve the image pixel positions
    yy, xx = yyList[imgNum], xxList[imgNum]
    # Compute distances from the click and update mask array
    # (brush radius is brushSize*5 pixels).
    dist = np.sqrt((xx - x)**2 + (yy - y)**2)
    maskInds = np.where(dist < brushSize*5)
    if event.button == 1:
        tmpData = maskImg.data
        tmpData[maskInds] = 1
        maskImg.data = tmpData
    if (event.button == 2) or (event.button == 3):
        tmpData = maskImg.data
        tmpData[maskInds] = 0
        maskImg.data = tmpData
    # Update contour plot (clear old lines redo contouring)
    ax.collections = []
    ax.contour(xx, yy, maskImg.data, levels=[0.5], colors='white', alpha = 0.2)
    # Update the display
    fig.canvas.draw()
################################################################################
# Define a function to handle what sholud be done whenever a key is pressed
################################################################################
def on_key(event):
    """Keyboard handler: '1'-'6' set brush size, left/right cycle images
    (loading or creating the matching mask), 'enter' saves the mask and
    'backspace' clears it."""
    global xxList, yyList, imgList, imgNum
    global fig, brushSize, maskImg
    global stokesDir
    # Handle brush sizing
    if event.key == '1':
        brushSize = 1
    elif event.key == '2':
        brushSize = 2
    elif event.key == '3':
        brushSize = 3
    elif event.key == '4':
        brushSize = 4
    elif event.key == '5':
        brushSize = 5
    elif event.key == '6':
        brushSize = 6
    # Increment the image number
    if event.key == 'right' or event.key == 'left':
        if event.key == 'right':
            #Advance to the next image
            imgNum += 1
            # If there are no more images, then loop back to begin of list
            if imgNum > imgList.size - 1:
                imgNum = 0
        if event.key == 'left':
            #Move back to the previous image
            imgNum -= 1
            # If there are no more images, then loop back to begin of list
            if imgNum < 0:
                imgNum = imgList.size - 1
        # Build the image scaling intervals
        img = imgList[imgNum]
        zScaleGetter = ZScaleInterval()
        thisMin, thisMax = zScaleGetter.get_limits(img.data)
        # Stretch the upper display limit well beyond the zscale estimate.
        thisMax *= 10
        #*******************************
        # Update the displayed mask
        #*******************************
        # Check which mask files might be usable...
        baseFile = os.path.basename(img.filename).split('_I')[0]
        maskFile = os.path.join(stokesDir,
            baseFile + '_mask.fits')
        if os.path.isfile(maskFile):
            # If the mask for this file exists, use it
            print('using this mask: ',os.path.basename(maskFile))
            maskImg = ai.reduced.ReducedScience.read(maskFile)
        else:
            # If none of those files exist, build a blank slate
            # Build a mask template (0 = not masked, 1 = masked)
            maskImg = ai.reduced.ReducedScience(
                (img.data*0).astype(np.int16),
                header = img.header
            )
            maskImg.filename = maskFile
        # Grab the pixel positons
        yy, xx, = yyList[imgNum], xxList[imgNum]
        # Update contour plot (clear old lines redo contouring)
        ax.collections = []
        ax.contour(xx, yy, maskImg.data, levels=[0.5], colors='white', alpha = 0.2)
        # Reassign image display limits
        axImg.set_clim(vmin = thisMin, vmax = thisMax)
        # Display the new images and update extent
        axImg.set_data(img.data)
        axImg.set_extent((xx.min(), xx.max(), yy.min(), yy.max()))
        # Update the annotation
        ax.set_title(os.path.basename(img.filename))
        # Update the display
        fig.canvas.draw()
    # Save the generated mask
    if event.key == 'enter':
        # Write the mask to disk
        print('Writing mask for file {}'.format(maskImg.filename))
        maskImg.write(clobber=True)
    # Clear out the mask values
    if event.key == 'backspace':
        # NOTE(review): bare except silently ignores all failures here --
        # presumably guarding against xx/yy being undefined; confirm and narrow.
        try:
            # Clear out the mask array
            maskImg.data = (maskImg.data*0).astype(np.int16)
            # Update contour plot (clear old lines redo contouring)
            ax.collections = []
            ax.contour(xx, yy, maskImg.data, levels=[0.5], colors='white', alpha = 0.2)
            # Update the display
            fig.canvas.draw()
        except:
            pass
#******************************************************************************
# This script will run the mask building step of the pyPol reduction
#******************************************************************************
# Interactive mask-building session: show the first image, hook up the
# mouse/keyboard handlers defined above, and block until the window closes.
fig = plt.figure(figsize=(10,9))
# Create the first axis and make the x-axis labels invisible
ax = plt.subplot(111)
plt.setp(ax.get_xticklabels(), fontsize = 12)
plt.setp(ax.get_yticklabels(), fontsize = 12)
# Rescale the figure and setup the spacing between images
plt.subplots_adjust(left = 0.04, bottom = 0.04, right = 0.95, top = 0.96,
    wspace = 0.02, hspace = 0.02)
# Initalize the image number and brush size
imgNum = 0
brushSize = 3
# Start by grabbing the corresponding group names and IPPAs for those indices
img = imgList[imgNum]
# Build (or read) an initial mask
baseFile = os.path.basename(img.filename).split('_I')[0]
maskFile = os.path.join(stokesDir,
    baseFile + '_mask.fits')
if os.path.isfile(maskFile):
    # If the mask for this file exists, use it
    print('using this mask: ',os.path.basename(maskFile))
    maskImg = ai.reduced.ReducedScience.read(maskFile)
else:
    # If none of those files exist, build a blank slate
    # Build a mask template (0 = not masked, 1 = masked)
    maskImg = ai.reduced.ReducedScience(
        (img.data*0).astype(np.int16),
        header = img.header
    )
    maskImg.filename = maskFile
# Populate each axis with its image
axImg = img.show(axes = ax, cmap='viridis', noShow = True)
ax.set_title(os.path.basename(img.filename))
# Connect the event manager...
cid1 = fig.canvas.mpl_connect('button_press_event',on_click)
cid2 = fig.canvas.mpl_connect('key_press_event', on_key)
# NOW show the image (without continuing execution)
# plt.ion()
plt.show()
# plt.ioff()
#
# import pdb; pdb.set_trace()
# Disconnect the event manager and close the figure
fig.canvas.mpl_disconnect(cid1)
fig.canvas.mpl_disconnect(cid2)
# Close the plot
plt.close()
print('Done!')
|
from django.db import models
# Create your models here.
class ProjectConfig(models.Model):
    """Singleton-style site setting controlling project submissions."""
    # Site-wide toggle: whether new project submissions are being accepted.
    accept_project = models.BooleanField(default=False, blank=True)
    def __str__(self):
        return 'Check if accepting project'
|
import pandas as pd
import datetime as dt
import numpy as np
from scipy.stats import norm
# Market-quoted call option chain (strikes and prices); expiry 2015-01-17,
# quoted 2014-08-15.
Call={'Strike':[20.00,23.00,25.00,28.00,30.00,33.00,35.00,38.00,40.00,42.00,45.00,47.00,50.00,52.00,55.00,57.50,60.00,
                62.5,65.00,67.50,70.00,75.00,80.00,85.00,90.00],
      'Call price':[37.8,34.65,32.55,29.60,26.95,24.26,23.7,20.5,16.9,16.9,11.8,9.81,7.2,5.3,3.55,2.16,1.2,0.65,0.29,0.15,0.08,
                    0.04,0.03,0.02,0.02]}
Call=pd.DataFrame(Call)
DaysToExpiry=(dt.datetime(2015,1,17)-dt.datetime(2014,8,15)).days
# Time to expiry in years (ACT/365).
t=YearToExpiry=DaysToExpiry/365
S=56.75
r=0.0025
# Broadcast the scalar market inputs onto every row of the chain.
Call['Stock price'],Call['Time to Maturity'],Call['Rate']=S,t,r
# Inputs: s stock price, k strike, c call price, t time to expiry (years),
# vol volatility, r interest rate.
def IV(s,k,c,t,vol,r):
    """Black-Scholes call value minus the observed price c.

    The implied volatility is the root of this function in vol.
    """
    sqrt_t = np.sqrt(t)
    d1 = (np.log(s / k) + (r + 0.5 * vol ** 2) * t) / (vol * sqrt_t)
    d2 = d1 - vol * sqrt_t
    bs_price = s * norm.cdf(d1) - k * np.exp(-r * t) * norm.cdf(d2)
    return bs_price - c
# Vega: first derivative of the Black-Scholes call price w.r.t. volatility.
def IVPrime(s,k,c,t,vol,r):
    """Return s * sqrt(t) * phi(d1); c is unused but kept for a uniform signature."""
    d1 = (np.log(s / k) + (r + 0.5 * vol ** 2) * t) / (vol * np.sqrt(t))
    return s * np.sqrt(t) * np.exp(-0.5 * d1 ** 2) / np.sqrt(2.0 * np.pi)
#Implement Newton's method
def NewtonRoot(s,k,c,t,r,n,x): #x is initial guess
    """Solve IV(s,k,c,t,vol,r) = 0 for vol via Newton-Raphson.

    n is the maximum iteration count; x is the initial guess. Returns the
    last iterate; convergence is declared when successive iterates differ
    by less than 1e-6.
    """
    for i in range(n):
        xnew = x - IV(s, k, c, t, x, r) / IVPrime(s, k, c, t, x, r)
        if abs(xnew - x) < 1e-6:
            return xnew
        x = xnew
    # Fix: the original returned `xnew`, which is unbound when n <= 0
    # (UnboundLocalError). Returning x yields the same last iterate when
    # the loop exhausts, and the initial guess when n <= 0.
    return x
#Apply Newton's method to find Implied Volatility
# Row layout after the assignments above: [Strike, Call price, Stock price,
# Time to Maturity, Rate] -- the positional x[i] indexing relies on that order.
Call['Implied Volatility']=Call.apply(lambda x: NewtonRoot(x[2],x[0],x[1],x[3],x[4],100,0.9),axis=1)
# NOTE(review): bare expression below only displays in a notebook; it is a no-op in a script.
Call
#plot
# Fix: `plt` was used below but matplotlib was never imported in this script.
import matplotlib.pyplot as plt

# Plot the implied-volatility smile across strikes.
plt.figure(figsize=(12,8))
plt.plot(Call['Strike'],Call['Implied Volatility'],label='Call Implied Volatility')
plt.xlabel('Strike')
plt.ylabel('Implied Volatility')
plt.legend()
plt.show()
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, SubmitField, TextAreaField, SelectField
from wtforms.validators import InputRequired, DataRequired
from flask_wtf.file import FileField, FileRequired, FileAllowed
class PropertyForm(FlaskForm):
    """Form for creating or editing a property listing."""
    title = StringField('Property Title', validators=[DataRequired()])
    bedroom = StringField('No. of Rooms', validators=[DataRequired()])
    bathroom = StringField('No. of Bathrooms', validators=[DataRequired()])
    location = StringField('Location', validators=[DataRequired()])
    price = StringField('Price', validators=[DataRequired()])
    # Only image uploads are accepted.
    photo = FileField('Photo', validators=[FileRequired(), FileAllowed(['jpg','png','jpeg'],'Images Only!')])
    description = TextAreaField('Description', validators=[DataRequired()])
    # NOTE(review): choices use int values but SelectField coerces to str by
    # default -- confirm whether coerce=int is needed for validation to pass.
    proptype= SelectField('Property Type', choices=[(1,'House'), (2,'Apartment')], validators=[DataRequired()])
from os import listdir
from os.path import exists, isdir, join, splitext
import numpy as np
from nnmnkwii.datasets import FileDataSource
available_speakers = ["fujitou", "tsuchiya", "uemura", "hiroshiba"]
available_emotions = ["angry", "happy", "normal"]
def _get_dir(speaker, emotion):
return "{}_{}".format(speaker, emotion)
class TranscriptionDataSource(FileDataSource):
    """Transcription data source for VoiceStatistics dataset
    Users are expected to inherit the class and implement ``collect_features``
    method, which defines how features are computed given a transcription.
    Args:
        data_root (str): Data root.
        column (str): ``sentence``, ``yomi`` or ``monophone``.
        max_files (int): Total number of files to be collected.
    Attributes:
        transcriptions (list): Transcriptions.
    """
    # Maps a column name to its tab-separated field index in balance_sentences.txt.
    column_map = {"sentence_id": 0, "sentence": 1, "yomi": 2, "monophone": 3}
    def __init__(self, data_root, column="sentence", max_files=None):
        path = join(data_root, "balance_sentences.txt")
        if not exists(path):
            raise RuntimeError(
                'balance_sentences.txt doesn\'t exist at "{}"'.format(path)
            )
        self.transcriptions = []
        self.max_files = max_files
        if column not in self.column_map:
            # NOTE(review): 'sentense' in this message is a typo for 'sentence';
            # left untouched because it is a runtime string.
            raise ValueError(
                "Not supported column {}. It should be one of 'sentense',"
                " 'yomi' or 'monophone'.".format(column)
            )
        with open(path) as f:
            for line in f:
                # header
                if line.startswith("sentence_id"):
                    continue
                v = line.split("\t")[self.column_map[column]].strip()
                self.transcriptions.append(v)
        # The dataset ships exactly 100 balanced sentences.
        assert len(self.transcriptions) == 100
    def collect_files(self):
        """Collect text transcriptions.
        .. warning::
            Note that it returns list of transcriptions (str), not file paths.
        Returns:
            list: List of text transcription.
        """
        if self.max_files is None:
            return self.transcriptions
        else:
            return self.transcriptions[: self.max_files]
class WavFileDataSource(FileDataSource):
    """Wav file data source for Voice-statistics dataset.
    The data source collects wav files from voice-statistics.
    Users are expected to inherit the class and implement
    ``collect_features`` method, which defines how features are computed
    given a wav file path.
    Args:
        data_root (str): Data root
        speakers (list): List of speakers to load. Supported names of speaker
          are ``fujitou``, ``tsuchiya`` and ``uemura``.
        labelmap (dict[optional]): Dict of speaker labels. If None,
          it's assigned as incrementally (i.e., 0, 1, 2) for specified
          speakers.
        max_files (int): Total number of files to be collected.
        emotions (list): List of emotions we use. Supported names of emotions
          are ``angry``, ``happy`` and ``normal``.
    Attributes:
        labels (numpy.ndarray): List of speaker identifiers determined by
          labelmap. Stored in ``collect_files``.
    """
    def __init__(
        self, data_root, speakers, labelmap=None, max_files=None, emotions=None
    ):
        if emotions is None:
            emotions = ["normal"]
        # Validate requested speakers/emotions against the supported sets.
        for speaker in speakers:
            if speaker not in available_speakers:
                raise ValueError(
                    "Unknown speaker '{}'. It should be one of {}".format(
                        speaker, available_speakers
                    )
                )
        for emotion in emotions:
            if emotion not in available_emotions:
                raise ValueError(
                    "Unknown emotion '{}'. It should be one of {}".format(
                        emotion, available_emotions
                    )
                )
        self.data_root = data_root
        self.speakers = speakers
        self.emotions = emotions
        # Default labelmap: speakers numbered 0, 1, 2, ... in given order.
        if labelmap is None:
            labelmap = {}
            for idx, speaker in enumerate(speakers):
                labelmap[speaker] = idx
        self.labelmap = labelmap
        self.labels = None
        self.max_files = max_files
    def collect_files(self):
        """Collect wav files for specific speakers.
        Returns:
            list: List of collected wav files.
        """
        paths = []
        labels = []
        # Split the file budget evenly across every (speaker, emotion) directory.
        if self.max_files is None:
            max_files_per_dir = None
        else:
            max_files_per_dir = (
                self.max_files // len(self.emotions) // len(self.speakers)
            )
        for speaker in self.speakers:
            dirs = list(
                map(lambda x: join(self.data_root, _get_dir(speaker, x)), self.emotions)
            )
            files = []
            for d in dirs:
                if not isdir(d):
                    raise RuntimeError("{} doesn't exist.".format(d))
                # Keep only .wav files, sorted for deterministic ordering.
                fs = [join(d, f) for f in listdir(d)]
                fs = list(filter(lambda x: splitext(x)[1] == ".wav", fs))
                fs = sorted(fs)
                fs = fs[:max_files_per_dir]
                files.extend(fs)
            for f in files:
                paths.append(f)
                labels.append(self.labelmap[speaker])
        self.labels = np.array(labels, dtype=np.int32)
        return paths
# For compat, remove this after v0.1.0
VoiceStatisticsWavFileDataSource = WavFileDataSource
|
#!/usr/bin/env python3.6
import data
import argparse
import os
import fi_tools
import sys
import itertools
import config
try:
homedir = os.environ['HOME']
except:
print('Env variable HOME is missing')
sys.exit(1)
def check(appdir, resdir, tool, config, wait, apps, action, instrument, nthreads, inputsize, start, end, generate):
    """Build the list of pending experiments as [trialdir, timeout, exelist, cleanstr].

    Scans the results tree, skips trials that already have a ret.txt, derives a
    per-app timeout from earlier profiling runs (3x the mean profiling time),
    and assembles the quoted command line for each remaining trial. Returns the
    experiments sorted by timeout, longest first.
    """
    exps=[]
    # Create fault injection experiment tuples
    for app in apps:
        basedir = '%s/%s/%s/%s/%s/%s/%s/%s/%s/'%(resdir, tool, config, wait, app, action, instrument, nthreads, inputsize)
        if action == 'fi' or action == 'fi-0' or action == 'fi-1-15':
            # get timeout
            profiledir = '%s/%s/%s/%s/%s/%s/%s/%s/%s'%(resdir, tool, config, wait, app, 'profile', instrument, nthreads, inputsize)
            fname = '/%s/mean_time.txt'%(profiledir)
            with open(fname, 'r') as f:
                proftime = float( f.read() )
            #print('Read mean profiling time: %.2f, setting timeout 10x: %.2f'%(timeout, timeout*20) )
            timeout = round(3 * proftime, 2)
        else: # profile, look opportunistically for previous runs
            # get timeout
            profiledir = '%s/%s/%s/%s/%s/%s/%s/%s/%s/1/'%(resdir, tool, config, wait, app, 'profile', instrument, nthreads, inputsize)
            fname = '/%s/time.txt'%(profiledir)
            if os.path.isfile( fname ):
                with open(fname, 'r') as f:
                    proftime = float( f.read() )
                # XXX: PINFI instruction counts BB, but faultinjections is INS! Use a large timeout
                #print('Read mean profiling time: %.2f, setting timeout 10x: %.2f'%(timeout, timeout*20) )
                timeout = round(3 * proftime, 2)
            else:
                # No previous run: 0 means "unknown", handled by the caller.
                timeout = 0
        for trial in range(start, end+1):
            trialdir = basedir + '/' + str(trial) +'/'
            #print(trialdir)
            # Skip already done experiments
            #print('CHECK to skip %s/ret.txt'%(trialdir) )
            if os.path.isfile(trialdir+'/ret.txt'):
                #print(trialdir)
                print('\r','Found %10s trial %4s'%( app, trial ), end="" )
                continue
            #print('Adding %s trial %s'%( app, trial ) )
            if action == 'profile':
                # create trialdir
                if generate:
                    if not os.path.exists(trialdir):
                        os.makedirs(trialdir)
            elif action == 'fi' or action == 'fi-0' or action == 'fi-1-15':
                # FI trials require the directories and targets produced by generate-fi-samples.
                assert os.path.exists(trialdir), 'Trialdir %s does not exist, forgot to run generate-fi-samples?'%(trialdir)
                assert os.path.isfile(trialdir + '/'+ fi_tools.files[tool]['target']),'Trialdir %s does not exist, forgot to run generate-fi-samples?'%(trialdir)
                #assert not os.path.isfile(trialdir + '/'+ fi_tools.files[args.tool]['injection']),\
                #'Reproducible injection is not supported: ' + trialdir + '/' + fi_tools.files[args.tool]['injection']
                #if os.path.isfile(trialdir + '/'+ fi_tools.files[tool]['injection']):
                #print('WARNING: Reproducible injection is not supported, found injection file')
                #os.remove(trialdir + '/'+ fi_tools.files[tool]['injection'])
            else:
                assert False, 'Invalid action:' + action
            # Create executable
            ## Find the program binary to run
            if tool == 'pinfi' or tool == 'golden' or tool == 'pinfi-detach':
                compiled = 'golden'
            elif tool == 'safire':
                if instrument == 'omplib':
                    compiled = 'golden'
                else:
                    compiled = 'safire'
            elif tool == 'refine':
                if instrument == 'omplib':
                    compiled = 'golden'
                else:
                    compiled = 'refine'
            else:
                print('Unknown tool' + tool)
                sys.exit(1)
            rootdir = '%s/%s/%s/'%(appdir, compiled, config)
            ## Build the exelist
            ### Any tool specific exelist header
            fi_exelist=[]
            if tool in fi_tools.exelist:
                if config == 'serial':
                    fi_exelist = fi_tools.exelist[tool][config][action]
                else:
                    fi_exelist = fi_tools.exelist[tool][config][action][instrument]
            #print(fi_exelist)
            ### Patch APPDIR and NTHREADS if needed
            # XXX: replace $APPDIR, needed only for CoMD
            exelist = [ s.replace( '$APPDIR', '%s/%s'%(rootdir, app) ) for s in data.programs[config][app]['exec'][inputsize] ]
            # XXX: replace NTHREADS, needed only for XSBench omp
            exelist = [ s.replace( '$NTHREADS', nthreads ) for s in exelist ]
            # XXX: note using rootdir, program binary in data is relative to that
            exelist = fi_exelist + [ rootdir + exelist[0] ] + exelist[1:]
            exelist = '"%s"'%(' '.join(exelist))
            #print('\nexelist< %s >\n'%(exelist))
            # XXX: add cleanup to avoid disk space problems
            cleanstr = '"%s"'%(data.programs[config][app]['clean'])
            ## Append to experiments (must be string list)
            if action == 'profile':
                exps.append( [trialdir, '0', exelist, cleanstr] )
            else:
                exps.append( [trialdir, str(timeout), exelist, cleanstr] )
            #if verbose:
            #    print(runenv + ['-r', trialdir, str(timeout), exelist])
    #sys.exit(123)
    #print("==== experiments ====")
    #print(exps)
    #print("==== end experiments ====")
    # Longest-running first: helps the first-fit-decreasing packing downstream.
    exps = sorted( exps, key=lambda exp: float(exp[1]), reverse=True )
    return exps
def msub(nodes, partition, walltime, ntasks, env, chunk, tool, action, config, wait, nthreads, instrument, inputsize, chunkno, start, end):
    """Write a Moab (#MSUB) job script that launches the given experiment chunk via srun.py.

    walltime is in seconds; env and chunk are flat argument lists forwarded to
    $SCRIPTDIR/srun.py. The script name encodes all experiment parameters.
    """
    # Split walltime (seconds) into h:m:s for the #MSUB walltime directive.
    m, s = divmod(walltime, 60)
    h, m = divmod(m, 60)
    #print('%02d:%02d:%02d'%(h, m, s) )
    fname = 'submit-moab-%s-%s-%s-%s-%s-%s-%s-%s-%s-%s.sh'%(tool, action, config, wait, nthreads, instrument, inputsize, chunkno, start, end )
    print(fname)
    with open(fname, 'w') as f:
        filestr = '#!/bin/bash\n'
        filestr += '#MSUB -l nodes=' + str(nodes) + '\n'
        filestr += '#MSUB -l partition=cab\n'
        filestr += '#MSUB -l walltime=%02d:%02d:%02d\n'%( h, m, s )
        filestr += '#MSUB -q p%s\n'%( partition )
        filestr += '#MSUB -V\n'
        filestr += '#MSUB -o /usr/workspace/wsb/ggeorgak/moab.out.%j.%N\n'
        filestr += '#MSUB -e /usr/workspace/wsb/ggeorgak/moab.err.%j.%N\n'
        filestr += 'date\n'
        filestr += '$SCRIPTDIR/srun.py -N %s -p %s -n %s '%(nodes, partition, ntasks)
        filestr += ' '.join(env)
        filestr += ' '
        filestr += ' '.join(chunk)
        filestr += '\n'
        filestr += 'date\n'
        filestr += 'echo "MSUB COMPLETED"\n'
        f.write(filestr)
# TODO: put descriptive names, remove args access
def generate_jobs(nodes, partition, timelimit, exps, config, wait, tool, action, instrument, nthreads, inputsize, start, end):
    """Pack experiments into time-limited chunks (first-fit decreasing) and
    emit one Moab job script per group of nodes*ntasks chunks.

    exps is the sorted list from check(); timelimit is the per-chunk wall-time
    budget in seconds. Experiments with unknown runtime (timeout 0) are
    budgeted one hour.
    """
    nexps = len(exps)
    # Tasks per node depend on threads per task (16 cores assumed per node).
    if nthreads == '' or nthreads == '1':
        ntasks = 16
    elif nthreads == '8':
        ntasks = 2
    elif nthreads == '16':
        ntasks = 1
    else:
        print('Unrecognize nthreads parameter %s'%( nthreads ) )
        sys.exit(1)
    env=[]
    if config == 'omp':
        # ggout KMP_AFFINITY
        #env += [ '-e', 'OMP_NUM_THREADS', nthreads, '-e', 'OMP_PROC_BIND', 'close', '-e', 'OMP_WAIT_POLICY', 'passive', '-e', 'KMP_AFFINITY', 'verbose' ]
        env += [ '-e', 'OMP_NUM_THREADS', nthreads, '-e', 'OMP_PROC_BIND', 'close', '-e', 'OMP_WAIT_POLICY', wait ]
    if tool == 'safire':
        if instrument == 'all' or instrument == 'omplib':
            env += [ '-e', 'LD_LIBRARY_PATH', homedir + '/usr/local/safire/lib:' + homedir + '/usr/local/lib' ]
        else:
            env += [ '-e', 'LD_LIBRARY_PATH', homedir + '/usr/local/lib' ]
    if tool == 'refine':
        if instrument == 'all' or instrument == 'omplib':
            env += [ '-e', 'LD_LIBRARY_PATH', homedir + '/usr/local/refine/lib:' + homedir + '/usr/local/lib' ]
        else:
            env += [ '-e', 'LD_LIBRARY_PATH', homedir + '/usr/local/lib' ]
    # First Fit Decreasing bin packing
    chunks = []
    for e in exps:
        newchunk = True
        time = e[1]
        time = float(time)
        if time == 0:
            # Unknown runtime: budget a full hour for packing purposes.
            time = 3600
        for i, (t, s, c) in enumerate(chunks):
            if t + time <= timelimit:
                chunks[i] = ( t+time, s+1, c+[e] )
                newchunk = False
                # Fix: first-fit must stop at the FIRST chunk with room.
                # Without this break the experiment was appended to every
                # chunk that had capacity, duplicating runs.
                break
        if newchunk:
            chunks.append( ( time, 1, [e] ) )
    #for t, s, c in chunks: # ggout
    #    print('===== CHUNK ====')
    #    print(c)
    #    print( 't %d len(c) %d'%( t, s ) )
    #    print('===== END CHUNK ====')
    #    break
    print( 'total %d'%( len( chunks ) ) )
    # Group by tasks in node
    for i, chunk_group in enumerate( get_chunk(chunks, nodes*ntasks) ):
        #print('=== CHUNK GROUP ===')
        #print(chunk_group)
        #print('=== END CHUNK GROUP ===')
        runlist = []
        walltime = 0
        task = 0
        # create the runlist arguments
        for t, s, clist in chunk_group:
            for ci in clist:
                runlist += ( ['-r'] + ci )
            task = ( task + 1 ) % ntasks
            # Job wall time is the longest chunk in the group.
            walltime = max(walltime, t)
        #print('==== RUNLIST ====')
        #print(runlist)
        #print('==== END RUNLIST ====')
        # check if it needs all nodes
        #print('chunk_group len %d'%( len( chunk_group ) ) ) # ggout
        nodes_group = int ( len( chunk_group ) / ntasks )
        if ( len( chunk_group ) % ntasks ) > 0 :
            nodes_group += 1
        #print('nodes %d -- vs nodes_group %d'%(nodes, nodes_group) )
        # round to the next minute
        if walltime % 60 != 0:
            walltime = walltime + (60 - walltime%60)
        #print(walltime)
        print('nodes: %d'%( nodes_group ) )
        msub(nodes_group, partition, walltime, ntasks, env, runlist, tool, action, config, wait, nthreads, instrument, inputsize, i+1, start, end)
        #print('chunkno %d nexps %d'%( chunkno, len(chunk2) ) ) # ggout
    #print('Total chunks:%d'%(chunkno))
def get_chunk(l, n):
    """Yield successive n-sized chunks from l."""
    start = 0
    while start < len(l):
        yield l[start:start + n]
        start += n
def main():
    """Parse CLI arguments and generate Moab job scripts for all pending experiments."""
    parser = argparse.ArgumentParser('Generate moab scripts to run experiments')
    parser.add_argument('-d', '--appdir', help='applications root directory', required=True)
    parser.add_argument('-r', '--resdir', help='results directory', required=True)
    parser.add_argument('-a', '--apps', help='applications to run', choices=data.apps+['ALL'], nargs='+', required=True)
    parser.add_argument('-t', '--tools', help='tool to run', choices=['safire', 'refine', 'pinfi', 'pinfi-detach', 'golden' ], nargs='+', required=True)
    parser.add_argument('-x', '--action', help='action to take', choices=['profile', 'fi', 'fi-0', 'fi-1-15'], required=True)
    parser.add_argument('-s', '--start', help='start trial', type=int, required=True)
    parser.add_argument('-e', '--end', help='end trial', type=int, required=True)
    parser.add_argument('-c', '--mode', help='run configuration', choices=['serial', 'omp', 'ALL'], nargs='+', required=True)
    parser.add_argument('-l', '--timelimit', help='target timelimit (seconds)', type=int, required=True)
    parser.add_argument('-N', '--nodes', help='target nodes', type=int, required=True)
    parser.add_argument('-g', '--generate', help='generate moab jobscripts', default=False, action='store_true')
    parser.add_argument('-p', '--partition', help='partition to run experiments', choices=['echo', 'local', 'debug', 'batch' ], required=True)
    parser.add_argument('-w', '--wait', help='wait policy', choices=['passive', 'active'] )
    args = parser.parse_args()
    # Error checking
    # Fix: missing space in the assert message ("...does not exist").
    assert os.path.isdir(args.appdir), 'Applications directory: ' + args.appdir + ' does not exist'
    assert args.start <= args.end, 'Start must be < end'
    assert args.timelimit > 0, 'Walltime must be > 0'
    assert args.nodes > 0, 'Node must be > 0'
    if args.apps == ['ALL']:
        args.apps = data.apps
    # fi tools: iterate every (tool, mode, wait, input, instrument, nthreads) combo.
    for t in args.tools:
        for c in args.mode:
            if c not in config.data[t]['exec']:
                # Fix: '%d' with a string tool name raised TypeError at runtime;
                # both c and t are strings, so use '%s'.
                print('Cannot %s with %s' % ( c, t ) )
                continue
            for w in config.data[t][c]['wait']:
                for i in config.data[t][c]['inputs']:
                    for ins in config.data[t][c]['instrument']:
                        for n in config.data[t][c][i][ins]['nthreads']:
                            exps = check(args.appdir, args.resdir, t, c, w, args.apps, args.action, ins, n, i, args.start, args.end, args.generate)
                            if exps:
                                print('==== EXPERIMENT ====')
                                print( 'Experiment: %s %s %s %s %s %s %s %s %s [%s]'%( t, args.action,
                                    c, w, n, ins, i, args.start, args.end, ', '.join(args.apps) ) )
                                #print(exps)
                                nexps = len(exps)
                                print('Nof exps: %d'%( nexps ) )
                                if args.generate:
                                    #def generate_jobs(exps, c, t, action, ins, n, i, start, end):
                                    generate_jobs(args.nodes, args.partition, args.timelimit, exps,
                                        c, w, t, args.action, ins, n, i, args.start, args.end )
                                print('==== END EXPERIMENT ====')
|
from manimlib.imports import *
class Reconstruction(VectorScene):
    """Manim scene sketching a vector-reconstruction animation.

    Most of construct() is commented out; currently it only draws a number line.
    """
    CONFIG = {
        "vector1" : [1, 2],
        "vector2" : [3, -1],
        "vector1_color" : MAROON_C,
        "vector2_color" : BLUE,
        "vector1_label" : "v",
        "vector2_label" : "w",
        "sum_color" : PINK,
        "scalar_pairs" : [
            (1.5, 0.3),
        ]
    }
    def construct(self):
        # self.lock_in_faded_grid()
        #
        # v2 = self.add_vector(self.vector2, color = self.vector2_color, stroke_width = 3)
        #
        # self.initial_scaling(v1, v2)
        self.numberline()
        self.wait(2)
    def get_rate_func_pair(self):
        # Two smooth easings that overlap in time: first runs 0-70%, second 30-100%.
        return [
            squish_rate_func(smooth, a, b)
            for a, b in [(0, 0.7), (0.3, 1)]
        ]
    def initial_scaling(self, v1, v2):
        # Consume the next scalar pair and animate scaling both vectors concurrently.
        scalar_pair = self.scalar_pairs.pop(0)
        anims = [
            ApplyMethod(v.scale, s, rate_func = rf)
            for v, s, rf in zip(
                [v1, v2],
                scalar_pair,
                self.get_rate_func_pair()
            )
        ]
        # anims += [
        #     ApplyMethod(v.copy().fade, 0.7)
        #     for v in (v1, v2)
        # ]
        self.play(*anims, **{"run_time" : 2})
        self.wait()
    def lock_in_faded_grid(self, dimness=0.7, axes_dimness=0.5):
        # Draw a dimmed background grid with slightly brighter white axes.
        plane = self.add_plane()
        axes = plane.get_axes()
        plane.fade(dimness)
        axes.set_color(WHITE)
        axes.fade(axes_dimness)
        self.add(axes)
        # self.freeze_background()
    def numberline(self):
        # Animate creation of a thin number line labeled from -6 to 5.
        nl1 = NumberLine(
            x_min=-8,
            x_max=8,
            unit_size=1,
            tick_frequency=1,
            number_scale_val=0.25,
        )
        nl1.set_stroke(width=1)
        nl1.add_numbers(*range(-6, 6, 1))
        self.play(ShowCreation(nl1))
class Texttest(Scene):
    """Small demo scene: write a column matrix, brace it, then shrink the group."""
    def construct(self):
        column_matrix = Matrix([1,2])
        brace = Brace(column_matrix, LEFT)
        brace.next_to(column_matrix, LEFT, MED_SMALL_BUFF)
        # Group the matrix and brace (plus a circle) so they scale together.
        group = VGroup(column_matrix, brace)
        group = group.add( Circle())
        self.play(Write(column_matrix))
        self.add(brace)
        self.wait(2)
        self.play(ApplyMethod(group.scale, 0.8))
        self.wait(1)
class Numberplane(ThreeDScene):
    """Demo scene: show a red number plane labelled z / Delta-h."""
    def construct(self):
        self.add_plane()
        self.wait(2)
    def add_plane(self):
        """Create, label, and add the plane; return it for further use."""
        grid = NumberPlane(color=RED)
        labels = grid.get_axis_labels(x_label_tex="z", y_label_tex=r"\Delta h")
        grid.add(labels)
        self.add(grid)
        return grid
|
"""Usage:
bcm.py <model> [--verbose]
Options:
-h --help This file is used to operate the power system model.
--version (c) Sayonsom Chanda, 2017. Canvass 0.0.1
MIT License. Attribution required.
Software provided AS IS. No WARRANTIES.
--verbose This prints out a very detailed log of what's going on.
Helpful for debugging purposes.
"""
import pprint
import pickle as pickle
import os
import numpy as np
import pandas as pd
from shutil import *
from docopt import docopt
import os
import pandapower as pp
import pandapower.networks as pn
import pandapower.plotting as plot
import sys
import time
import printbanner
if __name__ == '__main__':
    # Verbosity flag; TODO: wire this up to arguments['--verbose'] instead of
    # hard-coding it.
    v = 1
    arguments = docopt(__doc__, version='Canvass 0.0.1')
    filename = arguments['<model>']
    filename = "".join([filename, ".p"])
    os.system("clear")
    # NOTE(review): printBanner is not defined here; presumably it lives in
    # the imported `printbanner` module (printbanner.printBanner) -- confirm.
    printBanner()
    # BUG FIX: the original read `if v = 1:` (assignment), which is a
    # SyntaxError; the intended comparison operator is `==`.
    if v == 1:
        print("[1] Canvass can automatically generate substation communication models for you.")
        time.sleep(1)
        # NOTE(review): the `{}` placeholders below have no .format() call,
        # so they print literally -- likely missing .format(filename, ...).
        print("[2] A new folder will be created with the name \"{}\", where you will find a \"{}cyberfile.txt\" in Canvass format.")
        time.sleep(1)
        print("[3] For each substation, a Mininet communication file will be created which you can use to run simulations using Mininet VM in a Linux Environment.")
        time.sleep(1)
    # NOTE(review): `data1` is undefined in this file -- confirm where it is
    # meant to come from before shipping.
    buildCyberModel(filename,data1)
|
import json
from yelp_beans.data_providers.data_provider import DataProvider
def test_parse(employees):
    """DataProvider._parse should normalize the raw employee JSON into a
    single record with the expected fields."""
    parsed = DataProvider()._parse(json.loads(employees))
    assert len(parsed) == 1
    record = parsed[0]
    assert record["first_name"] == "Darwin"
    assert record["last_name"] == "Stoppelman"
    assert record["email"] == "darwin@googleapps.com"
    expected_photo = (
        "http://s3-media4.fl.yelpcdn.com/"
        "assets/srv0/yelp_large_assets/3f74899c069c/assets/img/"
        "illustrations/mascots/darwin@2x.png"
    )
    assert record["photo_url"] == expected_photo
    assert record["metadata"] == {}
|
#!/usr/bin/python3
"""return TODO list progress"""
import requests
import sys
if __name__ == "__main__":
    employee_id = sys.argv[1]
    # Look up the employee's display name.
    users = requests.get("https://jsonplaceholder.typicode.com/users/" +
                         employee_id)
    user = users.json().get("name")
    # Fetch every TODO item belonging to that employee.
    all_tasks = requests.get(
        "https://jsonplaceholder.typicode.com/todos?userId=" + employee_id)
    all_tasks_list = all_tasks.json()
    total_tasks = len(all_tasks_list)
    # Titles of completed tasks, in API order.
    done_task_titles = [task.get("title") for task in all_tasks_list
                        if task.get("completed") is True]
    done_tasks = len(done_task_titles)
    print("Employee {} is done with tasks({}/{}):".format(user, done_tasks,
                                                          total_tasks))
    for title in done_task_titles:
        print("\t {}".format(title))
|
# Napisz funkcję, która wygeneruje macierz wielowymiarową postaci:
# [[2 4 6]
# [4 2 4]
# [6 4 2]]
# Przy założeniach:
# funkcja przyjmuje parametr n, który określa wymiary macierzy jako n*n i umieszcza
# wielokrotność liczby 2 na kolejnych jej przekątnych rozchodzących się od głównej
# przekątnej.
import numpy as np
def Generate_multidimensional_matrix(n):
    """Print (and now also return) an n*n matrix with 2 on the main diagonal
    and increasing even numbers (4, 6, ...) on the successive off-diagonals
    radiating away from it.

    :param n: matrix dimension (n*n)
    :return: the generated numpy integer matrix (new, backward compatible --
             previous callers ignored the implicit None return)
    """
    matrix = np.diag([2] * n)
    value = 4
    for offset in range(1, n):
        # Fill the +offset and -offset diagonals symmetrically.
        band = [value] * (n - offset)
        matrix += np.diag(band, offset)
        matrix += np.diag(band, -offset)
        value += 2
    print(matrix)
    return matrix
# Demonstrate the generator for several matrix sizes.
for size in (2, 3, 4, 5):
    Generate_multidimensional_matrix(size)
from django.db import models
from django.contrib.auth.models import AbstractUser
# Create your models here.
class QuizUser(AbstractUser):
    """Custom Django user with an accumulated quiz score."""
    # Running quiz score; nullable, starts at 0.
    score = models.IntegerField(null=True, default=0)
|
import numpy as np
import os, re, sys
from sklearn import metrics
from multiprocessing.dummy import Pool as ThreadPool
from sklearn.externals import joblib
from utils.myAUC import *
from utils.MISVM import *
from utils.testSVM import *
# Experiment configuration: property name is taken from the parent directory.
prop = os.getcwd().split('/')[-2]
nthreads = 10
seed0 = 1000001 + 100
np.random.seed(seed0)
dirpath = 'dir'
os.system('mkdir -p %s' % dirpath)
# Write the results-file header.
resfile = 'results.txt'
fpres = open(resfile, 'w')
fpres.write('roc roc_sent roc_macro roc_sent_macro fprAUC\n')
# BUG FIX: the original read `fpres.close` (attribute access, no call), so
# the file handle was never closed; it must be invoked.
fpres.close()
Datapath = './data'
trfile = '%s/train-data.dat' % Datapath
trlblfile = '%s/new_train-label.dat' % Datapath
tfile = '%s/test-data.dat' % Datapath
tlblfile = '%s/new_test-label.dat' % Datapath
# NOTE(review): validation files are identical to the test files --
# confirm that validating on the test set is intentional.
vfile = '%s/test-data.dat' % Datapath
vlblfile = '%s/new_test-label.dat' % Datapath
vocabfile = '%s/vocabs.txt' % Datapath
trslblfile = '%s/train-sentlabel.dat' % Datapath
tslblfile = '%s/new_test-sentlabel.dat' % Datapath
sent_trfile = '%s/train-data.dat' % Datapath
sent_tfile = '%s/test-data.dat' % Datapath
trlbl = np.loadtxt(trlblfile)
tlbl = np.loadtxt(tlblfile)
(Dtr, C) = trlbl.shape
Dt = tlbl.shape[0]
# Vocabulary size = number of lines in the vocab file (fixed handle leak).
with open(vocabfile) as fp_vocab:
    N = len(fp_vocab.readlines())
def read_docs(docfile, lbld_docs, lbls, outfile):
    """Convert labelled documents to per-class, per-sentence SVM input files.

    :param docfile: path to a file with one doc per line, formatted
        "<num_sents><len> w w ...<len> w w ..."
    :param lbld_docs: indices (into docfile's lines) of documents to keep
    :param lbls: (num_docs, num_classes) label matrix aligned with docfile
    :param outfile: output path prefix; one file per class is written as
        "<outfile>_<c>"
    """
    fp = open(docfile)
    C = lbls.shape[1]
    # One output handle per class.
    fpout = list()
    for c in range(C):
        fpout.append(open(outfile + '_' + str(c), 'w'))
    d = 0       # line index within docfile (matches lbls rows)
    d0 = 0      # index among the *kept* documents
    while True:
        ln = fp.readline()
        if len(ln) == 0:
            break
        if d not in lbld_docs:
            d += 1
            continue
        # Declared sentence count -- parsed but unused here.
        Sd = int(re.findall('^<([0-9]*)>', ln)[0])
        # sents[0] is the capture following the count header, so the real
        # sentences start at sents[1:] -- presumably; confirm with the data.
        sents = re.findall('<[0-9]*?>([0-9 ]*)', ln)
        for s, sent in enumerate(sents[1:]):
            # Bag-of-words with term frequencies normalized by sentence length.
            doc = {}
            words = sent.split()
            Ls = len(words)
            for w in words:
                try:
                    doc[w] += 1.0 / Ls
                except KeyError:
                    doc.update({w: 1.0 / Ls})
            # Features are written 1-based: "word_id:tf".
            txt = ','.join(['%d:%f' % (int(w) + 1, doc[w]) for w in doc.keys()])
            for c in range(C):
                fpout[c].write('s_%d_d_%d,d%d,%d,%s\n' % (s, d0, d0, lbls[d, c], txt))
        d += 1
        d0 += 1
    fp.close()
    for c in range(C):
        fpout[c].close()
# read training docs
# Keep only documents whose first label is not -1 (i.e. labelled docs).
ind = np.where(trlbl[:, 0] != -1)[0]
trlbl0 = trlbl.copy()
trlbl = trlbl[ind, :].copy()
read_docs(trfile, ind, trlbl0, 'dir/trfile')
# test docs
ind = np.where(tlbl[:, 0] != -1)[0]
read_docs(tfile, ind, tlbl, 'dir/tfile')
# validation files
vlbl = np.loadtxt(vlblfile)
ind = np.where(vlbl[:, 0] != -1)[0]
read_docs(vfile, ind, vlbl, 'dir/vfile')
# total number of sentences
# Sum the per-document sentence counts declared in the headers.
Sd_total = 0
fp = open(sent_tfile)
while True:
    ln = fp.readline()
    if len(ln) == 0:
        break
    sd = re.findall('^<([0-9]*)>', ln)[0]
    Sd_total += int(sd)
# Ground-truth sentence labels: rows with a leading -1 are unlabelled and
# skipped; gt_sent keeps only the labelled sentences, slbld_list records
# their global sentence indices.
slbl_gt = open(tslblfile).readlines()
gt_sent2 = np.zeros((Sd_total, C))
cnt = 0
cnt_lbld = 0
slbld_list = []
for d in range(len(slbl_gt)):
    sents_gt = re.findall('<(.*?)>', slbl_gt[d])
    for sent in sents_gt:
        temp = np.array([float(x) for x in sent.split()])
        if temp[0] == -1:
            cnt += 1
            continue
        gt_sent2[cnt_lbld, :] = temp.copy()
        slbld_list.append(cnt)
        cnt_lbld += 1
        cnt += 1
gt_sent = gt_sent2[:cnt_lbld, :].copy()
# training
# Grid of SVM C values; vccr stores validation accuracy for each.
clist = np.logspace(-2, 2, 5)
vccr = np.zeros(len(clist))
def train_model_class(c):
    """Train (or load a cached) MISVM model for class c and fill its column
    of the shared `valid_pred` matrix.

    Relies on module globals set by the surrounding driver loop: `svmC`
    (current SVM C value) and `c0` (its index), used to key the cache files.
    """
    global svmC
    global c0
    global valid_pred
    # Reuse a previously trained model if its tarball exists.
    if os.path.isfile('dir/tmp_%d_%d.tar.gz' % (c, c0)):
        os.system('tar -zxf dir/tmp_%d_%d.tar.gz ' % (c, c0))
        clf = joblib.load('dir/tmp_%d_%d/model.pkl' % (c, c0))
    else:
        (bag_pred_f, ypred_f, clf, bags) = MISVM(N, 'dir/trfile_' + str(c), svmC)
        # save model
        os.system('mkdir -p dir/tmp_%d_%d' % (c, c0))
        joblib.dump(clf, 'dir/tmp_%d_%d/model.pkl' % (c, c0))
    # validation
    (bag_pred_f, ypred_f, bags) = testSVM(N, 'dir/vfile_%d' % c, clf)
    valid_pred[:, c] = np.round(bag_pred_f)
    # Archive the model directory and remove the working copy.
    if not os.path.isfile('dir/tmp_%d_%d.tar.gz' % (c, c0)):
        os.system('tar -zcf dir/tmp_%d_%d.tar.gz dir/tmp_%d_%d' % (c, c0, c, c0))
    os.system('rm -r dir/tmp_%d_%d' % (c, c0))
pool = ThreadPool(nthreads)
# Grid search over C: train one model per class (in parallel) and measure
# validation accuracy for each candidate C value.
for c0, svmC in enumerate(clist):
    # Only classes with at least one positive training example.
    runC = [c for c in range(C) if np.sum(trlbl[:, c]) > 0]
    valid_pred = np.zeros(vlbl.shape)
    # write the mfiles
    # for c in range(C):
    pool.map(train_model_class, runC)
    # Mean per-label accuracy (labels are 0/1, so the square is a no-op).
    vccr[c0] = np.mean((vlbl == valid_pred) ** 2)
    print('>>>>', c0, svmC, vccr[c0])
# Pick the C with the best validation accuracy.
c0 = np.argmax(vccr)
bestC = clist[c0]
print("Best C", bestC)
# make prediction on the test set
test_pred = np.zeros(tlbl.shape)
temp_sent = np.zeros(gt_sent2.shape)
test_sent_pred = np.zeros(gt_sent.shape)
svmC = bestC
# for c in range(C):
def test_model_class(c):
    """Load the cached model for class c (at the selected C index `c0`) and
    fill its columns of the shared test prediction matrices."""
    global test_pred
    global test_sent_pred
    global temp_sent
    global svmC
    global c0
    global slbld_list
    os.system('tar -zxf dir/tmp_%d_%d.tar.gz ' % (c, c0))
    clf = joblib.load('dir/tmp_%d_%d/model.pkl' % (c, c0))
    # Bag-level and sentence-level predictions on the test file.
    (test_pred[:, c], temp_sent[:, c], bags) = testSVM(N, 'dir/tfile_%d' % c, clf)
    # Keep only sentences that have ground-truth labels.
    test_sent_pred[:, c] = temp_sent[slbld_list, c].copy()
print("Test Model")
pool.map(test_model_class, [c for c in range(C)])
print("AUC Started")
# Micro/macro ROC AUC at the document and sentence level.
(roc, roc_macro) = compute_auc(test_pred, tlbl)
(roc_sent, roc_sent_macro) = compute_auc(test_sent_pred, gt_sent)
# ThFprAUC for documents with no labels
# False-positive-rate curve over thresholds for documents with no labels.
nolbld = np.where(np.sum(tlbl, 1) == 0)[0]
if len(nolbld) > 0:
    TH = np.linspace(0, 1, 50)
    fpr = np.zeros(len(TH))
    for t, th in enumerate(TH):
        pred = np.round(test_pred[nolbld] > th)
        tn = np.sum((1 - pred) == 1)
        fp = np.sum(pred == 1)
        fpr[t] = fp / float(fp + tn)
    fprAUC = metrics.auc(TH, fpr)
else:
    fprAUC = 0
# Append the final metrics row and clean up the working directory.
fpres = open(resfile, 'a')
fpres.write('%f %f %f %f %f\n' % (roc, roc_sent, roc_macro, roc_sent_macro, fprAUC))
fpres.close()
os.system('rm -r dir')
|
#!/usr/bin/env python
import rospy
from std_msgs.msg import String
def callback(data):
    # Log every received message; get_caller_id() prefixes the node name.
    rospy.loginfo(rospy.get_caller_id() + "I heard %s" , data.data)
def listener1():
    """Start the 'listener1' ROS node, subscribe to 'chatter', and spin."""
    rospy.init_node('listener1', anonymous=True)
    rospy.Subscriber("chatter",String,callback)
    # Block until the node is shut down.
    rospy.spin()
if __name__=='__main__':
    listener1()
|
from collections import defaultdict
class graph:
    """Directed graph backed by adjacency lists, with BFS traversal."""

    def __init__(self):
        # node -> list of successors (insertion order preserved)
        self.graph = defaultdict(list)

    def addedge(self, u, v):
        """Add a directed edge u -> v."""
        self.graph[u].append(v)

    def Bfs(self, s):
        """Breadth-first traversal from s; prints and returns the visit order.

        BUG FIX: the original appended every dequeued node to the visited
        *list* a second time (it was already added at enqueue time) and used
        O(n) list membership tests; a set gives O(1) lookups with no
        duplicates.  Returning the order is new but backward compatible --
        previous callers ignored the implicit None return.
        """
        visited = {s}
        order = []
        queue = [s]  # FIFO; collections.deque would make pop(0) O(1)
        while queue:
            node = queue.pop(0)
            order.append(node)
            for neighbour in self.graph[node]:
                if neighbour not in visited:
                    visited.add(neighbour)
                    queue.append(neighbour)
        print(order)
        return order
# Build the demo graph and run BFS from node 1.
g = graph()
demo_edges = [(1, 2), (2, 3), (3, 4), (2, 7), (3, 5), (7, 5), (5, 8), (5, 6)]
for source, target in demo_edges:
    g.addedge(source, target)
g.Bfs(1)
|
# Small English -> Korean dictionary demo.
dic = {
    'boy': '소년',
    'school': '학교',
    'book': '책',
}
dic['boy'] = '남자아이'  # overwrites the existing 'boy' entry
dic['girl'] = '소녀'
dic.pop('book')  # remove an entry (same effect as `del dic['book']`)
print(dic)
print(dic.keys())
print(dic.values())
print(dic.items())
# Iterate over key/value pairs.
for word, meaning in dic.items():
    print(word, meaning)
dic2 = {
    'student': '학생', 'teacher': '선생님', 'book': '서적'
}
# Merging: keys present in both dictionaries take dic2's value.
dic.update(dic2)
print(dic)
# pizzastore.py
import abc
class PizzaStore(abc.ABC):
    """Abstract pizza store (factory-method pattern): order_pizza drives the
    fixed preparation pipeline; subclasses decide which pizza to create."""

    def order_pizza(self, type: str):
        """Create a pizza of the requested type, run it through the standard
        prepare/bake/cut/box steps, and return it."""
        pizza = self.create_pizza(type)
        for step in (pizza.prepare, pizza.bake, pizza.cut, pizza.box):
            step()
        return pizza

    @abc.abstractmethod
    def create_pizza(self, type: str):
        """Factory method: return a pizza instance for the given type."""
        pass
class NYStylePizzaStore(PizzaStore):
    """Concrete store producing New York style pizzas."""

    def create_pizza(self, type: str):
        """Build a NY-style pizza for the given type; None when unknown."""
        ingredient_factory = NYPizzaIngredientFactory()
        if type == 'cheese':
            pizza = CheesePizza(ingredient_factory)
            pizza.set_name('New York Style Cheese Pizza')
            return pizza
        if type == 'clam':
            pizza = ClamPizza(ingredient_factory)
            pizza.set_name('New York Style Clam Pizza')
            return pizza
        return None
class ChicagoStylePizzaStore(PizzaStore):
    """Concrete store producing Chicago style pizzas."""

    def create_pizza(self, type: str):
        """Build a Chicago-style pizza for the given type; None when unknown."""
        ingredient_factory = ChicagoPizzaIngredientFactory()
        if type == 'cheese':
            pizza = CheesePizza(ingredient_factory)
            pizza.set_name('Chicago Style Cheese Pizza')
            return pizza
        if type == 'clam':
            pizza = ClamPizza(ingredient_factory)
            pizza.set_name('Chicago Style Clam Pizza')
            return pizza
        return None
|
def f(n):
    """Return the last number counted before all ten decimal digits have
    appeared among the digits of n, 2n, 3n, ..., or "INSOMNIA" when the
    digits can never be exhausted (only possible for n == 0).

    (Google Code Jam 2016 "Counting Sheep".)
    """
    if n == 0:
        # The original detected this via n*i == n*(i-1), which only holds for
        # n == 0; an explicit early return says the same thing directly.
        return "INSOMNIA"
    all_digits = set("0123456789")
    seen = set()
    i = 1
    while True:
        current = n * i  # renamed: the original shadowed the builtin `next`
        seen |= set(str(current))
        if seen == all_digits:
            return current
        i += 1
import sys
# Redirect stdin/stdout to the Code Jam input/output files.
# NOTE(review): hard-coded absolute Windows paths -- adjust per machine.
sys.stdin = open(r"C:\Users\jake\Downloads\A-large.in", 'r')
sys.stdout = open(r"C:\Users\jake\code\A.large.out", 'w')
# First line is the number of test cases; one value of n per case follows.
x = int(input())
for i in range(x):
    z = int(input())
    print("Case #%d:" % (i+1), f(z))
sys.stdin.close()
sys.stdout.close()
|
import os
import re
import sys
import argparse
import pytest
from six import StringIO
from llnl.util.filesystem import working_dir
from llnl.util.tty.colify import colify
from pymod.util.tty import redirect_stdout2 as redirect_stdout
import pymod.paths
import pymod.modulepath
# Metadata consumed by the pymod command framework.
description = "run pymod's unit tests"
section = "developer"
level = "long"
def setup_parser(subparser):
    """Register the `test` command's options on *subparser*."""
    subparser.add_argument(
        "-H", "--pytest-help", action="store_true", default=False,
        help="print full pytest help message, showing advanced options")
    # -l and -L are mutually exclusive listing modes.
    listing = subparser.add_mutually_exclusive_group()
    listing.add_argument(
        "-l", "--list", action="store_true", default=False,
        help="list basic test names")
    listing.add_argument(
        "-L", "--long-list", action="store_true", default=False,
        help="list the entire hierarchy of tests")
    # Everything else is forwarded to pytest's -k keyword filter.
    subparser.add_argument(
        "tests", nargs=argparse.REMAINDER,
        help="list of tests to run (will be passed to pytest -k)")
def do_list(args, unknown_args):
    """Print a friendlier listing of collected tests than raw pytest output."""
    # pytest writes its collection report to stderr; capture it.
    saved_stderr = sys.stderr
    try:
        sys.stderr = captured = StringIO()
        pytest.main(["--collect-only"])
    finally:
        sys.stderr = saved_stderr
    # Reformat the captured node lines into a readable tree.
    node_pattern = re.compile(r"(\s*)<([^ ]*) '([^']*)'")
    collected = []
    for line in captured.getvalue().split("\n"):
        match = node_pattern.match(line)
        if match is None:
            continue
        indent, nodetype, name = match.groups()
        if args.list:
            # Short listing keeps only top-level (unindented) nodes.
            if not indent:
                collected.append(os.path.basename(name).replace(".py", ""))
        else:
            sys.stderr.write(indent + name)
    if args.list:
        colify(collected)
def test(parser, args, unknown_args):
    """Entry point for `pymod test`: dispatch to pytest with the right args."""
    if args.pytest_help:
        # make the pytest.main help output more accurate
        with redirect_stdout():
            sys.argv[0] = "pymod test"
            pytest.main(["-h"])
        return
    pytest_root = pymod.paths.test_path
    # pytest.ini lives in the root of the pymod repository.
    with redirect_stdout():
        with working_dir(pytest_root):
            # --list and --long-list print the test output better.
            if args.list or args.long_list:
                do_list(args, unknown_args)
                return
            # Allow keyword search without -k if no options are specified
            if (
                args.tests
                and not unknown_args
                and not any(arg.startswith("-") for arg in args.tests)
            ):
                return pytest.main(["-k"] + args.tests)
            # Just run the pytest command
            return pytest.main(unknown_args + args.tests)
|
from datetime import datetime
from flask import Blueprint, request
from app.forecast import retrieve_forecast
from dateutil.parser import isoparse
api = Blueprint("api", __name__)
@api.route("/forecast/")
def get_forecast():
    """Return the forecast for ?city=...&dt=<ISO-8601 timestamp> as JSON.

    Responds 400 for a missing/malformed `dt` and 404 when the forecast
    lookup fails.
    """
    try:
        # BUG FIX: a missing `dt` (None) or malformed value previously raised
        # an uncaught TypeError/ValueError here, producing a 500 instead of a
        # client-error response.
        time = isoparse(request.args.get("dt"))
    except (TypeError, ValueError):
        return ({"error": "missing or invalid 'dt' parameter"}, 400)
    try:
        resp = retrieve_forecast(
            city=request.args.get("city"), timestamp=time
        ).as_json()
    except ValueError as e:
        resp = ({"error": str(e)}, 404)
    return resp
@api.route("/current/")
def get_current_forecast():
    """Return the forecast for ?city=... at the current moment as JSON."""
    now = datetime.now()
    try:
        # as_json stays inside the try so its ValueErrors are handled too.
        resp = retrieve_forecast(
            timestamp=now, city=request.args.get("city")
        ).as_json()
    except ValueError as err:
        resp = ({"error": str(err)}, 404)
    return resp
|
# This isn't correct, but at least it will give me a benchmark of where I am.
# Link to code challenge instructions
import statistics
import numpy as np
class Stats():
    """Descriptive statistics over a list of numbers.

    BUG FIX: the original referenced the builtin `list` instead of stored
    data, assigned instance attributes that shadowed the methods of the same
    names, and called an undefined `stdev`, so it crashed on any use.  Per
    the DIRECTIONS below, the class now takes the data list itself and
    computes each statistic on demand.
    """

    def __init__(self, data):
        # Copy so later mutation of the caller's list cannot change results.
        self.data = list(data)

    def mean(self):
        """Arithmetic mean."""
        return sum(self.data) / len(self.data)

    def median(self):
        """Middle value; the average of the two middle values for even N."""
        ordered = sorted(self.data)
        n = len(ordered)
        mid = n // 2
        if n % 2:
            return ordered[mid]
        return (ordered[mid - 1] + ordered[mid]) / 2

    def mode(self):
        """Most common value."""
        return statistics.mode(self.data)

    def variance(self):
        """Population variance (matches numpy.var on the same data)."""
        mu = self.mean()
        return sum((x - mu) ** 2 for x in self.data) / len(self.data)

    def std_dev(self):
        """Population standard deviation."""
        return self.variance() ** 0.5

    def coeff(self):
        """Coefficient of variation: std_dev / mean."""
        return self.std_dev() / self.mean()
##### DIRECTIONS #####
"""
Create a ToolBox dir
Inside the ToolBox dir, make a stats_tools.py file
Inside stats_tools.py make a Stats class
Your Stats class should take in a list
Stats class methods include the mean, median (for odd and even N), mode, variance, standard deviation, and coefficient of variation
You may use NumPy but not for its math methods
Git status, add, status, commit, push
Make a pull request when you are done
"""
|
from __future__ import division
import numpy as np
import scipy as sp
from numpy.random import random
class SVD_C:
    """Biased matrix-factorization (SVD-style) recommender trained with SGD.

    Rows of X are (user_id, item_id, rating); a prediction is
    ave + b_i + b_u + q_i . p_u, clipped to [1, 5].

    BUG FIXES relative to the original:
    - Python 2 `print` statements (SyntaxError on Python 3) -> print() calls.
    - `== None` on numpy arrays is an elementwise comparison that is invalid
      in a boolean context -> identity test `is None`.
    - `temp = self.qi[mid]` aliased the array that the subsequent in-place
      `+=` updates, so p_u was updated with the *new* q_i -> take a copy.
    """

    def __init__(self, X, k=20):
        """
        :param X: iterable of (uid, mid, rating) rows
        :param k: length of the latent factor vectors
        """
        self.X = np.array(X)
        self.k = k
        self.ave = np.mean(self.X[:, 2])
        print("the input data size is %s" % (self.X.shape,))
        self.bi = {}
        self.bu = {}
        self.qi = {}
        self.pu = {}
        self.movie_user = {}
        self.user_movie = {}
        for i in range(self.X.shape[0]):
            uid = self.X[i][0]
            mid = self.X[i][1]
            rat = self.X[i][2]
            self.movie_user.setdefault(mid, {})
            self.user_movie.setdefault(uid, {})
            self.movie_user[mid][uid] = rat
            self.user_movie[uid][mid] = rat
            self.bi.setdefault(mid, 0)
            self.bu.setdefault(uid, 0)
            # Small random initialization scaled with sqrt(k).
            self.qi.setdefault(mid, random((self.k, 1)) / 10 * (np.sqrt(self.k)))
            self.pu.setdefault(uid, random((self.k, 1)) / 10 * (np.sqrt(self.k)))

    def pred(self, uid, mid):
        """Predict user uid's rating for item mid, clipped to [1, 5].

        Unknown users/items fall back to zero biases and zero factors.
        """
        self.bi.setdefault(mid, 0)
        self.bu.setdefault(uid, 0)
        self.qi.setdefault(mid, np.zeros((self.k, 1)))
        self.pu.setdefault(uid, np.zeros((self.k, 1)))
        if self.qi[mid] is None:
            self.qi[mid] = np.zeros((self.k, 1))
        if self.pu[uid] is None:
            self.pu[uid] = np.zeros((self.k, 1))
        ans = self.ave + self.bi[mid] + self.bu[uid] + np.sum(self.qi[mid] * self.pu[uid])
        if ans > 5:
            return 5
        elif ans < 1:
            return 1
        return ans

    def train(self, steps=20, gamma=0.04, Lambda=0.15):
        """SGD training: gamma is the learning rate (decayed by 0.93 per
        epoch) and Lambda the L2 regularization strength."""
        for step in range(steps):
            print('the %d-th step is running' % step)
            rmse_sum = 0.0
            # Visit the training triples in a fresh random order each epoch.
            kk = np.random.permutation(self.X.shape[0])
            for j in range(self.X.shape[0]):
                i = kk[j]
                uid = self.X[i][0]
                mid = self.X[i][1]
                rat = self.X[i][2]
                eui = rat - self.pred(uid, mid)
                rmse_sum += eui ** 2
                self.bu[uid] += gamma * (eui - Lambda * self.bu[uid])
                self.bi[mid] += gamma * (eui - Lambda * self.bi[mid])
                # Copy: the in-place += below would otherwise mutate `temp` too.
                temp = self.qi[mid].copy()
                self.qi[mid] += gamma * (eui * self.pu[uid] - Lambda * self.qi[mid])
                self.pu[uid] += gamma * (eui * temp - Lambda * self.pu[uid])
            gamma = gamma * 0.93
            print("the rmse of this step on train data is %s"
                  % np.sqrt(rmse_sum / self.X.shape[0]))

    def test(self, test_X):
        """Return predictions for (uid, mid, rating) test rows and print RMSE."""
        output = []
        sums = 0
        test_X = np.array(test_X)
        for i in range(test_X.shape[0]):
            pre = self.pred(test_X[i][0], test_X[i][1])
            output.append(pre)
            sums += (pre - test_X[i][2]) ** 2
        rmse = np.sqrt(sums / test_X.shape[0])
        print("the rmse on test data is %s" % rmse)
        return output
from vigorish.status.util import (
get_date_status,
get_date_status_from_bbref_game_id,
get_game_status,
)
from vigorish.util.dt_format_strings import DATE_ONLY_2
from vigorish.util.result import Result
def update_bbref_games_for_date_list(scraped_data, db_session, scraped_bbref_dates, apply_patch_list=True):
    """Update scrape-status records for each scraped date; dates whose JSON
    cannot be retrieved are collected and reported in a single failure."""
    missing_json = []
    for game_date in sorted(scraped_bbref_dates):
        games_for_date = scraped_data.get_bbref_games_for_date(game_date, apply_patch_list)
        if games_for_date:
            result = update_bbref_games_for_date_single_date(db_session, games_for_date)
            if result.failure:
                return result
        else:
            missing_json.append(game_date.strftime(DATE_ONLY_2))
    if missing_json:
        return Result.Fail(f"Failed to retrieve BBRef Games for Date JSON files: {','.join(missing_json)}")
    return Result.Ok()
def update_bbref_games_for_date_single_date(db_session, games_for_date):
    """Mark one date's dashboard as scraped and create its game records."""
    lookup = get_date_status(db_session, games_for_date.game_date)
    if lookup.failure:
        return lookup
    update_date_status_record(lookup.value, games_for_date)
    creation = create_game_status_records(db_session, games_for_date)
    return creation if creation.failure else Result.Ok()
def update_date_status_record(date_status, games_for_date):
    """Record that BBRef's daily dashboard was scraped for this date and how
    many games it listed.  (The two assignments are independent.)"""
    date_status.game_count_bbref = games_for_date.game_count
    date_status.scraped_daily_dash_bbref = 1
def create_game_status_records(db_session, games_for_date):
    """Ensure a game-status row exists for every BBRef game id on this date."""
    game_date = games_for_date.game_date
    for game_id in games_for_date.all_bbref_game_ids:
        lookup = get_date_status_from_bbref_game_id(db_session, game_id)
        if lookup.failure:
            return lookup
        date_status = lookup.value
        # Populate (or refresh) the status row for this game.
        game_status = get_game_status(db_session, game_date, game_id, None)
        game_status.game_date = game_date
        game_status.bbref_game_id = game_id
        game_status.scrape_status_date_id = date_status.id
        game_status.season_id = date_status.season_id
    return Result.Ok()
|
import re
# Markup tag -> ANSI escape sequence table used by output().
_ANSI = {
    # STYLES
    "Bold": "\033[1m", "Dim": "\033[2m", "Underlined": "\033[4m",
    "Blink": "\033[5m", "Reverse": "\033[7m", "Hidden": "\033[8m",
    # TEXT COLOR
    "Default": "\033[39m", "Black": "\033[30m", "Red": "\033[31m",
    "Green": "\033[32m", "Yellow": "\033[33m", "Blue": "\033[34m",
    "Magenta": "\033[35m", "Cyan": "\033[36m", "LightGray": "\033[37m",
    "DarkGray": "\033[90m", "LightRed": "\033[91m", "LightGreen": "\033[92m",
    "LightYellow": "\033[93m", "LightBlue": "\033[94m",
    "LightMagenta": "\033[95m", "LightCyan": "\033[96m", "White": "\033[97m",
    # BACKGROUND COLOR
    "BackgroundDefault": "\033[49m", "BackgroundBlack": "\033[40m",
    "BackgroundRed": "\033[41m", "BackgroundGreen": "\033[42m",
    "BackgroundYellow": "\033[43m", "BackgroundBlue": "\033[44m",
    "BackgroundMagenta": "\033[45m", "BackgroundCyan": "\033[46m",
    "BackgroundLightGray": "\033[47m", "BackgroundDarkGray": "\033[100m",
    "BackgroundLightRed": "\033[101m", "BackgroundLightGreen": "\033[102m",
    "BackgroundLightYellow": "\033[103m", "BackgroundLightBlue": "\033[104m",
    "BackgroundLightMagenta": "\033[105m", "BackgroundLightCyan": "\033[106m",
    "BackgroundWhite": "\033[107m",
    # RESETS
    "Reset": "\033[0m", "ResetBold": "\033[21m", "ResetDim": "\033[22m",
    "ResetUnderlined": "\033[24m", "ResetBlink": "\033[25m",
    "ResetReverse": "\033[27m", "ResetHidden": "\033[28m",
}


def output(text):
    """Replace <Tag> style/color markers in *text* with ANSI escape codes.

    Used by Methods.py, e.g.: print(output("<Red>My Text<Reset>"))
    where "Red" sets the text color and <Reset> restores the defaults.

    BUG/PERF FIX: the original re-ran all ~35 regex substitutions 50 times
    (`for i in range(50)`); re.sub already replaces every occurrence in one
    pass, so the loop only multiplied the work by 50.  Plain string
    replacement suffices because the tags contain no regex metacharacters,
    and no replacement can produce another tag.
    """
    for tag, code in _ANSI.items():
        text = text.replace("<%s>" % tag, code)
    return text
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 7 09:04:08 2021
@author: eduardo.alcala
"""
"""
import os
import xml.etree.ElementTree as ET
def printChildInfo( child, lvl ):
auxtag = child.tag[ child.tag.find("}") + 1 : ]
print( "{}{}".format( lvl, auxtag ) )
for key, values in child.attrib.items():
auxKey = key[ key.find("}") + 1 : ]
if auxKey not in ("Sello", "schemaLocation", "Certificado"):
print( "{}{}: {}".format( lvl + "\t", auxKey, values) )
if len(child) != 0:
for i in range( len(child) ):
printChildInfo( child[i], lvl + "\t" )
ruta = "xml"
filename = "DV_CFDI33_Complemento_FechaFinalPago_Igual_a_FechaInicialPago.xml"
for file in os.listdir(ruta):
fileact = os.path.join( ruta, file)
tree = ET.parse(fileact)
root = tree.getroot()
printChildInfo(root, "")
"""
import os
import xml.dom.minidom
#from matplotlib import pyplot as plt
ruta = "C:/Users/Eduardo.Alcala/Desktop/pruebaPythonXML/xmltest/xml/"
def funcionPrueba(filename):
    """Parse one CFDI payroll XML file and print selected receptor/payroll fields.

    NOTE(review): `ruta` is a hard-coded absolute module-level path -- consider
    passing it as a parameter instead.
    """
    docs = xml.dom.minidom.parse(ruta + filename)
    ubicacion = docs.getElementsByTagName("nomina12:Receptor")
    sueldo = docs.getElementsByTagName("nomina12:Nomina")
    usuario = docs.getElementsByTagName("cfdi:Receptor")
    for i in ubicacion:
        info = {}
        valores = {}
        # BUG FIX: the original also rebound `usuario = {}` here, clobbering
        # the cfdi:Receptor node list read above, so the Receptor/Rfc loop
        # below iterated an empty dict and those fields were never filled.
        info['ClaveEntFed'] = i.getAttribute("ClaveEntFed")
        valores['ClaveEntFed'] = i.getAttribute("ClaveEntFed")
        for a in sueldo:
            info['FechaPago'] = a.getAttribute("FechaPago")
            info['TotalPercepciones'] = a.getAttribute("TotalPercepciones")
            # Treat a missing amount as zero in the numeric `valores` view.
            if a.getAttribute("TotalPercepciones") == "":
                valores['TotalPercepciones'] = 0
            else:
                valores['TotalPercepciones'] = a.getAttribute("TotalPercepciones")
            info['TotalDeducciones'] = a.getAttribute("TotalDeducciones")
            info['TotalOtrosPagos'] = a.getAttribute("TotalOtrosPagos")
        for c in usuario:
            info['Receptor'] = c.getAttribute("Nombre")
            info['Rfc'] = c.getAttribute("Rfc")
        print(info)
def funciondos():
    """Run funcionPrueba over every file in the XML input directory."""
    # NOTE(review): hard-coded absolute path duplicating the module-level
    # `ruta` -- keep the two in sync or derive one from the other.
    files = os.listdir("C:/Users/Eduardo.Alcala/Desktop/pruebaPythonXML/xmltest/xml")
    # TODO: validate that the files are actually XML before parsing.
    #print(files)
    for file in files:
        funcionPrueba(file)
funciondos()
"""
#docs = xml.dom.minidom.parse("DV CFDI33 - Complemento - FechaFinalPago - Mayor a FechaInicialPago.xml")
#plt.bar(valores["ClaveEntFed"], valores["TotalPercepciones"])
#plt.show()
estados = ['JAL', 'MEX', 'YUC', 'QRO', 'SLP', 'MTY']
sueldoPromedio = [31596, 27312, 25971, 17136, 20597, 17449]
plt.bar(estados, sueldoPromedio)
# Displaying the bar plot
plt.show()
"""
|
def note():
    """Prompt the user for a note and append it to note.txt on a new line."""
    text = input("Write Your Notes Here..... ")
    with open("note.txt", "a") as file:
        # BUG FIX: the original wrote "/n" (a literal slash-n); the intended
        # newline escape is "\n".
        file.write(f"\n {text}")


note()
import torch
import torch.nn as nn
from typing import List
class VFE_Layer(nn.Module):
    def __init__(self, in_channels: int, out_channels: int):
        """
        A VFE (voxel feature encoding) layer.
        :param in_channels: int, channel dimension of input
        :param out_channels: int, output dimension after the VFE, must be even
        :raises ValueError: if out_channels is odd
        """
        super(VFE_Layer, self).__init__()
        if out_channels % 2 != 0:
            raise ValueError(f'c_out must be even, got {out_channels}')
        self.units = out_channels // 2
        self.fcn = nn.Sequential(
            nn.Linear(in_features=in_channels, out_features=out_channels),
            nn.ReLU()
        )
        self.bn = nn.BatchNorm2d(num_features=out_channels)

    # BUG FIX: this was `__call__`, which overrides nn.Module.__call__ and
    # bypasses module hooks; nn.Module subclasses must implement `forward`
    # (Module.__call__ dispatches to it, and train()/eval() then behave).
    def forward(self, inputs, mask):
        """
        :param inputs: Tensor [batch_size=B, out_dim=F, max_num_voxels=V, max_num_points=P]
            -- presumably; confirm the layout against the caller, since
            nn.Linear operates on the *last* dimension while BatchNorm2d
            normalizes dim 1.
        :param mask: boolean mask of valid points, broadcastable to the output
        :return: Tensor [B, c_out=C, V, P]
        """
        fcn_out = self.bn(self.fcn(inputs))
        # BUG FIX: torch.max(..., dim=...) returns a (values, indices)
        # namedtuple; calling .repeat on the tuple raised AttributeError.
        max_pool = torch.max(fcn_out, dim=-1, keepdim=True).values
        tiled_max_pool = max_pool.repeat([1, 1, 1, fcn_out.size(-1)])
        # Concatenate pointwise and aggregated features along the channel dim.
        output = torch.cat([fcn_out, tiled_max_pool], dim=1)
        # The mask broadcasts over the channel dimension.
        #mask = mask.repeat([1, 2*self.units, 1, 1,])
        return output * mask.float()
class VFE_Block(nn.Module):
    def __init__(self, in_channels:int,
                 vfe_out_dims:List[int],
                 final_dim:int,
                 sparse_shape):
        """
        :param in_channels: int, channel dimension of input
        :param vfe_out_dims: n-integer list of VFE output dims, each even
        :param final_dim: int, dimension of the dense layer after the VFEs
        :param sparse_shape: [depth, height, width] of the voxel space
        """
        super(VFE_Block, self).__init__()
        self.vfe_out_dims = vfe_out_dims
        self.final_dim = final_dim
        self.sparse_shape = sparse_shape
        # Chain of VFE layers; each consumes the previous layer's output dim.
        self.VFEs = nn.ModuleList(
            [VFE_Layer(dim_in, dim_out)
             for dim_in, dim_out
             in zip([in_channels] + vfe_out_dims[:-1], vfe_out_dims)]
        )
        self.final_fcn = nn.Sequential(
            nn.Linear(in_features=vfe_out_dims[-1], out_features=final_dim),
            nn.ReLU()
        )

    def forward(self, inputs, voxel_coor_buffer, shape):
        """
        :param inputs: Tensor [batch_size=B, out_dim=F, max_num_voxels=V, max_num_points=P]
        :param voxel_coor_buffer: int32 Tensor [num_voxels, 4] -- presumably
            (batch, depth, height, width) indices; confirm with the caller.
        :param shape: [depth=D, height=H, width=W] of the dense voxel grid
        :return: Tensor [B, C, depth, height, width]
        """
        vfe_out = inputs
        # Mask of non-empty points.
        # BUG FIX: torch.max(..., dim=...) returns a (values, indices)
        # namedtuple; passing the tuple to torch.ne raised a TypeError.
        mask = torch.ne(torch.max(inputs, dim=1, keepdim=True).values, 0)
        for i, vfe in enumerate(self.VFEs):
            vfe_out = vfe(vfe_out, mask)
        final = self.final_fcn(vfe_out)
        # Max-pool over the points axis.  BUG FIX: same namedtuple issue.
        final = torch.max(final, dim=-1).values
        # Dense grid that per-voxel features are scattered into.
        voxel_sparse = torch.zeros_like(final[..., 0])[:, None, None, None].repeat(
            [1, *shape, 1]
        )
        idx1, idx2, idx3, idx4 = voxel_coor_buffer.permute((1, 0))
        # NOTE(review): this scatters [B, V, C] rows into (batch, d, h, w)
        # cells -- verify the buffer layout matches.
        voxel_sparse[idx1, idx2, idx3, idx4] = final.permute((0, 2, 1))
        # [B, D, H, W, C] -> [B, C, D, H, W]
        return voxel_sparse.permute((0, 4, 1, 2, 3))
class ConvMiddleLayer(nn.Module):
    def __init__(self, in_channels, out_shape):
        """
        Convolutional middle layer: three 3D conv + BN + ReLU + pad stages,
        then a reshape into `out_shape`.
        :param in_channels: number of channels of input
        :param out_shape: output dims [batch_size=B, new_channels=C, height=H, width=W]
        """
        # BUG FIX: nn.Module.__init__ was never called, so assigning the
        # conv/bn submodules below raised AttributeError.
        super(ConvMiddleLayer, self).__init__()
        self.out_shape = out_shape
        # NOTE(review): conv2/conv3 take `in_channels` inputs but conv1
        # outputs 64 channels, so this only works when in_channels == 64.
        self.conv1 = nn.Conv3d(in_channels=in_channels, out_channels=64,
                               kernel_size=3, stride=(2, 1, 1))
        self.conv2 = nn.Conv3d(in_channels=in_channels, out_channels=64,
                               kernel_size=3, stride=(1, 1, 1))
        self.conv3 = nn.Conv3d(in_channels=in_channels, out_channels=64,
                               kernel_size=3, stride=(2, 1, 1))
        self.bn1, self.bn2, self.bn3 = [nn.BatchNorm3d(num_features=64) for _ in range(3)]
        # ConstantPad3d padding order is (W_l, W_r, H_t, H_b, D_f, D_b).
        self.pad1 = torch.nn.ConstantPad3d(padding=(1, 1) * 3, value=0)
        self.pad2 = torch.nn.ConstantPad3d(padding=(0, 0) + (1, 1) * 2, value=0)
        self.pad3 = torch.nn.ConstantPad3d(padding=(1, 1) * 3, value=0)
        self.relu1, self.relu2, self.relu3 = [torch.nn.ReLU() for _ in range(3)]

    def forward(self, inputs):
        """
        :param inputs: Tensor [batch_size=B, channels=C, depth=D, height=H, width=W]
        :return: Tensor reshaped to self.out_shape
        """
        out = inputs
        # BUG FIX: the attributes are named conv1..conv3/bn1..bn3 etc., but
        # the original loop indexed conv0..conv2 and raised AttributeError.
        for i in range(1, 4):
            out = getattr(self, f'conv{i}')(out)
            out = getattr(self, f'bn{i}')(out)
            out = getattr(self, f'relu{i}')(out)
            out = getattr(self, f'pad{i}')(out)
        out = torch.reshape(out, self.out_shape)
        return out
class RPN(nn.Module):
    """Region proposal network: three conv blocks progressively downsample
    the feature map, transposed convs bring each block's output back to a
    common resolution, and 1x1 heads emit objectness and regression maps."""

    def __init__(self, in_channels, num_anchors_per_cell):
        super(RPN, self).__init__()
        self.num_anchors_per_cell = num_anchors_per_cell
        BN = nn.BatchNorm2d
        # Block 1: stride-2 entry conv + 3 stride-1 convs, 128 channels.
        self.conv1_block1, self.bn1_block1 = self._conv_layer(128, 3, 2, 1, c_in=in_channels), BN(128)
        for i in range(2, 5):
            setattr(self, f'conv{i}_block1', self._conv_layer(128, 3, 1, 1))
            setattr(self, f'bn{i}_block1', BN(128))
        # Block 2: stride-2 entry conv + 5 stride-1 convs, 128 channels.
        self.conv1_block2, self.bn1_block2 = self._conv_layer(128, 3, 2, 1), BN(128)
        for i in range(2, 7):
            setattr(self, f'conv{i}_block2', self._conv_layer(128, 3, 1, 1))
            setattr(self, f'bn{i}_block2', BN(128))
        # Block 3: stride-2 entry conv + 5 stride-1 convs, 256 channels.
        # BUG FIX: the original passed a stray extra positional argument
        # (TypeError: multiple values for c_in) and paired this 256-channel
        # conv with BatchNorm2d(128).
        self.conv1_block3, self.bn1_block3 = self._conv_layer(256, 3, 2, 1, c_in=128), BN(256)
        for i in range(2, 7):
            setattr(self, f'conv{i}_block3', self._conv_layer(256, 3, 1, 1))
            setattr(self, f'bn{i}_block3', BN(256))
        # Deconvs upsample each block's output back to block-1 resolution.
        # BUG FIX: `deconv_2` was assigned twice (the second line was clearly
        # meant to be deconv_3), leaving forward() with no third deconv, and
        # the input channel counts now match the block outputs (128/128/256).
        self.deconv_1, self.deconv_bn1 = self._deconv_layer(256, 3, 1, 1, c_in=128), BN(256)
        self.deconv_2, self.deconv_bn2 = self._deconv_layer(256, 2, 2, 0, c_in=128), BN(256)
        self.deconv_3, self.deconv_bn3 = self._deconv_layer(256, 4, 4, 0), BN(256)
        # 1x1 heads over the concatenated 3*256-channel feature map.
        self.prob_map_conv = self._conv_layer(self.num_anchors_per_cell, 1, 1, 0, c_in=256 * 3)
        self.reg_map_conv = self._conv_layer(7 * self.num_anchors_per_cell, 1, 1, 0, c_in=256 * 3)

    def _conv_layer(self, c_out, kernel_size, stride, padding, c_in=None):
        """Conv2d helper; c_in defaults to c_out.

        BUG FIX: kernel_size and padding were accepted but ignored
        (hard-coded to 3 and 1), which silently turned the 1x1 score and
        regression heads into 3x3 convolutions.
        """
        if c_in is None:
            c_in = c_out
        return nn.Conv2d(in_channels=c_in,
                         out_channels=c_out,
                         kernel_size=kernel_size,
                         stride=stride,
                         padding=padding)

    def _deconv_layer(self, c_out, kernel_size, stride, padding, c_in=None):
        """ConvTranspose2d helper; c_in defaults to c_out."""
        if c_in is None:
            c_in = c_out
        return nn.ConvTranspose2d(in_channels=c_in,
                                  out_channels=c_out,
                                  kernel_size=kernel_size,
                                  stride=stride,
                                  padding=padding)

    def block_conv(self, block_idx, x):
        """Apply conv->bn->relu for every convN_block<block_idx> in order."""
        i = 1
        while True:
            try:
                conv = getattr(self, f'conv{i}_block{block_idx}')
            # BUG FIX: narrowed from a bare `except:` that also swallowed
            # real errors raised inside the conv/bn calls.
            except AttributeError:
                break
            x = conv(x)
            x = getattr(self, f'bn{i}_block{block_idx}')(x)
            x = nn.functional.relu(x)
            i += 1
        return x

    def deconv(self, idx, x):
        # BUG FIX: the original looked the layers up on the *tensor*
        # (`getattr(x, ...)`) instead of the module.
        x = getattr(self, f'deconv_{idx}')(x)
        x = getattr(self, f'deconv_bn{idx}')(x)
        return nn.functional.relu(x)

    def forward(self, x):
        """x: [B, in_channels, H, W] with H and W divisible by 8.
        Returns (prob_map [B, A, H/2, W/2], reg_map [B, 7A, H/2, W/2])."""
        deconvs = []
        for i in range(1, 4):
            x = self.block_conv(i, x)
            deconvs.append(self.deconv(i, x))
        x = torch.cat(deconvs, dim=1)
        prob_map = self.prob_map_conv(x)
        reg_map = self.reg_map_conv(x)
        # NOTE(review): reference implementations apply sigmoid to the
        # probability map at inference time -- confirm where that happens.
        return prob_map, reg_map
|
from py_linq import Enumerable
import pytest
# Shared fixture data for the py_linq Enumerable tests below.
_simple = [1, 2, 3]  # flat list of ints
_complex = [{"value": 1}, {"value": 2}, {"value": 3}]  # list of single-key dicts
# (country, city, branch, sales) rows used by grouping/aggregation tests.
_locations = [
    ("Scotland", "Edinburgh", "Branch1", 20000),
    ("Scotland", "Glasgow", "Branch1", 12500),
    ("Scotland", "Glasgow", "Branch2", 12000),
    ("Wales", "Cardiff", "Branch1", 29700),
    ("Wales", "Cardiff", "Branch2", 30000),
    ("Wales", "Bangor", "Branch1", 12800),
    ("England", "London", "Branch1", 90000),
    ("England", "London", "Branch2", 80000),
    ("England", "London", "Branch3", 70000),
    ("England", "Manchester", "Branch1", 45600),
    ("England", "Manchester", "Branch2", 50000),
    ("England", "Liverpool", "Branch1", 29700),
    ("England", "Liverpool", "Branch2", 25000),
]
@pytest.fixture
def empty() -> Enumerable:
    """An Enumerable over no elements."""
    data = []
    return Enumerable(data)
@pytest.fixture
def simple() -> Enumerable:
    """An Enumerable over the flat int list [1, 2, 3]."""
    data = _simple
    return Enumerable(data)
@pytest.fixture
def complex() -> Enumerable:
    """An Enumerable over the list of {"value": n} dicts."""
    data = _complex
    return Enumerable(data)
@pytest.fixture
def locations() -> Enumerable:
    """An Enumerable over the (country, city, branch, sales) rows."""
    data = _locations
    return Enumerable(data)
def simple_generator(max: int):
    """Yield the integers 0 through `max` inclusive."""
    yield from range(max + 1)
class Val(object):
    """A (number, power) pair with a readable string form."""

    def __init__(self, number, power):
        self.number = number
        self.power = power

    def __str__(self):
        return f"VAL {self.number}: {self.power}"
def powers(base: int, num_iterations: int = 2):
    """Yield Val(i, base ** i) for each i in [0, num_iterations)."""
    for exponent in range(num_iterations):
        yield Val(exponent, base ** exponent)
class Obj(object):
    """Plain (n, v) record used by the join tests."""

    def __init__(self, n, v):
        self.v = v
        self.n = n
class MyObject(object):
    """Object exposing a single `field` together with a getter method."""

    def __init__(self, name):
        self.field = name

    def get_field(self):
        """Return the stored field value."""
        return self.field
def groupjoin_func(r):
    """Project a grouped-join row to (key, size of its group)."""
    group = r[1]
    return (r[0], group.count())
|
import re
import json
# BUGFIX: the abstract container classes live in collections.abc; the plain
# `collections` aliases were removed in Python 3.10, so the old import fails
# there. Fall back for very old interpreters.
try:
    from collections.abc import MutableMapping
except ImportError:  # pragma: no cover - Python 2 fallback
    from collections import MutableMapping
from smooch.error import SmoochError

# Matches a "_x" snake_case boundary (not at the start of the string) so
# keys can be camelized, e.g. given_name -> givenName.
snake_case = re.compile('(?<!^)_([a-z0-9])')
class SmoochResource(MutableMapping):
    """Base class for Smooch API resources.

    Behaves like a dict whose keys are camelCase (the wire format) while
    also exposing the same values as snake_case attributes, e.g.
    ``resource.given_name`` reads/writes ``resource['givenName']``.

    Subclasses declare their schema via `_optional_attrs` /
    `_required_attrs`; the constructor validates the supplied arguments
    against that schema and raises ValueError on mismatch.
    """

    def __init__(self, api=None, *args, **kwargs):
        self._api = api  # API client used by subclasses for HTTP calls
        all_args = kwargs
        # A single dict positional argument is accepted alongside kwargs.
        if len(args) > 0 and type(args[0]) == dict:
            all_args.update(args[0])
        # Normalize every key to camelCase before validation/storage.
        all_args = dict(
            map(lambda kv: (self._key_transform(kv[0]), kv[1]),
                all_args.items()))
        # validate optional and required args
        optional_attrs = self._optional_attrs()
        required_attrs = self._required_attrs()
        all_attrs = optional_attrs.union(required_attrs)
        unknown_args = set(all_args.keys()).difference(all_attrs)
        if unknown_args:
            # BUGFIX: corrected the "recieved" typo in the error message.
            raise ValueError("'%s' received invalid argument(s): '%s'" %
                             (type(self).__name__, ', '.join(unknown_args)))
        missing_args = required_attrs.difference(set(all_args.keys()))
        if missing_args:
            raise ValueError("'%s' missing argument(s): '%s'" %
                             (type(self).__name__, ', '.join(missing_args)))
        self._store = dict()
        self.update(all_args)

    def __setattr__(self, key, value):
        # Underscore-prefixed names (and already-materialized instance
        # attributes) live on the instance itself, not in the store.
        if key[0] == '_' or key in self.__dict__:
            return super(SmoochResource, self).__setattr__(key, value)
        else:
            self._store[self._key_transform(key)] = value

    def __getattr__(self, key):
        if key[0] == '_':
            raise AttributeError(key)
        try:
            return self._store[self._key_transform(key)]
        except KeyError:
            raise AttributeError("'%s' object has no attribute '%s'" %
                                 (type(self).__name__, key))

    def __delattr__(self, key):
        # BUGFIX: mirror __getattr__/__setattr__ -- the key must be
        # camelized before the store lookup (deleting a snake_case
        # attribute always raised before), and a missing key surfaces as
        # AttributeError rather than KeyError for attribute-style access.
        try:
            del self._store[self._key_transform(key)]
        except KeyError:
            raise AttributeError("'%s' object has no attribute '%s'" %
                                 (type(self).__name__, key))

    def __getitem__(self, key):
        return self._store[self._key_transform(key)]

    def __setitem__(self, key, value):
        self._store[self._key_transform(key)] = value

    def __delitem__(self, key):
        del self._store[self._key_transform(key)]

    def __iter__(self):
        return iter(self._store)

    def __len__(self):
        return len(self._store)

    def _key_transform(self, key):
        # camelize keys, eg given_name --> givenName
        return re.sub(snake_case, lambda pat: pat.group(1).upper(), key)

    @classmethod
    def _optional_attrs(cls):
        """Set of camelCase keys a subclass accepts optionally."""
        return set()

    @classmethod
    def _required_attrs(cls):
        """Set of camelCase keys a subclass requires at construction."""
        return set()

    def json(self):
        """Serialize the stored (camelCase) data as a JSON string."""
        return json.dumps(self._store)

    def dict(self):
        """Return the underlying camelCase-keyed dict (not a copy)."""
        return self._store
class AppUser(SmoochResource):
    """A Smooch app-user resource."""

    @classmethod
    def _optional_attrs(cls):
        return {'_id', 'givenName', 'surname', 'email', 'signedUpAt',
                'conversationStarted', 'properties', 'credentialRequired'}

    def get_conversation(self):
        """Fetch this user's conversation from the API."""
        path = '/appusers/%s/conversation' % (self._store['_id'])
        res = self._api.get(path)
        if res.status_code == 200:
            return Conversation(api=self._api, **res.json()['conversation'])
        raise SmoochError('Get conversation failed', res)
class Device(SmoochResource):
    """A device attached to an app user; `platform` defaults to 'other'."""

    def __init__(self, *args, **kwargs):
        super(Device, self).__init__(*args, **kwargs)
        # Fill in the default platform when the caller supplied none.
        self.setdefault('platform', 'other')

    @classmethod
    def _required_attrs(cls):
        return {'id'}

    @classmethod
    def _optional_attrs(cls):
        return {'platform'}
class Conversation(SmoochResource):
    """A conversation between app makers and an app user."""

    @classmethod
    def _required_attrs(cls):
        return {'_id', 'messages', 'appMakers', 'appUsers'}
class Message(SmoochResource):
    """A single message within a conversation."""

    @classmethod
    def _required_attrs(cls):
        return {'_id', 'text', 'authorId', 'role'}

    @classmethod
    def _optional_attrs(cls):
        return {'name', 'avatarUrl'}
|
"""
Tools used by the `explore_ligpy_results.ipynb` notebook that help with
analysis and plotting.
"""
import os
import cPickle as pickle
import numpy as np
from constants import MW
def load_results(path):
    """
    Load the results from the ODE solver, along with the program parameters
    used to generate those results. The program parameters should be saved
    in the `prog_params.pkl` file generated by `ligpy.py`. The model species
    concentration results should be in the same format as those output by
    DDASAC (see the ligpy/sample_files/ folder for an example).

    Parameters
    ----------
    path : str
        path to the folder that contains `/results_dir/`, where the *.out
        files (model results) and `prog_params.pkl` are saved.

    Returns
    -------
    end_time : float
        the pyrolysis end time in seconds (excludes cool-down
        time)
    output_time_step : float
        the time step at which results were saved (sec)
    initial_T : float
        initial temperature (K)
    heating_rate : float
        the heating rate (K/min)
    max_T : float
        maximum temperature of pyrolysis (K)
    atol : float
        absolute tolerance used by the ODE solver
    rtol : float
        relative tolerance used by the ODE solver
    plant : str
        the name of the lignin species modeled
    cool_time : int
        the time (s) to cool down after an isothermal hold
    y : numpy matrix
        a matrix with the concentrations of each species in the
        kinetic scheme for every time in `t` (mol/L)
    t : numpy array
        array with all the times (s) corresponding to entries in
        `y` and `T`
    T : numpy array
        array with the temperature (K) at every time in `t`
    specieslist : list
        a list of all the species participating in the model
    speciesindices : dict
        dictionary where species names are keys and values are
        the index in `y` that corresponds to that species
    indices_to_species : dict
        the opposite of speciesindices
    """
    rpath = path + '/results_dir'
    if not os.path.exists(rpath):
        raise ValueError('Please specify a valid directory with a'
                         ' results_dir folder.')
    # Program parameters are a positionally ordered tuple pickled by ligpy.py;
    # unpack them by index in that fixed order.
    with open(rpath + '/prog_params.pkl', 'rb') as params:
        prog_params = pickle.load(params)
    end_time = prog_params[0]
    output_time_step = prog_params[1]
    initial_T = prog_params[2]
    heating_rate = prog_params[3]
    max_T = prog_params[4]
    atol = prog_params[5]
    rtol = prog_params[6]
    plant = prog_params[7]
    cool_time = prog_params[8]
    if not os.path.isfile(rpath + '/ddasac_results_1.out'):
        raise IOError('There is not a valid DDASAC .out file.')
    # Determine the order that species are listed in the DDASAC model.c file
    # by locating the "enum {" declaration and reading up to 1000 bytes of
    # the enumerator list that follows it.
    with open(path + '/model.c', 'rb') as modelc:
        body = modelc.read()
        spos = body.find('enum {')
        modelc.seek(spos+6)
        # this includes the species list that I want to find
        listiwant = modelc.read(1000)
    # this is the list of all the species in the DDASAC model
    # (everything up to the closing '}' of the enum)
    species_ddasac = ''
    for i, char in enumerate(listiwant):
        if char == '}':
            species_ddasac = listiwant[:i]
            break
    # Build a list of species from this string of species
    species_ddasac = species_ddasac.replace('\n', '').replace(' ', '')
    specieslist_ddasac = []
    for s in species_ddasac.split(','):
        specieslist_ddasac.append(s)
    # Build dictionaries of corresponding indices (these indices from DDASAC's
    # output are different from those from `ligpy_utils.get_speciesindices()`)
    # NOTE: the index dicts are built from the original enum (column) order;
    # the sort below only changes legend ordering, not the mapping.
    speciesindices_ddasac = {}
    for i, species in enumerate(specieslist_ddasac):
        speciesindices_ddasac[species] = i
    indices_to_species_ddasac = dict(zip(speciesindices_ddasac.values(),
                                         speciesindices_ddasac.keys()))
    # Sort to make sure legends will always be the same
    specieslist_ddasac.sort()
    # Read the first DDASAC results file
    file1 = rpath + '/ddasac_results_1.out'
    t, y, T = read_results_files(file1, specieslist_ddasac)
    # Check to see if a temperature ramp was followed by an isothermal stage
    try:
        file2 = rpath + '/ddasac_results_2.out'
        t2, y2, T2 = read_results_files(file2, specieslist_ddasac)
        # Each stage restarts its clock, so offset stage-2 times by the last
        # stage-1 time; [1:] drops the duplicated initial row of each stage.
        y = np.concatenate((y, y2[1:]))
        t = np.concatenate((t, t[-1]+t2[1:]))
        T = np.concatenate((T, T2[1:]))
    except IOError:
        print 'There is not a second DDASAC results file (isothermal hold)'
    # Check to see if a cool down phase was included
    try:
        file3 = rpath + '/ddasac_results_3.out'
        t3, y3, T3 = read_results_files(file3, specieslist_ddasac)
        y = np.concatenate((y, y3[1:]))
        t = np.concatenate((t, t[-1]+t3[1:]))
        T = np.concatenate((T, T3[1:]))
    except IOError:
        print 'There is not a third DDASAC results file (cool down period)'
    return [end_time, output_time_step, initial_T, heating_rate, max_T, atol,
            rtol, plant, cool_time, y, t, T, specieslist_ddasac,
            speciesindices_ddasac, indices_to_species_ddasac]
def read_results_files(filename, specieslist_ddasac):
    """
    Read and process one DDASAC *.out results file.

    The file layout is one header line, then one tab-separated data row per
    timepoint, then 6 lines of descriptive trailer text. In each data row
    the first field is "<label> <time>", the last-but-one field is the
    temperature, and the fields in between are species concentrations.

    Parameters
    ----------
    filename : str
        the *.out file to read (relative or absolute path)
    specieslist_ddasac : list
        the specieslist_ddasac object from load_results()

    Returns
    -------
    t : numpy array
        output time (s) for each saved row
    y : numpy matrix
        concentration of each species (mol/L) at every time in `t`
    T : numpy array
        temperature at every time in `t`
    """
    with open(filename, 'r') as result:
        all_lines = result.readlines()
    # one header line + 6 trailer lines are not data
    num_rows = len(all_lines) - 7
    t = np.zeros((num_rows, 1), dtype='float64')
    T = np.zeros((num_rows, 1), dtype='float64')
    y = np.zeros((num_rows, len(specieslist_ddasac)), dtype='float64')
    for row, line in enumerate(all_lines[1:num_rows + 1]):
        fields = line.split('\t')
        t[row] = fields[0].split(' ')[1]
        T[row] = fields[-2]
        for col, concentration in enumerate(fields[1:-2]):
            y[row, col] = concentration
    return t, y, T
def tar_elem_analysis(speciesindices, y, t, t_choice='end'):
    """
    Elemental (C, H, O) analysis of the tar fraction.

    Computes the molar C/H/O content of the feedstock (time = 0) and of the
    tar fraction (species whose MW phase is 't', 'lt' or 'H2O') at a chosen
    time, plus the corresponding mol% and wt% breakdowns.

    Parameters
    ----------
    speciesindices : dict
        maps a species name to its column index in `y`
    y : numpy array
        species concentrations (mol/L) for every time in `t`
    t : numpy array
        times (s) corresponding to the rows of `y`
    t_choice : str or int, optional
        'end' (default) analyses the final saved timepoint; an integer is
        used directly as an index into `t`

    Returns
    -------
    tuple
        (ea0, ea, ea0_molpercent, ea_molpercent, ea0_wtpercent,
        ea_wtpercent, choice, t_index): the elemental analyses at time 0
        and at the chosen time, their mol% and wt% forms, a description of
        the analysed time, and the index used.
    """
    # Feedstock composition: only species present at t = 0 contribute
    # (in the MW dict those are the initial lignin species).
    ea0 = np.zeros(3)
    for species in MW:
        conc0 = y[0, speciesindices[species]]
        if conc0 != 0:
            # mol C/L, mol H/L, mol O/L
            for elem in range(3):
                ea0[elem] += conc0 * MW[species][3][elem]
    # Resolve the timepoint to analyse.
    if t_choice == 'end':
        t_index = len(t) - 1
        choice = 'Analysis done at the end of the entire simulation.'
    else:
        t_index = t_choice
        choice = 'Analysis done at time = %s sec.' % t[t_index]
    # Tar-fraction composition at the chosen time.
    ea = np.zeros(3)
    tar_phases = {'t', 'lt', 'H2O'}
    for species in MW:
        if MW[species][1] in tar_phases:
            conc = y[t_index, speciesindices[species]]
            for elem in range(3):
                ea[elem] += conc * MW[species][3][elem]
    ea0_molpercent = ea0 / ea0.sum()
    ea_molpercent = ea / ea.sum()
    # Convert mol/L to g/L with the atomic masses of C, H, O to get wt%.
    atomic_masses = [12.011, 1.0079, 15.999]
    ea_g = ea * atomic_masses
    ea0_g = ea0 * atomic_masses
    ea_wtpercent = ea_g / ea_g.sum()
    ea0_wtpercent = ea0_g / ea0_g.sum()
    return (ea0, ea, ea0_molpercent, ea_molpercent, ea0_wtpercent,
            ea_wtpercent, choice, t_index)
def C_fun_gen(fractions, speciesindices, y, time):
    """
    Distribution of carbon functional groups as a fraction of total carbon.

    Parameters
    ----------
    fractions : list
        lumped phases to include (any subset of
        ['g','s','lt','t','char','H20','CO','CO2']), or ['initial'] for the
        pre-pyrolysis distribution (which forces time = 0)
    speciesindices : dict
        maps a species name to its column index in `y`
    y : numpy array
        species concentrations (mol/L) for every saved time
    time : int
        index of the timepoint to analyse (overridden for ['initial'])

    Returns
    -------
    numpy array
        7 entries normalized to sum to 1, in the order: carbonyl,
        aromatic C-O, aromatic C-C, aromatic C-H, aliphatic C-O,
        aromatic methoxyl, aliphatic C-C
    """
    C_fun = np.zeros(7)
    ind = speciesindices
    initial = fractions == ['initial']
    if initial:
        # The initial distribution is always evaluated at t = 0.
        time = 0
    for species in MW:
        if initial:
            # Only species actually present at t = 0 contribute.
            if y[time, ind[species]] == 0:
                continue
        elif MW[species][1] not in set(fractions):
            continue
        # moles of each functional group per litre (order as in docstring)
        for group in range(7):
            C_fun[group] += y[time, ind[species]] * MW[species][4][group]
    C_fun /= C_fun.sum()
    return C_fun
def lump_species(speciesindices, m):
"""
Lump the molecular species in the model into subsets of
solids, tars, and gases. Also separate heavy tars into
phenolic and syringol families.
Parameters
----------
speciesindices : dict
dictionary from `load_results()` where species names are
keys and values are the index in `y` that corresponds to
that species
m : numpy array
a matrix with the mass fraction of each species in the
kinetic scheme for every time in `t`
Returns
-------
lumped : numpy array
each row in this array is the mass fraction of a
lumped phase (0 = solid, 1 = heavy tar, 2 = light tar,
3 = gas, 4 = CO, 5 = CO2, 6 = H2O, 7 = char)
phenolic_families : numpy array
Splits the heavy tar components into the phenol
family (first row) and syringol family (second row)
morelumped : numpy array
a "more lumped" version of `lumped` where
column 0 = solids, 1 = tars, 2 = gases
"""
lumped = np.zeros((m.shape[0], 8))
phenolic_families = np.zeros((m.shape[0], 2))
for species in MW:
if MW[species][1] == 's':
lumped[:, 0] += m[:, speciesindices[species]]
elif MW[species][1] == 't':
lumped[:, 1] += m[:, speciesindices[species]]
if MW[species][2] == 'p':
phenolic_families[:, 0] += m[:, speciesindices[species]]
elif MW[species][2] == 's':
phenolic_families[:, 1] += m[:, speciesindices[species]]
elif MW[species][1] == 'lt':
lumped[:, 2] += m[:, speciesindices[species]]
elif MW[species][1] == 'g':
lumped[:, 3] += m[:, speciesindices[species]]
elif MW[species][1] == 'CO':
lumped[:, 4] += m[:, speciesindices[species]]
elif MW[species][1] == 'CO2':
lumped[:, 5] += m[:, speciesindices[species]]
elif MW[species][1] == 'H2O':
lumped[:, 6] += m[:, speciesindices[species]]
elif MW[species][1] == 'char':
lumped[:, 7] += m[:, speciesindices[species]]
else:
print '%s does not have a phase defined.' % species
# Make a more lumped (3 component) model
morelumped = np.zeros((m.shape[0], 3))
morelumped[:, 0] = lumped[:, 0] + lumped[:, 7]
morelumped[:, 1] = lumped[:, 1] + lumped[:, 2] + lumped[:, 6]
morelumped[:, 2] = lumped[:, 3] + lumped[:, 4] + lumped[:, 5]
return lumped, phenolic_families, morelumped
def generate_report(speciesindices, specieslist, y, m, t, which_result):
"""
Print a descriptive summary of a specific simulation.
Parameters
----------
speciesindices : dict
dictionary from `load_results()` where species names are
keys and values are the index in `y` that corresponds to
that species
specieslist : list
the specieslist_ddasac object from load_results()
y : numpy array
a matrix with the concentrations of each species in the
kinetic scheme for every time in `t` (mol/L)
m : numpy array
a matrix with the mass fraction of each species in the
kinetic scheme for every time in `t`
t : numpy array
array with all the times (s) corresponding to entries in
`y` and `T`
which_result : str
the name of the simulation folder you are analysing
Returns
-------
t_index : int
the index of `t` where this analysis was performed
"""
(ea0, ea, ea0_molpercent, ea_molpercent, ea0_wtpercent, ea_wtpercent, choice,
t_index) = tar_elem_analysis(speciesindices, y, t)
# Header and elemental analysis results
print1 = ('\n{:-^80}\n'
'Analysis of folder: {}\n'
'{}\n'
'\n{:.^80}\n\n'
'Feedstock (wt%) : {:.1%} C {:>7.1%} H {:>7.1%} O\n'
'Bio-oil (wt%) : {:.1%} C {:>7.1%} H {:>7.1%} O\n\n'
'Feedstock (mol%) : {:.1%} C {:>7.1%} H {:>7.1%} O\n'
'Bio-oil (mol%) : {:.1%} C {:>7.1%} H {:>7.1%} O\n'
.format(' REPORT ', which_result.value, choice,
' Elemental Analysis ', ea0_wtpercent[0], ea0_wtpercent[1],
ea0_wtpercent[2], ea_wtpercent[0], ea_wtpercent[1],
ea_wtpercent[2], ea0_molpercent[0], ea0_molpercent[1],
ea0_molpercent[2], ea_molpercent[0], ea_molpercent[1],
ea_molpercent[2]))
# H:C ratio in tar
# a low H:C ratio limits hydrocarbon yield during upgrading, so upgraded
# product is primarily aromatics. Combustion energetics can be estimated from
# the bond energies for all the classifications of fossil fuels. The amount of
# energy released is dependent on the oxidation state of the carbons in the
# hydrocarbon which is related to the hydrogen/carbon ratio. The more hydrogen
# per carbon, the lower the oxidation state and the more energy that will be
# released during the oxidation reaction. Thus the greater the H/C ratio,
# the more energy release on combustion.
# Sample values: gas 4/1, petroleum 2/1, coal 1/1, ethanol 3/1
print2 = '\nH:C ratio of tar = {:.3}\n'.format(ea[1] / ea[0])
# Moisture content in tar -- typical value for wood bio-oil is 25%
mtot = [0]
for species in MW:
if MW[species][1] in set(['t', 'lt', 'H2O']):
# the total mass fraction of all tar components at the specified time
mtot += m[t_index, speciesindices[species]]
# The moisture content (wt%) in the bio-oil
mc = m[t_index, speciesindices['H2O']] / mtot
print3 = '\nMoisture content of tar (wt%) = {:.1%}\n'.format(mc[0])
# The distribution of carbon functional groups in the tar
groups = ['C=O', 'aromatic C-O', 'aromatic C-C', 'aromatic C-H',
'aliphatic C-O', 'aromatic Methoxyl', 'aliphatic C-C']
Cfun0 = C_fun_gen(['initial'], speciesindices, y, 0)
Cfun = C_fun_gen(['t','lt'], speciesindices, y, t_index)
Cfunheavy = C_fun_gen(['t'], speciesindices, y, t_index)
Cfunlight = C_fun_gen(['lt'], speciesindices, y, t_index)
print4 = ('\n{:.^80}\n\n'
'{: <19}{: <16}{: <16}{: <16}{: <16}'
.format(' Distribution of C-functional groups (shown as % of C) ',
' ','Feedstock','Bio-oil','Heavy oil','Light oil'))
print print1, print2, print3, print4
for i, group in enumerate(groups):
print5 = ('%s%s%s%s%s' % ('{: <19}'.format(group),
'{: <16.2%}'.format(Cfun0[i]),
'{: <16.2%}'.format(Cfun[i]),
'{: <16.2%}'.format(Cfunheavy[i]),
'{: <16.2%}'.format(Cfunlight[i])))
print print5
# lump the molecules in the model into groups
lumped, phenolic_families, morelumped = lump_species(speciesindices, m)
# The final mass fractions of each component (morelumped)
print6 = ('\n{:.^80}\n\n'
'Solids:\t\t {:>10.2%}\n'
'Gases:\t\t {:>10.2%}\n'
'Total Tar:\t {:>10.2%}\n'
' Heavy Tars:\t {:>10.2%}\n'
' Light Tars:\t {:>10.2%}\n'
' Water:\t {:>10.2%}'
.format(' Final lumped product yields (wt%) ', morelumped[-1, 0],
morelumped[-1, 2], morelumped[-1, 1], lumped[-1, 1],
lumped[-1, 2], lumped[-1, 6]))
print7 = ('\n\n{:.2%} of heavy tars are derived from phenol, '
'{:.2%} are derived from syringol'
.format(phenolic_families[-1, 0] / lumped[-1, 1],
phenolic_families[-1, 1] / lumped[-1, 1]))
# Look at the final distribution of gases
print8 = '\n\n{:.^80}\n'.format(' Final gas composition (wt%) ')
print print6, print7, print8
# dictionary with the ending mass fraction for each species
final_mass_fracs = {}
for species in specieslist:
final_mass_fracs[species] = m[t_index, speciesindices[species]]
gas_list = {}
for species in specieslist:
if MW[species][1] in ('g', 'CO', 'CO2'):
gas_list[species] = final_mass_fracs[species]
gas_w = sorted(gas_list, key=gas_list.__getitem__, reverse=True)[:8]
for species in gas_w:
print9 = ('%s\t%s' % ('{0: <8}'.format(species),
'{0: <18}'.format(final_mass_fracs[species])))
print print9
# identify the 20 species with the highest mass fractions at the end
print10 = ('\n{:.^80}\n'
.format(' Top 20 species (by mass fraction) at end (wt%) '))
print print10
top = sorted(final_mass_fracs, key=final_mass_fracs.__getitem__,
reverse=True)[:20]
for species in top:
print11 = '%s\t%s' % ('{0: <8}'.format(species),
'{0: <18}'.format(final_mass_fracs[species]))
print print11
return t_index
|
"""-----------------
Exercise 3 (Opgave 3)
Anel Busuladzic
-----------------"""
# Five different arithmetic expressions that all evaluate to 12.
A = 7+5        # addition
B = 16-4       # subtraction
C = 6*2        # multiplication
D = 24//2      # floor division (int result)
E = int(24/2)  # true division yields a float; cast back to int
print(A)
print(B)
print(C)
print(D)
print(E)
# Exponentiation: 2 raised to the 4th power.
produkt2 = 2**4
print(produkt2)
import json
import pickle
import threading
import time
# file_x = 'E:/DataSet/data/features_noise.dat'
# file_y = 'E:/DataSet/data/label_class_0.dat'
#
# X = numpy.genfromtxt(file_x, delimiter=' ')
# y = numpy.genfromtxt(file_y, delimiter=' ')
# Y = numpy.array([i for i in range(X.shape[1])])
# print(X.shape[1])
# print(X.shape[0])
# print(Y)
# from itertools import combinations
#
# import numpy
#
# X = numpy.array([[1,2,3,4,5],[2,3,4,5,3]])
# Y = numpy.array([i for i in range(X.shape[1])])
# print(Y)
# test = combinations(Y[0:3],r =3)
#
# for el in test:
# print(el)
#!/usr/bin/python3
import _thread
import time
#
# # 为线程定义一个函数
# def print_time( threadName, delay):
# count = 0
# while count < 5:
# time.sleep(delay)
# count += 1
# print ("%s: %s" % ( threadName, time.ctime(time.time()) ))
#
# def f( str ="hello"):
#
# count = 0
# while count < 5:
# time.sleep(2)
# print(str)
#
# # 创建两个线程
# try:
# _thread.start_new_thread(f,("Thread-1",))
# _thread.start_new_thread(f,("Thread-2",))
# except:
# print ("Error: 无法启动线程")
#
# while 1:
# pass
# exitFlag = 0
#
# class myThread (threading.Thread):
# def __init__(self, threadID, name, counter):
# threading.Thread.__init__(self)
# self.threadID = threadID
# self.name = name
# self.counter = counter
# def run(self):
# print ("开始线程:" + self.name)
# print_time(self.name, self.counter, 5)
# print ("退出线程:" + self.name)
#
# def print_time(threadName, delay, counter):
# while counter:
# if exitFlag:
# threadName.exit()
# time.sleep(delay)
# print ("%s: %s" % (threadName, time.ctime(time.time())))
# counter -= 1
#
# # 创建新线程
# thread1 = myThread(1, "Thread-1", 1)
# thread2 = myThread(2, "Thread-2", 2)
#
# # 开启新线程
# thread1.start()
# thread2.start()
# thread1.join()
# thread2.join()
# print ("退出主线程")
# <class 'dict'>
# {'2': ((0, 1), 0.55625), '3': ((0, 1, 2), 0.56796875)}
# Pretty-print the SBS feature-selection results for the valence and arousal
# targets. Each result file maps str(num_features) -> (feature indices, score),
# e.g. {'2': ((0, 1), 0.55625), '3': ((0, 1, 2), 0.56796875)}.
# BUGFIX: the files were opened without context managers, so a failure while
# loading/printing leaked the handles; `with` guarantees they are closed.
with open('E:/DataSet/data/SBS/clear_valence_18fea.json', 'r') as f1:
    f1c = json.load(f1)
with open('E:/DataSet/data/SBS/noise_valence.json', 'r') as f2:
    f2c = json.load(f2)
with open('E:/DataSet/data/SBS/clear_arousal_18fea.json', 'r') as f3:
    f3c = json.load(f3)
with open('E:/DataSet/data/SBS/noise_arousal.json', 'r') as f4:
    f4c = json.load(f4)
for i in range(2, 18):
    print(str(i) + " features: ")
    print("valence:")
    print("clear")
    print(f1c[str(i)])
    # print("noise")
    # print(f2c[str(i)])
    print("arousal:")
    print("clear")
    print(f3c[str(i)])
    # print("noise")
    # print(f4c[str(i)])
    # print()
|
#!/usr/bin/env python
# coding: utf-8
# # Problem 6. Difference-equations assignment
# # The Lucas numbers are related to the Fibonacci numbers and are defined by
# # the recurrence L(n+2) = L(n+1) + L(n) with L0 = 2, L1 = 1. This program
# # prints: the 18th Lucas number, the Lucas number closest to 1000, and the
# # first Lucas number greater than 100.
# In[1]:
# Part 1: iterate the recurrence. After the loop `iterador` holds the last
# Lucas number computed and `lista1` the values generated along the way.
# NOTE(review): the loop runs for range(2, 18), so `lista1` contains 16
# values (L2..L17) and excludes L0 and L1, although the printed label says
# "the first 18 Lucas numbers" -- confirm the intended indexing.
l0 = 2
l1 = 1
iterador = 0
i = 0
lista1 = []
for i in range (2,18):
    iterador = l0 + l1
    l0 =l1
    l1 = iterador
    lista1.append(iterador)
print("Los 18 primeros numeros de Lucas son:")
print(lista1)
print("_________________________________________________")
print("Parte 1: Numero 18 de Lucas")
print("_________________________________________________")
print("El 18 es:")
print(iterador)
######################################
###### Resetting the loop values #####
######################################
print("_________________________________________________")
print("Parte 2: Número de Lucas mas cercano a mil")
print("_________________________________________________")
# Part 2: generate Lucas numbers until one exceeds 1000; `minimo` tracks the
# largest value below 1000 and `maximo` the first value at/above it, then the
# closer of the two is reported.
l02 = 2
l12 = 1
iterador2 = 0
minimo = 0
maximo = 0
c = 0
while (maximo == 0):
    iterador2 = l02 + l12
    l02 = l12
    l12 = iterador2
    if (iterador2 < 1000):
        minimo = iterador2
    else:
        maximo = iterador2
    c = c+1
# Distances of the two candidates from 1000.
varA = maximo-1000
varB = 1000-minimo
#print("VarA:")
#print(varA)
#print("VarB")
#print(varB)
if ((varA)<(varB)):
    print("Maximo: ")
    print(maximo)
else:
    print("El mas cercano a mil: ")
    print(minimo)
print("_________________________________________________")
print("Parte 3: El primer número de Lucas mayor que 100")
print("_________________________________________________")
# Part 3: iterate until the recurrence first produces a value above 100.
l03 = 2
l13 = 1
iterador3 = 0
while(iterador3<=100):
    iterador3 = l03+l13
    l03 = l13
    l13 = iterador3
print("El primero mayor que 100 es")
print(iterador3)
#
|
# -*- coding: utf-8 -*-
# Original Code : https://github.com/alrojo/CB513/blob/master/data.py
import os
import numpy as np
import subprocess
from utils import load_gz, save_text, save_picke
TRAIN_PATH = '../pssp-data/cullpdb+profile_6133_filtered.npy.gz'
TEST_PATH = '../pssp-data/cb513+profile_split1.npy.gz'
TRAIN_URL = "http://www.princeton.edu/~jzthree/datasets/ICML2014/cullpdb+profile_6133_filtered.npy.gz"
TEST_URL = "http://www.princeton.edu/~jzthree/datasets/ICML2014/cb513+profile_split1.npy.gz"


# Output-path builders for a dataset split key ('train' or 'test').
# PEP 8 (E731): use `def` instead of assigning lambdas so tracebacks and
# reprs carry useful names; the call signatures are unchanged.
def AA_PATH(key):
    """Path of the amino-acid sequence text file for split `key`."""
    return f'../pssp-data/aa_{key}.txt'


def SP_PATH(key):
    """Path of the pickled sequence-profile file for split `key`."""
    return f'../pssp-data/sp_{key}.pkl'


def PSS_PATH(key):
    """Path of the secondary-structure text file for split `key`."""
    return f'../pssp-data/pss_{key}.txt'
def download_dataset():
    """Fetch the train/test CB513 archives unless both already exist."""
    print('[Info] Downloading CB513 dataset ...')
    if os.path.isfile(TRAIN_PATH) and os.path.isfile(TEST_PATH):
        return
    os.makedirs('../pssp-data', exist_ok=True)
    os.system(f'wget -O {TRAIN_PATH} {TRAIN_URL}')
    os.system(f'wget -O {TEST_PATH} {TEST_URL}')
def make_datasets():
    """Build the train and test transformer datasets."""
    print('[Info] Making datasets ...')
    for path, split in ((TRAIN_PATH, 'train'), (TEST_PATH, 'test')):
        X, y, seq_len = make_dataset(path)
        make_dataset_for_transformer(X, y, seq_len, split)
def make_dataset(path):
    """Load one gzipped cullpdb/CB513 array and split it into X, y, seq_len."""
    raw = load_gz(path).reshape(-1, 700, 57)
    # Features: columns 0-20 (amino-acid one-hots) plus 35-55 (profile).
    feature_cols = np.append(np.arange(21), np.arange(35, 56))
    X = raw[:, :, feature_cols].transpose(0, 2, 1).astype('float32')
    # Labels: columns 22-29 are 8-state one-hots; dot with arange(8)
    # collapses each to its integer class code.
    onehot = raw[:, :, 22:30]
    y = np.array([np.dot(sample, np.arange(8)) for sample in onehot])
    y = y.astype('float32')
    # Column 30 flags NoSeq positions; inverting it gives the residue mask,
    # whose row sums are the sequence lengths.
    residue_mask = raw[:, :, 30] * -1 + 1
    seq_len = residue_mask.sum(axis=1).astype(int)
    return X, y, seq_len
def make_dataset_for_transformer(X, y, seq_len, key):
    """Write one split's amino-acid, profile and structure files to disk."""
    X_amino = X[:, :21, :]
    X_profile = X[:, 21:, :]
    artifacts = (
        ('amino_acid_array', get_amino_acid_array(X_amino, seq_len),
         AA_PATH(key), save_text),
        ('seq_profile', get_seq_profile(X_profile, seq_len),
         SP_PATH(key), save_picke),
        ('pss_array', get_pss_array(y, seq_len),
         PSS_PATH(key), save_text),
    )
    for name, data, save_path, saver in artifacts:
        saver(data, save_path)
        print(f'[Info] Saved {name} for {key} in {save_path}')
def get_amino_acid_array(X_amino, seq_len):
    """Decode one-hot amino-acid channels into space-separated letter strings.

    X_amino is (samples, 21, positions); row i of each sample is the one-hot
    track of the i-th letter below. Returns one string per sample covering
    its first seq_len positions.
    """
    letters = ['A', 'C', 'E', 'D', 'G', 'F', 'I', 'H', 'K', 'M',
               'L', 'N', 'Q', 'P', 'S', 'R', 'T', 'W', 'V', 'Y', 'X']
    decoded = []
    for sample, length in zip(X_amino, seq_len):
        by_position = {}
        for row, code in enumerate(letters):
            for pos in np.where(sample[row] == 1)[0]:
                by_position[pos] = code
        decoded.append(' '.join(by_position[p] for p in range(length)))
    return decoded
def get_pss_array(label, seq_len):
    """Decode integer 8-state codes into space-separated structure letters.

    `label` holds one code (0-7) per position; positions whose code matches
    no icon exactly are rendered as 'Nofill', as in the original scheme.
    """
    icons = ['L', 'B', 'E', 'G', 'I', 'H', 'S', 'T']
    decoded = []
    for target, length in zip(label, seq_len):
        window = target[:length]
        symbols = np.array(['Nofill'] * length)
        for code, icon in enumerate(icons):
            symbols[np.where(window == code)[0]] = icon
        decoded.append(' '.join(symbols[pos] for pos in range(length)))
    return decoded
def get_seq_profile(X_profile, seq_len):
    """Trim each sample's profile matrix to its true sequence length."""
    return [profile[:, :length]
            for profile, length in zip(X_profile, seq_len)]
|
import collections

# Count how often each whitespace-separated token occurs in input.txt and
# append the sorted "token : count" lines to output.txt.
# BUGFIX: both files were left open (the input handle was never closed and
# the output handle was never flushed/closed, risking lost writes); `with`
# guarantees cleanup. The local also no longer shadows the `file` builtin.
with open('input.txt') as source:
    words = source.read().split()
frequency = collections.Counter(words)
# NOTE(review): mode 'a' appends on every run, duplicating earlier output --
# confirm append (rather than 'w' overwrite) is actually intended.
with open('output.txt', 'a') as report:
    for word in sorted(frequency):
        report.write(word + ' : ' + str(frequency[word]) + '\n')
# file name : __init__.py
from flask import Flask, request, redirect, url_for
from flask import render_template
import apiCall
# Flask application; serves assets from ./static at the /static URL prefix.
app = Flask(__name__, static_folder='static', static_url_path='/static')
@app.route("/")
def hello():
    """Render the landing page."""
    return render_template("home.html")
@app.route("/home")
def home():
    """Alias route: redirect /home to the landing page."""
    return redirect("/")
@app.route("/introduction")
def introduction() :
    """Render the page describing the external API usage."""
    return render_template("usingAPI.html")
@app.route("/location")
def search_list():
    """Render the location search page."""
    return render_template("index.html")
@app.route("/post", methods=['post'])
def post():
    """Receive the location search form and redirect to its results page."""
    keyword = request.form['input']
    return redirect("/location/" + keyword)
@app.route("/admin/update")
def update() :
    """Refresh the fishing-spot records via the external API, then go home."""
    info = apiCall.apiCall()
    info.fishCreate()  # re-populate the fishing-spot data
    return redirect("/")
@app.route("/location/<value>")
def findFishing(value):
    """Render fishing spots matching `value` with their map coordinates."""
    info = apiCall.apiCall()
    FFF = info.fishFind(value)
    # spot name -> [latitude, longitude] for the map markers
    fish_dict = {spot['fname']: [spot['flatitude'], spot['flongitude']]
                 for spot in FFF}
    return render_template("index_list.html", data = FFF, location = value, fdic = fish_dict
                           ,temp=info.getData('temp'), fishList=info.getData('fishList')
                           )
if __name__ == "__main__":
    # NOTE(review): 0.0.0.0 binds on all network interfaces; confirm this is
    # intended outside a trusted network before deploying.
    app.run(host='0.0.0.0')
import json
import os
import requests
'''
回顾:注释,输入和输出,标识符,保留字,字符串,数字,// ,/ %
切片
今日:
'''
# str1='hello'
# print(str1.upper()) #将字符串大写
# print(str1.capitalize()) #将首字母大写
# print(str1.lower()) #将字符串小写
''''''
'''假设一个字符串的长度为单数,将最中间的那个字符大写
比如:hello heLlo 5//2 =2
welcome welCome 7//2 =3
9//2=4
'''
# name='welcomethyj'
# length=len(name)//2 #求这个字符串的长度
# re=name[:length]+name[length:length+1].upper()+name[length+1:]
# print(re)
'''
join:以指定的字符连接字符串,
os.path.join()
split: 拆分一个字符串,
'''
# str1='hello'
# print('-'.join(str1))
# path=os.path.join('/user/','destdop/','python')
# print(path)
'''
'''
# str1='welcome[15212412]denglu'
# newstr=str1.split('[')[1].split(']')[0]
# print(newstr)
'''布尔值:True,False
任何非0: 0
非空:None
长度非0:
的东西是真 ,bool方法返回
'''
# a=[1]
# print(bool(a))
'''
逻辑运算符
and x and y 如果x为false,返回X,如果x为True,返回的是Y
or x or Y 如果x为真,返回X,如果x为假,返回Y
not
'''
# x=1
# y=0
# z=x or y
# print("z的值是什么:{}".format(z))
# print("z的布尔类型是:{}".format(bool(z)))
# print(not y)
'''
in not in
'''
# str1='hello'
#
# print('h' not in str1)
'''is
is not
'''
# Identity-operator demo: a and b are bound to different int objects here,
# so `a is not b` prints True.
a,b=3,5
print("a的内存地址:{1},b的内存地址是:{0}".format(id(b),id(a)))
print(a is not b)
'''
数组:list,用[]表示:一种有序的集合,可以随时添加和删除其中的元素
索引;从0开始
长度:len(list)
type:查看类型,isinstance:返回数据类型的布尔值
最后一个元素的索引:length-1
list里面的元素可以是不同的数据类型
列表也可以切片,和字符串切片一样的
'''
# print(isinstance(list1,str))
# print('list1的类型是:{},它的长度是:{},它的第二个元素是:{}'.format(type(list1),len(list1),list1[2]))
# print(type(list1[len(list1)-1]))
# print('list1的最后一个元素里面的第三个元素是:{}'.format(list1[4][2]))
# A list may mix element types; the last element is itself a list.
list1=['tom','jack',8,3.14159,[1,2,3]]
# str1='abc'
# list2=['a','b','c']
#print(list1.clear()) # remove every element from the list
#print(list1.reverse()) # reverse the element order in place
# list1.append('fei') # append one element at the end (default position)
# list1.insert(3,'hao') # insert an element at the given index
# list1.extend(list2) # append every item of another sequence
#list1.remove(8) # remove the first occurrence of the given value
#list1.pop(1) # remove the element at the given index
#list3=list1.copy() # shallow-copy the list
#print("list1 after modification: {}".format(list3))
#print(list1[2:4])
'''元组:用括号表示,和列表类型,不同之处元素不能修改
'''
# Tuple demo: the tuple itself is immutable, but a mutable element (the inner
# list) can still be changed in place — its id() is the same before and after
# the append.
tp=(1,2,['a','b','c'])
print(tp)
print('tp第二个元素,修改之前的地址',id(tp[2]))
tp[2].append('d')
print('tp第二个元素,修改之后的地址',id(tp[2]))
print(type(tp))
# isinstance against the object's own type is trivially True.
print(isinstance(tp,type(tp)))
'''字典,set
'''
# Dictionary demo: views, mutation, JSON serialization and lookup.
dic={"cao":"1","ri":"2"}
print(dic.keys())
print(dic.items())
print(dic.values())
# popitem() removes and returns the most recently inserted pair.
print(dic.popitem())
dic["fuck"]="33"
print(dic["fuck"])
_json=json.dumps(dic)# to Json
print(_json)
print(dic.get("fuck"))
dic.pop("fuck")
print(dic)
# response=requests.get() #invoke get
# json.load(response.text) # toJson
|
#WAP to perform stack opertions on list
#!/usr/bin/python
def push(list1, add):
    """Push *add* onto the stack (end of *list1*) and return the stack.

    Fix: the original returned ``list1.append(add)``, which is always
    ``None``.  Returning the list matches ``pop`` and is backward-compatible
    because the caller ignores the return value.
    """
    list1.append(add)
    return list1
def pop(list1):
    """Remove the top (last) element and return the remaining stack.

    Raises IndexError on an empty stack, like list.pop().
    """
    del list1[-1]
    return list1
def peep(list1):
    """Return the top element of the stack without removing it."""
    top = list1[-1]
    return top
def isFull(list1):
    """True when the stack holds exactly its fixed capacity of 10 items."""
    capacity = 10
    return len(list1) == capacity
def isEmpty(list1):
    """True when the stack contains no elements."""
    return not list1
def main():
    """Interactive driver: read an initial stack, then loop over operations.

    Security fix: the original used ``eval`` on raw user input, which executes
    arbitrary expressions; ``ast.literal_eval`` only accepts Python literals
    (lists, numbers, strings, ...), which is all this menu needs.
    """
    from ast import literal_eval  # safe replacement for eval() on user input
    list1 = literal_eval(input("Enter List:"))
    ch = 1
    while ch != 0:
        print("1.Push\n2.Pop\n3.Peep\n4.Is Full\n5.Is Empty")
        ch = int(input("Enter Choice:"))
        # elif chain: the choices are mutually exclusive (ch never changes
        # inside a branch), so the original separate ifs behave identically.
        if ch == 1:
            add = literal_eval(input("Enter Number to PUSH:"))
            push(list1, add)
            print(list1)
        elif ch == 2:
            pop(list1)
            print(list1)
        elif ch == 3:
            print(peep(list1))
        elif ch == 4:
            print(isFull(list1))
        elif ch == 5:
            print(isEmpty(list1))
if __name__=='__main__':
    main()
|
# Author: Iuri Diniz (UFOP)
# Date: 03/2021
import igraph as ig
import sys
# input: g, alpha
# output: g
# pij = (1 - (wij/si))**(ki - 1), where:
# w --> weight
# s --> strength
# k --> degree
def backbone(g, alpha):
    """Extract the disparity-filter backbone of a weighted igraph graph.

    For every connected pair (i, j) computes
        pij = (1 - wij/si) ** (ki - 1)
    (w = edge weight, s = strength of i, k = degree of i) and keeps the pair
    only if pij < alpha.  Of each symmetric pair, only the direction with the
    smaller probability survives.  Returns {(i, j): pij}.
    """
    p = {}
    adj = g.get_adjacency()
    n_nodes = g.vcount()
    s = g.strength(weights=g.es['weight'])  # strength of every vertex
    for i in range(n_nodes):
        for j in range(n_nodes):
            if (adj[i, j] == 1):  # is there an i-j connection?
                w = g.es[g.get_eid(i, j)]['weight']
                k = g.vs[i].degree()
                pij = (1 - (w / s[i])) ** (k - 1)
                if (pij < alpha):  # keep only significant edges
                    p[i, j] = pij
    # Of each symmetric pair, mark the direction with the LARGER probability
    # with the sentinel -99.
    # BUG FIX: the original tested `((i[0],i[1]) and (i[1],i[0])) in p.keys()`
    # (which only checks the reversed pair), and when it revisited the pair
    # the already-marked -99 entry compared as the smaller value, so BOTH
    # directions got marked and every symmetric pair was dropped entirely.
    for i, j in p:
        if (j, i) in p and p[i, j] != -99 and p[j, i] != -99:
            if p[i, j] <= p[j, i]:
                p[j, i] = -99
            else:
                p[i, j] = -99
    # Keep only the surviving pairs.
    pij = {pair: prob for pair, prob in p.items() if prob != -99}
    return pij
# MAIN CODE
# The network name comes from command line.
net_name = sys.argv[1]
g = ig.Graph.Read_GraphML('../networks/' + net_name + '.GraphML')
# Set parameter alpha (significance threshold for the disparity filter)
alpha = 0.04
prob=(backbone(g,alpha))
# Build the backbone graph with the same vertex set and only the kept edges.
new_g = ig.Graph()
new_g.add_vertices(g.vcount())
new_g.add_edges(list(prob.keys()))
# Zone names
#new_g.vs['label'] = g.vs['label']
# Coordinates copied from the original layout.
new_g.vs["X"] = g.vs["X"]
new_g.vs["Y"] = g.vs["Y"]
# Node size depending on the degree (carried over from the input graph).
new_g.vs["size"] = g.vs["size"]
new_g.write_graphml('output/' + net_name + '_backbone.GraphML')
import os
import re
import sys
import cmd
import bdb
import dis
import code
import glob
import pprint
import signal
import inspect
import traceback
import linecache
class Restart(Exception):
    """Raised to request that the debugged program be restarted (see main())."""
    pass
__all__ = ['run', 'pm', 'Pdb', 'runeval', 'runctx', 'runcall', 'set_trace', 'post_mortem', 'help']
def find_function(funcname, filename):
    """Locate the first ``def funcname(`` in *filename*.

    Returns (funcname, filename, lineno) on success, or None when the file
    cannot be opened or no matching definition is found.
    """
    pattern = re.compile(r'def\s+%s\s*[(]' % re.escape(funcname))
    try:
        stream = open(filename)
    except OSError:
        return None
    with stream:
        lineno = 0
        for line in stream:
            lineno += 1
            if pattern.match(line):
                return (funcname, filename, lineno)
    return None
def getsourcelines(obj):
    """Variant of inspect.getsourcelines: for modules and module-level frames
    return the whole file starting at line 1; otherwise return the source
    block of *obj* with its 1-based start line."""
    lines, lnum = inspect.findsource(obj)
    at_module_level = inspect.ismodule(obj) or (
        inspect.isframe(obj) and obj.f_globals is obj.f_locals)
    if at_module_level:
        return (lines, 1)
    return (inspect.getblock(lines[lnum:]), lnum + 1)
def lasti2lineno(code, lasti):
    """Map bytecode offset *lasti* in *code* to its source line number.

    Scans line-start offsets from highest to lowest; returns 0 when the
    offset precedes every line start.
    """
    for offset, lineno in reversed(list(dis.findlinestarts(code))):
        if lasti >= offset:
            return lineno
    return 0
class _rstr(str):
    """String whose repr is itself (no quoting) — used for canned error text."""
    def __repr__(self):
        return self
# Prefix printed before the source line in stack entries.
line_prefix = '\n-> '
class Pdb(bdb.Bdb, cmd.Cmd):
    """Interactive source-level debugger.

    bdb.Bdb supplies the tracing/breakpoint machinery; cmd.Cmd supplies the
    command loop.  One do_<name> method per debugger command.
    """
    # SIGINT handler that was active before the debugger installed its own.
    _previous_sigint_handler = None
    def __init__(self, completekey='tab', stdin=None, stdout=None, skip=None, nosigint=False, readrc=True):
        """Set up the command loop, readline completion and .pdbrc commands."""
        bdb.Bdb.__init__(self, skip=skip)
        cmd.Cmd.__init__(self, completekey, stdin, stdout)
        if stdout:
            # A custom stdout implies non-interactive use; bypass raw input.
            self.use_rawinput = 0
        self.prompt = '(Pdb) '
        self.aliases = {}
        self.displaying = {}
        self.mainpyfile = ''
        self._wait_for_mainpyfile = False
        self.tb_lineno = {}
        try:
            import readline
            readline.set_completer_delims(' \t\n`@#$%^&*()=+[{]}\\|;:\'",<>?')
        except ImportError:
            pass
        self.allow_kbdint = False
        self.nosigint = nosigint
        # Commands queued from ~/.pdbrc and ./.pdbrc (home file first).
        self.rcLines = []
        if readrc:
            if 'HOME' in os.environ:
                envHome = os.environ['HOME']
                try:
                    with open(os.path.join(envHome, '.pdbrc')) as rcFile:
                        self.rcLines.extend(rcFile)
                except OSError:
                    pass
            try:
                with open('.pdbrc') as rcFile:
                    self.rcLines.extend(rcFile)
            except OSError:
                pass
        # Per-breakpoint command lists and their prompt/silent flags.
        self.commands = {}
        self.commands_doprompt = {}
        self.commands_silent = {}
        self.commands_defining = False
        self.commands_bnum = None
    def sigint_handler(self, signum, frame):
        """On Ctrl-C, break into the debugger at *frame* (or re-raise while a
        debugger command is running)."""
        if self.allow_kbdint:
            raise KeyboardInterrupt
        self.message("\nProgram interrupted. (Use 'cont' to resume).")
        self.set_step()
        self.set_trace(frame)
    def reset(self):
        """Reset bdb state and forget the cached stack."""
        bdb.Bdb.reset(self)
        self.forget()
    def forget(self):
        """Drop all cached frame/stack/traceback state."""
        self.lineno = None
        self.stack = []
        self.curindex = 0
        self.curframe = None
        self.tb_lineno.clear()
    def setup(self, f, tb):
        """Cache the stack for frame *f* / traceback *tb* and run .pdbrc
        commands; returns True if a resuming command was executed."""
        self.forget()
        (self.stack, self.curindex) = self.get_stack(f, tb)
        while tb:
            lineno = lasti2lineno(tb.tb_frame.f_code, tb.tb_lasti)
            self.tb_lineno[tb.tb_frame] = lineno
            tb = tb.tb_next
        self.curframe = self.stack[self.curindex][0]
        self.curframe_locals = self.curframe.f_locals
        return self.execRcLines()
    def execRcLines(self):
        """Execute queued rc-file commands; stop (returning True) at the first
        command that resumes execution, re-queueing the remainder."""
        if not self.rcLines:
            return
        rcLines = self.rcLines
        rcLines.reverse()
        self.rcLines = []
        while rcLines:
            line = rcLines.pop().strip()
            if line and line[0] != '#' and self.onecmd(line):
                self.rcLines += reversed(rcLines)
                return True
    def user_call(self, frame, argument_list):
        """bdb callback: a function is about to be called."""
        if self._wait_for_mainpyfile:
            return
        if self.stop_here(frame):
            self.message('--Call--')
            self.interaction(frame, None)
    def user_line(self, frame):
        """bdb callback: about to execute a line; skip until the main file."""
        if self._wait_for_mainpyfile:
            if self.mainpyfile != self.canonic(frame.f_code.co_filename) or frame.f_lineno <= 0:
                return
            self._wait_for_mainpyfile = False
        if self.bp_commands(frame):
            self.interaction(frame, None)
    def bp_commands(self, frame):
        """Run any commands attached to the breakpoint just hit.

        Returns 1/True if the normal interaction should proceed afterwards.
        """
        if getattr(self, 'currentbp', False) and self.currentbp in self.commands:
            currentbp = self.currentbp
            self.currentbp = 0
            lastcmd_back = self.lastcmd
            self.setup(frame, None)
            for line in self.commands[currentbp]:
                self.onecmd(line)
            self.lastcmd = lastcmd_back
            if not self.commands_silent[currentbp]:
                self.print_stack_entry(self.stack[self.curindex])
            if self.commands_doprompt[currentbp]:
                self._cmdloop()
            self.forget()
            return
        return 1
    def user_return(self, frame, return_value):
        """bdb callback: a function is about to return."""
        if self._wait_for_mainpyfile:
            return
        frame.f_locals['__return__'] = return_value
        self.message('--Return--')
        self.interaction(frame, None)
    def user_exception(self, frame, exc_info):
        """bdb callback: an exception occurred in the debugged frame."""
        if self._wait_for_mainpyfile:
            return
        (exc_type, exc_value, exc_traceback) = exc_info
        frame.f_locals['__exception__'] = (exc_type, exc_value)
        # NOTE(review): upstream pdb labels only StopIteration/GeneratorExit
        # raised *inside* generators/coroutines as 'Internal '; this condition
        # tags any exception carrying a traceback — looks inverted, confirm.
        prefix = 'Internal ' if exc_traceback or exc_type is StopIteration else ''
        self.message('%s%s' % (prefix, traceback.format_exception_only(exc_type, exc_value)[-1].strip()))
        self.interaction(frame, exc_traceback)
    def _cmdloop(self):
        """Run the command loop, absorbing Ctrl-C into a message."""
        while True:
            try:
                self.allow_kbdint = True
                self.cmdloop()
                self.allow_kbdint = False
                break
            except KeyboardInterrupt:
                self.message('--KeyboardInterrupt--')
    def preloop(self):
        """Before each prompt, re-evaluate `display` expressions and report
        any whose value changed."""
        displaying = self.displaying.get(self.curframe)
        if displaying:
            for (expr, oldvalue) in displaying.items():
                newvalue = self._getval_except(expr)
                if newvalue is not oldvalue and newvalue != oldvalue:
                    displaying[expr] = newvalue
                    self.message('display %s: %r [old: %r]' % (expr, newvalue, oldvalue))
    def interaction(self, frame, traceback):
        """Enter the interactive loop for *frame*/*traceback*."""
        if Pdb._previous_sigint_handler:
            signal.signal(signal.SIGINT, Pdb._previous_sigint_handler)
            Pdb._previous_sigint_handler = None
        if self.setup(frame, traceback):
            # An rc-file command already resumed execution.
            self.forget()
            return
        self.print_stack_entry(self.stack[self.curindex])
        self._cmdloop()
        self.forget()
    def displayhook(self, obj):
        """Print evaluated expressions, suppressing None (unlike the default
        sys.displayhook, this does not set builtins._)."""
        if obj is not None:
            self.message(repr(obj))
    def default(self, line):
        """Execute *line* as a Python statement in the current frame."""
        if line[:1] == '!':
            line = line[1:]
        locals = self.curframe_locals
        globals = self.curframe.f_globals
        try:
            code = compile(line + '\n', '<stdin>', 'single')
            save_stdout = sys.stdout
            save_stdin = sys.stdin
            save_displayhook = sys.displayhook
            try:
                sys.stdin = self.stdin
                sys.stdout = self.stdout
                sys.displayhook = self.displayhook
                exec(code, globals, locals)
            finally:
                sys.stdout = save_stdout
                sys.stdin = save_stdin
                sys.displayhook = save_displayhook
        except:
            exc_info = sys.exc_info()[:2]
            self.error(traceback.format_exception_only(*exc_info)[-1].strip())
    def precmd(self, line):
        """Expand aliases (with %1..%n / %* substitution) and split off a
        trailing ';;'-separated command into the queue."""
        if not line.strip():
            return line
        args = line.split()
        while args[0] in self.aliases:
            line = self.aliases[args[0]]
            ii = 1
            for tmpArg in args[1:]:
                line = line.replace('%' + str(ii), tmpArg)
                ii += 1
            line = line.replace('%*', ' '.join(args[1:]))
            args = line.split()
        if args[0] != 'alias':
            marker = line.find(';;')
            if marker >= 0:
                next = line[marker + 2:].lstrip()
                self.cmdqueue.append(next)
                line = line[:marker].rstrip()
        return line
    def onecmd(self, line):
        """Dispatch one command, or record it while defining bp commands."""
        if not self.commands_defining:
            return cmd.Cmd.onecmd(self, line)
        else:
            return self.handle_command_def(line)
    def handle_command_def(self, line):
        """Record one line of a breakpoint command definition; returns a true
        value when the definition is complete."""
        (cmd, arg, line) = self.parseline(line)
        if not cmd:
            return
        if cmd == 'silent':
            self.commands_silent[self.commands_bnum] = True
            return
        if cmd == 'end':
            self.cmdqueue = []
            return 1
        else:
            cmdlist = self.commands[self.commands_bnum]
            if arg:
                cmdlist.append(cmd + ' ' + arg)
            else:
                cmdlist.append(cmd)
            try:
                func = getattr(self, 'do_' + cmd)
            except AttributeError:
                func = self.default
            # A resuming command implicitly ends the definition.
            if func.__name__ in self.commands_resuming:
                self.commands_doprompt[self.commands_bnum] = False
                self.cmdqueue = []
                return 1
            return
    def message(self, msg):
        """Print a normal message to the debugger's stdout."""
        print(msg, file=self.stdout)
    def error(self, msg):
        """Print an error message, prefixed with '***'."""
        print('***', msg, file=self.stdout)
    def _complete_location(self, text, line, begidx, endidx):
        """Complete file/module paths and expressions for break locations."""
        if line.strip().endswith((':', ',')):
            # The position after a colon/comma is a lineno or condition.
            return []
        try:
            ret = self._complete_expression(text, line, begidx, endidx)
        except Exception:
            ret = []
        globs = glob.glob(text + '*')
        for fn in globs:
            if os.path.isdir(fn):
                ret.append(fn + '/')
            elif os.path.isfile(fn) and fn.lower().endswith(('.py', '.pyw')):
                ret.append(fn + ':')
        return ret
    def _complete_bpnumber(self, text, line, begidx, endidx):
        """Complete breakpoint numbers."""
        return [str(i) for (i, bp) in enumerate(bdb.Breakpoint.bpbynumber) if bp is not None and str(i).startswith(text)]
    def _complete_expression(self, text, line, begidx, endidx):
        """Complete (dotted) names visible in the current frame."""
        if not self.curframe:
            return []
        ns = self.curframe.f_globals.copy()
        ns.update(self.curframe_locals)
        if '.' in text:
            dotted = text.split('.')
            try:
                obj = ns[dotted[0]]
                for part in dotted[1:-1]:
                    obj = getattr(obj, part)
            except (KeyError, AttributeError):
                return []
            prefix = '.'.join(dotted[:-1]) + '.'
            return [prefix + n for n in dir(obj) if n.startswith(dotted[-1])]
        else:
            return [n for n in ns.keys() if n.startswith(text)]
    def do_commands(self, arg):
        """commands [bpnumber] -- define a list of commands for a breakpoint;
        with no number, for the last breakpoint set."""
        if not arg:
            bnum = len(bdb.Breakpoint.bpbynumber) - 1
        else:
            try:
                bnum = int(arg)
            except:
                self.error('Usage: commands [bnum]\n ...\n end')
                return
        self.commands_bnum = bnum
        # Save any existing definition so Ctrl-C can restore it.
        if bnum in self.commands:
            old_command_defs = (self.commands[bnum], self.commands_doprompt[bnum], self.commands_silent[bnum])
        else:
            old_command_defs = None
        self.commands[bnum] = []
        self.commands_doprompt[bnum] = True
        self.commands_silent[bnum] = False
        prompt_back = self.prompt
        self.prompt = '(com) '
        self.commands_defining = True
        try:
            self.cmdloop()
        except KeyboardInterrupt:
            if old_command_defs:
                self.commands[bnum] = old_command_defs[0]
                self.commands_doprompt[bnum] = old_command_defs[1]
                self.commands_silent[bnum] = old_command_defs[2]
            else:
                del self.commands[bnum]
                del self.commands_doprompt[bnum]
                del self.commands_silent[bnum]
            self.error('command definition aborted, old commands restored')
        finally:
            self.commands_defining = False
            self.prompt = prompt_back
    complete_commands = _complete_bpnumber
    def do_break(self, arg, temporary=0):
        """break [[filename:]lineno | function] [, condition] -- set a
        breakpoint; with no argument, list all breakpoints."""
        if not arg:
            if self.breaks:
                self.message('Num Type Disp Enb Where')
                for bp in bdb.Breakpoint.bpbynumber:
                    if bp:
                        self.message(bp.bpformat())
            return
        filename = None
        lineno = None
        cond = None
        # An optional trailing ", condition".
        comma = arg.find(',')
        if comma > 0:
            cond = arg[comma + 1:].lstrip()
            arg = arg[:comma].rstrip()
        colon = arg.rfind(':')
        funcname = None
        if colon >= 0:
            # "filename:lineno" form.
            filename = arg[:colon].rstrip()
            f = self.lookupmodule(filename)
            if not f:
                self.error('%r not found from sys.path' % filename)
                return
            filename = f
            arg = arg[colon + 1:].lstrip()
            try:
                lineno = int(arg)
            except ValueError:
                self.error('Bad lineno: %s' % arg)
                return
        else:
            # Plain lineno, or a function name/expression.
            try:
                lineno = int(arg)
            except ValueError:
                try:
                    func = eval(arg, self.curframe.f_globals, self.curframe_locals)
                except:
                    func = arg
                try:
                    if hasattr(func, '__func__'):
                        func = func.__func__
                    code = func.__code__
                    funcname = code.co_name
                    lineno = code.co_firstlineno
                    filename = code.co_filename
                except:
                    (ok, filename, ln) = self.lineinfo(arg)
                    if not ok:
                        self.error('The specified object %r is not a function or was not found along sys.path.' % arg)
                        return
                    funcname = ok
                    lineno = int(ln)
        if not filename:
            filename = self.defaultFile()
        line = self.checkline(filename, lineno)
        if line:
            err = self.set_break(filename, line, temporary, cond, funcname)
            if err:
                self.error(err)
            else:
                bp = self.get_breaks(filename, line)[-1]
                self.message('Breakpoint %d at %s:%d' % (bp.number, bp.file, bp.line))
    def defaultFile(self):
        """Produce the default filename for breakpoints: the current frame's
        file, or the main script when debugging one."""
        filename = self.curframe.f_code.co_filename
        if self.mainpyfile:
            filename = self.mainpyfile
        return filename
    do_b = do_break
    complete_break = _complete_location
    complete_b = _complete_location
    def do_tbreak(self, arg):
        """tbreak -- same as break, but the breakpoint is temporary."""
        self.do_break(arg, 1)
    complete_tbreak = _complete_location
    def lineinfo(self, identifier):
        """Resolve a (possibly quoted, possibly dotted) function identifier to
        (funcname, filename, lineno), or (None, None, None) on failure."""
        failed = (None, None, None)
        idstring = identifier.split("'")
        if len(idstring) == 1:
            id = idstring[0].strip()
        elif len(idstring) == 3:
            id = idstring[1].strip()
        else:
            return failed
        if id == '':
            return failed
        parts = id.split('.')
        del parts[0]
        # NOTE(review): upstream checks parts[0] == 'self' BEFORE deleting the
        # first element, and len(parts) == 0 is unreachable after indexing
        # parts[0] — confirm against the original pdb source.
        if parts[0] == 'self' and len(parts) == 0:
            return failed
        fname = self.defaultFile()
        if len(parts) == 1:
            item = parts[0]
        else:
            f = self.lookupmodule(parts[0])
            if f:
                fname = f
            item = parts[1]
        answer = find_function(item, fname)
        return answer or failed
    def checkline(self, filename, lineno):
        """Return *lineno* if it refers to an executable line, else 0."""
        globs = self.curframe.f_globals if hasattr(self, 'curframe') else None
        line = linecache.getline(filename, lineno, globs)
        if not line:
            self.message('End of file')
            return 0
        else:
            line = line.strip()
            # Blank lines and comment/docstring starts can't take breakpoints.
            if line and (line[0] == '#' or line[:3] == '"""' or line[:3] == "'''"):
                self.error('Blank or comment')
                return 0
            return lineno
    def do_enable(self, arg):
        """enable bpnumber [bpnumber ...] -- enable the given breakpoints."""
        args = arg.split()
        for i in args:
            try:
                bp = self.get_bpbynumber(i)
            except ValueError as err:
                self.error(err)
            # NOTE(review): upstream `continue`s after the error; here a bad
            # number falls through and re-enables the previous bp (or raises
            # NameError on the first) — confirm intent.
            bp.enable()
            self.message('Enabled %s' % bp)
    complete_enable = _complete_bpnumber
    def do_disable(self, arg):
        """disable bpnumber [bpnumber ...] -- disable the given breakpoints."""
        args = arg.split()
        for i in args:
            try:
                bp = self.get_bpbynumber(i)
            except ValueError as err:
                self.error(err)
            # NOTE(review): same missing-continue pattern as do_enable.
            bp.disable()
            self.message('Disabled %s' % bp)
    complete_disable = _complete_bpnumber
    def do_condition(self, arg):
        """condition bpnumber [condition] -- set or clear a breakpoint's
        condition."""
        args = arg.split(' ', 1)
        try:
            cond = args[1]
        except IndexError:
            cond = None
        try:
            bp = self.get_bpbynumber(args[0].strip())
        except IndexError:
            self.error('Breakpoint number expected')
        except ValueError as err:
            self.error(err)
        # NOTE(review): upstream returns after either error; here `bp` may be
        # unbound when execution continues — confirm.
        bp.cond = cond
        if not cond:
            self.message('Breakpoint %d is now unconditional.' % bp.number)
        else:
            self.message('New condition set for breakpoint %d.' % bp.number)
    complete_condition = _complete_bpnumber
    def do_ignore(self, arg):
        """ignore bpnumber [count] -- ignore the next *count* hits of a
        breakpoint."""
        args = arg.split()
        try:
            count = int(args[1].strip())
        except:
            count = 0
        try:
            bp = self.get_bpbynumber(args[0].strip())
        except IndexError:
            self.error('Breakpoint number expected')
        except ValueError as err:
            self.error(err)
        # NOTE(review): same missing-return-after-error pattern as
        # do_condition.
        bp.ignore = count
        if count > 0:
            if count > 1:
                countstr = '%d crossings' % count
            else:
                countstr = '1 crossing'
            self.message('Will ignore next %s of breakpoint %d.' % (countstr, bp.number))
        else:
            self.message('Will stop next time breakpoint %d is reached.' % bp.number)
    complete_ignore = _complete_bpnumber
    def do_clear(self, arg):
        """clear [filename:lineno | bpnumber ...] -- delete breakpoints; with
        no argument, ask to clear all of them."""
        if not arg:
            try:
                reply = input('Clear all breaks? ')
            except EOFError:
                reply = 'no'
            reply = reply.strip().lower()
            if reply in ('y', 'yes'):
                bplist = [bp for bp in bdb.Breakpoint.bpbynumber if bp]
                self.clear_all_breaks()
                for bp in bplist:
                    self.message('Deleted %s' % bp)
            return
        if ':' in arg:
            i = arg.rfind(':')
            filename = arg[:i]
            arg = arg[i + 1:]
            try:
                lineno = int(arg)
            except ValueError:
                # NOTE(review): `err` is discarded and `lineno` stays unbound;
                # upstream prints the error and returns here — confirm.
                err = 'Invalid line number (%s)' % arg
            bplist = self.get_breaks(filename, lineno)
            err = self.clear_break(filename, lineno)
            if err:
                self.error(err)
            else:
                for bp in bplist:
                    self.message('Deleted %s' % bp)
            return
        numberlist = arg.split()
        for i in numberlist:
            try:
                bp = self.get_bpbynumber(i)
            except ValueError as err:
                self.error(err)
            self.clear_bpbynumber(i)
            self.message('Deleted %s' % bp)
    do_cl = do_clear
    complete_clear = _complete_location
    complete_cl = _complete_location
    def do_where(self, arg):
        """where -- print the current stack trace."""
        self.print_stack_trace()
    do_w = do_where
    do_bt = do_where
    def _select_frame(self, number):
        """Make stack entry *number* the current frame and show it."""
        self.curindex = number
        self.curframe = self.stack[self.curindex][0]
        self.curframe_locals = self.curframe.f_locals
        self.print_stack_entry(self.stack[self.curindex])
        self.lineno = None
    def do_up(self, arg):
        """up [count] -- move the current frame *count* levels up (older)."""
        if self.curindex == 0:
            self.error('Oldest frame')
            return
        try:
            count = int(arg or 1)
        except ValueError:
            self.error('Invalid frame count (%s)' % arg)
            return
        if count < 0:
            newframe = 0
        else:
            newframe = max(0, self.curindex - count)
        self._select_frame(newframe)
    do_u = do_up
    def do_down(self, arg):
        """down [count] -- move the current frame *count* levels down (newer)."""
        if self.curindex + 1 == len(self.stack):
            self.error('Newest frame')
            return
        try:
            count = int(arg or 1)
        except ValueError:
            self.error('Invalid frame count (%s)' % arg)
            return
        if count < 0:
            newframe = len(self.stack) - 1
        else:
            newframe = min(len(self.stack) - 1, self.curindex + count)
        self._select_frame(newframe)
    do_d = do_down
    def do_until(self, arg):
        """until [lineno] -- continue until a line greater than the current
        (or the given) line is reached in the current frame."""
        if arg:
            try:
                lineno = int(arg)
            except ValueError:
                self.error('Error in argument: %r' % arg)
                return
            if lineno <= self.curframe.f_lineno:
                self.error('"until" line number is smaller than current line number')
                return
        else:
            lineno = None
        self.set_until(self.curframe, lineno)
        return 1
    do_unt = do_until
    def do_step(self, arg):
        """step -- execute the current line, stopping at the first possible
        occasion (including inside called functions)."""
        self.set_step()
        return 1
    do_s = do_step
    def do_next(self, arg):
        """next -- continue to the next line in the current frame."""
        self.set_next(self.curframe)
        return 1
    do_n = do_next
    def do_run(self, arg):
        """run [args...] -- restart the program, optionally with new sys.argv."""
        if arg:
            import shlex
            argv0 = sys.argv[0:1]
            sys.argv = shlex.split(arg)
            sys.argv[:0] = argv0
        # Unwinds up to main(), which catches Restart and re-runs the target.
        raise Restart
    do_restart = do_run
    def do_return(self, arg):
        """return -- continue until the current function returns."""
        self.set_return(self.curframe)
        return 1
    do_r = do_return
    def do_continue(self, arg):
        """continue -- resume execution until the next breakpoint."""
        if not self.nosigint:
            try:
                Pdb._previous_sigint_handler = signal.signal(signal.SIGINT, self.sigint_handler)
            except ValueError:
                # ValueError: not in the main thread; run without the handler.
                pass
        self.set_continue()
        return 1
    do_c = do_cont = do_continue
    def do_jump(self, arg):
        """jump lineno -- set the next line to execute (bottom frame only)."""
        if self.curindex + 1 != len(self.stack):
            self.error('You can only jump within the bottom frame')
            return
        try:
            arg = int(arg)
        except ValueError:
            self.error("The 'jump' command requires a line number")
        try:
            self.curframe.f_lineno = arg
            self.stack[self.curindex] = (self.stack[self.curindex][0], arg)
            self.print_stack_entry(self.stack[self.curindex])
        except ValueError as e:
            self.error('Jump failed: %s' % e)
    do_j = do_jump
    def do_debug(self, arg):
        """debug code -- run *code* under a nested (recursive) debugger."""
        sys.settrace(None)
        globals = self.curframe.f_globals
        locals = self.curframe_locals
        p = Pdb(self.completekey, self.stdin, self.stdout)
        p.prompt = '(%s) ' % self.prompt.strip()
        self.message('ENTERING RECURSIVE DEBUGGER')
        sys.call_tracing(p.run, (arg, globals, locals))
        self.message('LEAVING RECURSIVE DEBUGGER')
        sys.settrace(self.trace_dispatch)
        self.lastcmd = p.lastcmd
    complete_debug = _complete_expression
    def do_quit(self, arg):
        """quit -- abort the program being debugged."""
        self._user_requested_quit = True
        self.set_quit()
        return 1
    do_q = do_quit
    do_exit = do_quit
    def do_EOF(self, arg):
        """EOF (Ctrl-D) -- same as quit."""
        self.message('')
        self._user_requested_quit = True
        self.set_quit()
        return 1
    def do_args(self, arg):
        """args -- print the arguments of the current function."""
        co = self.curframe.f_code
        dict = self.curframe_locals
        n = co.co_argcount
        # Include *args / **kwargs slots when present (CO_VARARGS=4,
        # CO_VARKEYWORDS=8).
        if co.co_flags & 4:
            n = n + 1
        if co.co_flags & 8:
            n = n + 1
        for i in range(n):
            name = co.co_varnames[i]
            if name in dict:
                self.message('%s = %r' % (name, dict[name]))
            else:
                self.message('%s = *** undefined ***' % (name,))
    do_a = do_args
    def do_retval(self, arg):
        """retval -- print the return value of the current frame, if set."""
        if '__return__' in self.curframe_locals:
            self.message(repr(self.curframe_locals['__return__']))
        else:
            self.error('Not yet returned!')
    do_rv = do_retval
    def _getval(self, arg):
        """Evaluate *arg* in the current frame; report and re-raise errors."""
        try:
            return eval(arg, self.curframe.f_globals, self.curframe_locals)
        except:
            exc_info = sys.exc_info()[:2]
            self.error(traceback.format_exception_only(*exc_info)[-1].strip())
            raise
    def _getval_except(self, arg, frame=None):
        """Evaluate *arg*; on error return a canned '** raised ... **' string
        instead of raising."""
        try:
            if frame is None:
                return eval(arg, self.curframe.f_globals, self.curframe_locals)
            return eval(arg, frame.f_globals, frame.f_locals)
        except:
            exc_info = sys.exc_info()[:2]
            err = traceback.format_exception_only(*exc_info)[-1].strip()
            return _rstr('** raised %s **' % err)
    def do_p(self, arg):
        """p expression -- print repr(expression)."""
        try:
            self.message(repr(self._getval(arg)))
        except:
            # _getval already reported the error.
            pass
    def do_pp(self, arg):
        """pp expression -- pretty-print expression."""
        try:
            self.message(pprint.pformat(self._getval(arg)))
        except:
            pass
    complete_print = _complete_expression
    complete_p = _complete_expression
    complete_pp = _complete_expression
    def do_list(self, arg):
        """list [first[, last]] -- list source around the current line, or the
        requested range; '.' re-centers on the current line."""
        self.lastcmd = 'list'
        last = None
        if arg and arg != '.':
            try:
                if ',' in arg:
                    (first, last) = arg.split(',')
                    first = int(first.strip())
                    last = int(last.strip())
                    # A small second number counts lines, not an end line.
                    last = first + last
                else:
                    first = int(arg.strip())
                    first = max(1, first - 5)
            except ValueError:
                self.error('Error in argument: %r' % arg)
                return
        elif self.lineno is None or arg == '.':
            first = max(1, self.curframe.f_lineno - 5)
        else:
            first = self.lineno + 1
        if last is None:
            last = first + 10
        filename = self.curframe.f_code.co_filename
        breaklist = self.get_file_breaks(filename)
        try:
            lines = linecache.getlines(filename, self.curframe.f_globals)
            self._print_lines(lines[first - 1:last], first, breaklist, self.curframe)
            self.lineno = min(last, len(lines))
            if len(lines) < last:
                self.message('[EOF]')
        except KeyboardInterrupt:
            pass
    do_l = do_list
    def do_longlist(self, arg):
        """longlist -- list all source for the current function or frame."""
        filename = self.curframe.f_code.co_filename
        breaklist = self.get_file_breaks(filename)
        try:
            (lines, lineno) = getsourcelines(self.curframe)
        except OSError as err:
            self.error(err)
            return
        self._print_lines(lines, lineno, breaklist, self.curframe)
    do_ll = do_longlist
    def do_source(self, arg):
        """source expression -- list the source of the given object."""
        try:
            obj = self._getval(arg)
        except:
            return
        try:
            (lines, lineno) = getsourcelines(obj)
        except (OSError, TypeError) as err:
            self.error(err)
            return
        self._print_lines(lines, lineno)
    complete_source = _complete_expression
    def _print_lines(self, lines, start, breaks=(), frame=None):
        """Print a run of source lines with breakpoint ('B'), current ('->')
        and exception ('>>') markers."""
        if frame:
            current_lineno = frame.f_lineno
            exc_lineno = self.tb_lineno.get(frame, -1)
        else:
            current_lineno = exc_lineno = -1
        for (lineno, line) in enumerate(lines, start):
            s = str(lineno).rjust(3)
            if len(s) < 4:
                s += ' '
            if lineno in breaks:
                s += 'B'
            else:
                s += ' '
            if lineno == current_lineno:
                s += '->'
            elif lineno == exc_lineno:
                s += '>>'
            self.message(s + '\t' + line.rstrip())
    def do_whatis(self, arg):
        """whatis expression -- print the type of the expression's value."""
        try:
            value = self._getval(arg)
        except:
            return
        code = None
        try:
            code = value.__code__
        except Exception:
            pass
        if code:
            self.message('Function %s' % code.co_name)
            return
        try:
            code = value.__func__.__code__
        except Exception:
            pass
        if code:
            self.message('Method %s' % code.co_name)
            return
        if value.__class__ is type:
            self.message('Class %s.%s' % (value.__module__, value.__qualname__))
            return
        self.message(type(value))
    complete_whatis = _complete_expression
    def do_display(self, arg):
        """display [expression] -- show the expression's value each time the
        debugger stops; with no argument, list current displays."""
        if not arg:
            self.message('Currently displaying:')
            for item in self.displaying.get(self.curframe, {}).items():
                self.message('%s: %r' % item)
        else:
            val = self._getval_except(arg)
            self.displaying.setdefault(self.curframe, {})[arg] = val
            self.message('display %s: %r' % (arg, val))
    complete_display = _complete_expression
    def do_undisplay(self, arg):
        """undisplay [expression] -- stop displaying; with no argument, stop
        all displays for the current frame."""
        if arg:
            try:
                del self.displaying.get(self.curframe, {})[arg]
            except KeyError:
                self.error('not displaying %s' % arg)
        else:
            self.displaying.pop(self.curframe, None)
    def complete_undisplay(self, text, line, begidx, endidx):
        """Complete currently-displayed expressions."""
        return [e for e in self.displaying.get(self.curframe, {}) if e.startswith(text)]
    def do_interact(self, arg):
        """interact -- start a Python REPL with the current frame's namespace."""
        ns = self.curframe.f_globals.copy()
        ns.update(self.curframe_locals)
        code.interact('*interactive*', local=ns)
    def do_alias(self, arg):
        """alias [name [command]] -- define or list command aliases."""
        args = arg.split()
        if len(args) == 0:
            keys = sorted(self.aliases.keys())
            for alias in keys:
                self.message('%s = %s' % (alias, self.aliases[alias]))
            return
        if args[0] in self.aliases and len(args) == 1:
            self.message('%s = %s' % (args[0], self.aliases[args[0]]))
        else:
            self.aliases[args[0]] = ' '.join(args[1:])
    def do_unalias(self, arg):
        """unalias name -- delete an alias."""
        args = arg.split()
        if len(args) == 0:
            return
        if args[0] in self.aliases:
            del self.aliases[args[0]]
    def complete_unalias(self, text, line, begidx, endidx):
        """Complete existing alias names."""
        return [a for a in self.aliases if a.startswith(text)]
    # Commands that resume execution (they end breakpoint command lists).
    commands_resuming = ['do_continue', 'do_step', 'do_next', 'do_return', 'do_quit', 'do_jump']
    def print_stack_trace(self):
        """Print every entry of the cached stack."""
        try:
            for frame_lineno in self.stack:
                self.print_stack_entry(frame_lineno)
        except KeyboardInterrupt:
            pass
    def print_stack_entry(self, frame_lineno, prompt_prefix=line_prefix):
        """Print one stack entry, marking the current frame with '>'."""
        (frame, lineno) = frame_lineno
        if frame is self.curframe:
            prefix = '> '
        else:
            prefix = '  '
        self.message(prefix + self.format_stack_entry(frame_lineno, prompt_prefix))
    def do_help(self, arg):
        """help [command] -- show help for a command or topic."""
        if not arg:
            return cmd.Cmd.do_help(self, arg)
        try:
            try:
                topic = getattr(self, 'help_' + arg)
                return topic()
            except AttributeError:
                command = getattr(self, 'do_' + arg)
        except AttributeError:
            self.error('No help for %r' % arg)
        else:
            if sys.flags.optimize >= 2:
                self.error('No help for %r; please do not run Python with -OO if you need command help' % arg)
                return
            self.message(command.__doc__.rstrip())
    do_h = do_help
    def help_exec(self):
        """(!) statement -- execute a one-line statement in the current frame."""
        self.message((self.help_exec.__doc__ or '').strip())
    def help_pdb(self):
        """Show the full module documentation."""
        help()
    def lookupmodule(self, filename):
        """Translate a (possibly extension-less) module/file name to an
        existing path via sys.path, or None."""
        if os.path.isabs(filename) and os.path.exists(filename):
            return filename
        f = os.path.join(sys.path[0], filename)
        if os.path.exists(f) and self.canonic(f) == self.mainpyfile:
            return f
        (root, ext) = os.path.splitext(filename)
        if ext == '':
            filename = filename + '.py'
        if os.path.isabs(filename):
            return filename
        for dirname in sys.path:
            while os.path.islink(dirname):
                dirname = os.readlink(dirname)
            fullname = os.path.join(dirname, filename)
            if os.path.exists(fullname):
                return fullname
    def _runmodule(self, module_name):
        """Debug *module_name* as if run with `python -m`."""
        self._wait_for_mainpyfile = True
        self._user_requested_quit = False
        import runpy
        (mod_name, mod_spec, code) = runpy._get_module_details(module_name)
        self.mainpyfile = self.canonic(code.co_filename)
        import __main__
        __main__.__dict__.clear()
        __main__.__dict__.update({'__name__': '__main__', '__file__': self.mainpyfile, '__package__': mod_spec.parent, '__loader__': mod_spec.loader, '__spec__': mod_spec, '__builtins__': __builtins__})
        self.run(code)
    def _runscript(self, filename):
        """Debug the script *filename* with a fresh __main__ namespace."""
        import __main__
        __main__.__dict__.clear()
        __main__.__dict__.update({'__name__': '__main__', '__file__': filename, '__builtins__': __builtins__})
        self._wait_for_mainpyfile = True
        self.mainpyfile = self.canonic(filename)
        self._user_requested_quit = False
        with open(filename, 'rb') as fp:
            statement = "exec(compile(%r, %r, 'exec'))" % (fp.read(), self.mainpyfile)
        self.run(statement)
# Append each command's docstring to the module docstring, in help order.
# Guarded on __doc__ so builds without docstrings (-OO, or this stripped copy
# where the do_* methods carry no docstrings) skip it.
if __doc__ is not None:
    _help_order = ['help', 'where', 'down', 'up', 'break', 'tbreak', 'clear', 'disable', 'enable', 'ignore', 'condition', 'commands', 'step', 'next', 'until', 'jump', 'return', 'retval', 'run', 'continue', 'list', 'longlist', 'args', 'p', 'pp', 'whatis', 'source', 'display', 'undisplay', 'interact', 'alias', 'unalias', 'debug', 'quit']
    for _command in _help_order:
        __doc__ += getattr(Pdb, 'do_' + _command).__doc__.strip() + '\n\n'
    __doc__ += Pdb.help_exec.__doc__
    del _help_order
    del _command
def run(statement, globals=None, locals=None):
    """Execute *statement* (a string) under debugger control."""
    Pdb().run(statement, globals, locals)
def runeval(expression, globals=None, locals=None):
    """Evaluate *expression* under debugger control and return its value."""
    return Pdb().runeval(expression, globals, locals)
def runctx(statement, globals, locals):
    """Execute *statement* under debugger control with explicit namespaces."""
    run(statement, globals, locals)
def runcall(*args, **kwds):
    """Call func(*args, **kwds) under debugger control and return the result."""
    return Pdb().runcall(*args, **kwds)
def set_trace(*, header=None):
    """Break into the debugger at the caller's frame; print *header* first
    when given."""
    pdb = Pdb()
    if header is not None:
        pdb.message(header)
    pdb.set_trace(sys._getframe().f_back)
def post_mortem(t=None):
    """Enter post-mortem debugging of traceback *t* (defaults to the
    exception currently being handled)."""
    if t is None:
        t = sys.exc_info()[2]
    if t is None:
        raise ValueError('A valid traceback must be passed if no exception is being handled')
    p = Pdb()
    p.reset()
    p.interaction(None, t)
def pm():
    """Post-mortem debug the traceback of the last unhandled exception."""
    post_mortem(sys.last_traceback)
# Canned statement used by test() to smoke-test the debugger itself.
TESTCMD = 'import x; x.main()'
def test():
    """Run TESTCMD under the debugger."""
    run(TESTCMD)
def help():
    """Page the module documentation through pydoc's pager."""
    import pydoc
    pydoc.pager(__doc__)
_usage = 'usage: pdb.py [-c command] ... [-m module | pyfile] [arg] ...\n\nDebug the Python program given by pyfile. Alternatively,\nan executable module or package to debug can be specified using\nthe -m switch.\n\nInitial commands are read from .pdbrc files in your home directory\nand in the current directory, if they exist. Commands supplied with\n-c are executed after commands from .pdbrc files.\n\nTo let the script run until an exception occurs, use "-c continue".\nTo let the script run up to a given line X in the debugged file, use\n"-c \'until X\'".'
def main():
    """Command-line entry point: parse options, then run the target script or
    module under the debugger, restarting on `run`/`restart` and entering
    post-mortem debugging on uncaught exceptions."""
    import getopt
    # BUG FIX: getopt long option names must NOT include the leading '--';
    # the original ['--help', '--command='] made --help/--command raise
    # GetoptError ("option not recognized").
    (opts, args) = getopt.getopt(sys.argv[1:], 'mhc:', ['help', 'command='])
    if not args:
        print(_usage)
        sys.exit(2)
    commands = []
    run_as_module = False
    for (opt, optarg) in opts:
        if opt in ('-h', '--help'):
            print(_usage)
            sys.exit()
        elif opt in ('-c', '--command'):
            commands.append(optarg)
        elif opt in ('-m',):
            run_as_module = True
    mainpyfile = args[0]
    # BUG FIX: the existence check applies only to script files; the original
    # `if run_as_module or not os.path.exists(...)` rejected every -m run.
    if not run_as_module and not os.path.exists(mainpyfile):
        print('Error:', mainpyfile, 'does not exist')
        sys.exit(1)
    sys.argv[:] = args
    if not run_as_module:
        # Emulate running the script directly: its directory heads sys.path.
        sys.path[0] = os.path.dirname(mainpyfile)
    pdb = Pdb()
    pdb.rcLines.extend(commands)
    while True:
        try:
            if run_as_module:
                pdb._runmodule(mainpyfile)
            else:
                pdb._runscript(mainpyfile)
            if pdb._user_requested_quit:
                break
            print('The program finished and will be restarted')
        except Restart:
            print('Restarting', mainpyfile, 'with arguments:')
            print('\t' + ' '.join(args))
        except SystemExit:
            print('The program exited via sys.exit(). Exit status:', end=' ')
            print(sys.exc_info()[1])
        except SyntaxError:
            traceback.print_exc()
            sys.exit(1)
        except:
            traceback.print_exc()
            print('Uncaught exception. Entering post mortem debugging')
            print("Running 'cont' or 'step' will restart the program")
            t = sys.exc_info()[2]
            pdb.interaction(None, t)
            print('Post mortem debugger finished. The ' + mainpyfile + ' will be restarted')
# When run as a script, import the installed pdb module and delegate to its
# main(), so the canonical copy (not this partially-loaded file) drives.
if __name__ == '__main__':
    import pdb
    pdb.main()
|
#! /usr/bin/python
__title__="Homework 9"
__author__="Thomas Benge"
__date__="04/26/13"
# Program that allows users to share information over a network
import sys
import re
import socket
import threading
import Queue
from NetIO import getNetLine, putNetLine
# Case-insensitive request parser: "SWAP <value> USING <key>".
# group(1) = the value to exchange, group(2) = the rendezvous key.
pat = re.compile(r'^\s*swap\s+(\S+)\s+using\s+(\S+)\s*$', re.IGNORECASE)
class handleClient(threading.Thread):
    """One thread per connected client (Python 2).

    Reads "SWAP <value> USING <key>" lines, pairs up two clients that used
    the same key, swaps their values, and replies
    "RETURN <partner value> USING <key>". Malformed lines are echoed back
    prefixed with '!'.
    """
    def __init__(self, swapDict, swapLock, strm, addr, reapQ):
        threading.Thread.__init__(self)
        self.dict = swapDict   # shared key -> rendezvous-point table
        self.lock = swapLock   # guards self.dict
        self.strm = strm       # connected client socket
        self.addr = addr       # peer address, used for logging only
        self.repq = reapQ      # finished threads enqueue themselves for join()
    def handleExchange(self, key, value):
        """Exchange *value* with the other thread that used the same *key*.

        Second arrival: removes the rendezvous point, swaps values, wakes the
        waiter. First arrival: publishes its value and blocks on a Condition
        until a partner arrives.
        """
        self.lock.acquire()
        if key in self.dict:
            # We are the 2nd thread for this key: complete the exchange.
            rendezvousPt = self.dict[key]
            del self.dict[key] # both threads still have a reference
            self.lock.release() # end exclusive access to self.dict
            keyLock = rendezvousPt['cond']
            keyLock.acquire() # get ready to wake the 1st thread
            # Swap: take the 1st thread's value, leave ours in its place.
            newVal = value
            value = rendezvousPt[key]
            rendezvousPt[key] = newVal
            keyLock.notify() # queue up the 1st thread
            keyLock.release() # start up the 1st thread
        else:
            # We are the 1st thread: publish our value and wait for a partner.
            keyCond = threading.Condition()
            rendezvousPt = {key: value, 'cond': keyCond }
            self.dict[key] = rendezvousPt
            keyCond.acquire() # make sure the notify can't be lost
            self.lock.release() # end exclusive access to self.dict
            keyCond.wait() # ZZZZZZZZ waiting 2nd thread
            keyCond.release()
            # The 2nd thread stored its value in the shared rendezvous dict.
            value = rendezvousPt[key]
        outline = 'RETURN ' + value + ' USING ' + key
        return outline
    def run(self):
        """Per-connection request/reply loop over buffered socket streams."""
        print "Connection open: ", self.addr
        sys.stdout.flush()
        try:
            instrm = self.strm.makefile('r')
            outstrm = self.strm.makefile('w')
            inline = getNetLine(instrm)
            while inline:
                print self.addr, ' -> ', inline
                mat = pat.match(inline)
                if (mat == None):
                    # Unparsable request: echo it back with a '!' marker.
                    outline = '!' + inline
                else:
                    key = mat.group(2)
                    value = mat.group(1)
                    outline = self.handleExchange(key, value)
                putNetLine(outstrm, outline)
                inline = getNetLine(instrm)
            print "Connection close: ", self.addr
        except socket.error:
            print "Connection error: ", self.addr
        sys.stdout.flush()
        instrm.close()
        outstrm.close()
        self.strm.close()
        # Hand ourselves to the reaper thread so it can join() us.
        self.repq.put(self,True)
class reapEm(threading.Thread):
    """Daemon thread that join()s finished client threads handed over a queue."""
    def __init__(self, threadQ):
        threading.Thread.__init__(self)
        self.q = threadQ  # queue of finished threads awaiting reaping
    def run(self):
        # Block until a finished thread arrives, then reap (join) it.
        while True:
            finished = self.q.get(True)
            finished.join()
def serverExchanger():
    """Accept loop: spawn one handleClient thread per connection on port 33101.

    A daemon reapEm thread joins finished client threads so zombies do not
    accumulate. Ctrl-C prints still-running threads and closes the socket.
    """
    reapQueue = Queue.Queue()
    reapThread = reapEm(reapQueue)
    reapThread.daemon = True
    reapThread.start()
    # Shared rendezvous table plus the lock that guards it.
    swapDict= { }
    swapLock = threading.Lock()
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        # SO_REUSEADDR lets the server rebind quickly after a restart.
        s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) ;
        s.bind(('', 33101))
        s.listen(5)
        while True:
            strm, addr = s.accept()
            try:
                nt = handleClient(swapDict,swapLock,strm,addr,reapQueue)
                nt.daemon = True
                nt.start()
            except:
                print "Unable to start thread for ", addr
                strm.close()
    except KeyboardInterrupt:
        print 'Exiting'
        for t in threading.enumerate():
            print "Running " + str(t)
    s.close()
if __name__ == "__main__":
    serverExchanger()
|
import urllib
import urllib2
import re
import httplib
from basecall import BaseCall
class CssminifierCall(BaseCall):
    """Minify CSS by POSTing it to the cssminifier.com web service (Python 2)."""
    def exec_request(self):
        """POST self.original to the service; return the minified CSS or None."""
        payload = urllib.urlencode({
            'input': self.original })
        user_agent = 'Sublime Text - cssminifier'
        request = urllib2.Request(
            "http://cssminifier.com/raw",
            payload,
            headers={'User-Agent': user_agent,
                     'Content-Type': 'application/x-www-form-urlencoded'})
        response = urllib2.urlopen(request, timeout=self.timeout)
        mini_content = response.read().strip()
        if not mini_content:
            return None
        # Optionally collapse any remaining newlines into single spaces.
        return re.sub("[\n]+", " ", mini_content) if self.rm_new_lines else mini_content
|
from zmq_utils import send_array_fast, recv_array_fast
import zmq
# Minimal ZeroMQ echo server: bind a REP socket and bounce each array back.
addr = "tcp://127.0.0.1:10000"
ctx = zmq.Context()
sock = ctx.socket(zmq.REP)  # pylint: disable=no-member
sock.bind(addr)
print(f"Listening to: {addr}")
while True:
    payload = recv_array_fast(sock, copy=False)
    send_array_fast(sock, payload, copy=False)
|
import os
import sys
import re
import getopt
def connectToWLST():
    """Connect to the WebLogic domain using stored credential files.

    connect() is supplied by the WLST (Jython) runtime; exits on failure.
    """
    try:
        connect(userConfigFile='/opt/Jenkins/wltserver/config.secure',userKeyFile='/opt/Jenkins/wltserver/key.secure',url='t3://freappd15.oracle.eagle.org:7001')
        print 'Successfully connected to the WLST\n'
    except:
        print 'The domain is unreacheable. Please try again\n'
        exit()
def stopJVM():
    """Shut down the Maximo01 managed server; exit code 2 on failure."""
    try:
        shutdown("Maximo01")
    except:
        print 'Error stopping\n'
        sys.exit(2)
# Script body: connect to the domain, then stop the server.
connectToWLST()
stopJVM()
|
class Car(object):
    """Base vehicle class (Python 2 demo).

    __washCar is name-mangled to _Car__washCar, so it is effectively private;
    it is invoked once during construction.
    """
    def __init__(self, name):
        self.__washCar()
        self.name = name
    def __washCar(self):
        # "Private" helper demonstrating encapsulation.
        print 'Washing the Car'
    def fix(self):
        print 'Fixing Car...'
    def drive(self):
        # Overridden by subclasses (polymorphism demo).
        pass
    def breaking(self):
        # Overridden by subclasses (sic: 'braking').
        pass
""" Inheritance: The following class 'Inherits' from the Super Class """
class Motorcycle(Car):
    """Car subclass overriding drive/breaking for the polymorphism demo."""
    def drive(self):
        return 'Riding Bike!'
    def breaking(self):
        return 'Bike stopping'
""" Inheritance: The following class 'Inherits' from the Super Class """
class Sedan(Car):
    """Car subclass overriding drive/breaking for the polymorphism demo."""
    def drive(self):
        return 'The Sedan is being driven'
    def breaking(self):
        return 'Sedan breaking!'
""" Polymorphism is implemented below """
# Each construction also prints 'Washing the Car' via the private __washCar().
cars = [Sedan('Lexus'), Sedan('Shelby'), Motorcycle('Suzuki'), Motorcycle('Yamaha')]
for car in cars:
    # Same call site, different behavior per concrete class.
    print car.name + ': ' + car.drive()
""" The following example demonstrates Encapsulation whereby __washCar() is not accessible outside the scope of the class """
mazda = Car('Axela')
mazda.fix()
# mazda.__washCar() is not accessible from the object (name mangling: _Car__washCar).
|
# Print "boop " once per character of the user's phrase, all on one line.
phrase = input("What is your phrase ")
word = len(phrase)
for _ in range(word):
    print("boop ", end="")
|
def process_item(x):
    """Return the next prime strictly greater than x (for x >= 1).

    The original tested divisors up to copy//2 + 1 — O(n) per candidate —
    and that range misclassified 2 as composite (2 % 2 == 0), which was
    papered over by an `x == 1` special case. Divisors are now only tested
    while divisor*divisor <= candidate, so 2 is handled correctly and the
    special case is unnecessary. For x < 1 the original quirk of returning
    x + 1 is preserved (no divisor d satisfies d*d <= n for n < 4 except 2*2=4).
    """
    candidate = x
    while True:
        candidate += 1
        is_prime = True
        divisor = 2
        # Trial division only up to sqrt(candidate).
        while divisor * divisor <= candidate:
            if candidate % divisor == 0:
                is_prime = False
                break
            divisor += 1
        if is_prime:
            return candidate
import sys
import random
import os
import datetime
import json
def load_files():
    """Load names, last names and email providers from the script's directory.

    Reads names.txt, last_names.txt and email.txt from the current working
    directory. The `with` blocks close each file automatically (the original
    also called .close() inside the `with`, which was redundant).

    return: tuple of (name_list, last_name_list, mail_list), each a list of
            stripped lines.
    """
    directory = os.getcwd()
    # Load the names
    with open(os.path.join(directory, 'names.txt')) as names:
        name_list = names.read().splitlines()
    # Load the last names
    with open(os.path.join(directory, 'last_names.txt')) as last_names:
        last_name_list = last_names.read().splitlines()
    # Load the email providers
    with open(os.path.join(directory, 'email.txt')) as mails:
        mail_list = mails.read().splitlines()
    return name_list, last_name_list, mail_list
# Interactive configuration: Django model name, fixture count, starting pk.
model_name = input('Enter the name of the model: ')
num_of_items = int(input('How many items do you want?: '))
pk = int(input('Enter the starting primary key: '))
# NOTE(review): abandoned argv-based alternative to the prompts above;
# kept commented out by the original author.
# if len(sys.argv) > 1:
#     print('there are extra arguments in command line')
#     if '-n' in sys.argv:
#         item_index = sys.argv.index('-n') + 1
#         num_of_items = sys.argv[item_index]
#         print(f'You want {num_of_items} items.')
#     elif '--number' in sys.argv:
#         item_index = sys.argv.index('--number') + 1
#         num_of_items = sys.argv[item_index]
#     else:
#         num_of_items = int(input('How many items do you want?: '))
#     if '--pk' in sys.argv:
#         pk_index = sys.argv.index('--pk') + 1
#         pk = sys.argv[pk_index]
#     else:
#         pk = int(input('Enter the starting primary key: '))
name_list, last_name_list, mail_list = load_files()
def create_item(pk):
    """Build one Django user-fixture dict with random identity fields.

    Username scheme: first name + first letter of the last name, lowercased.
    All passwords are the same pbkdf2_sha256 hash ('testing1234', 180000
    iterations, random salt).

    input: int (primary key for this fixture row)
    return: dict in Django loaddata fixture format
    """
    name = random.choice(name_list)
    last_name = random.choice(last_name_list)
    username = name + last_name[0]
    username = username.lower()  # usernames should be lower
    email = username + '@' + random.choice(mail_list)
    item = {}
    item['model'] = 'users.' + model_name
    item['pk'] = pk
    item['fields'] = {}
    item['fields']['password'] = 'pbkdf2_sha256$180000$UHsXnV8XFT8M$5n9TYOYZfiHk2NPAU4hZR6P5pyiDz4Cku7q4xo9njNk='
    item['fields']['last_login'] = None
    item['fields']['is_superuser'] = False
    item['fields']['username'] = username
    item['fields']['first_name'] = name
    item['fields']['last_name'] = last_name
    item['fields']['email'] = email
    item['fields']['is_staff'] = False
    # Fixed: Django's auth User field is 'is_active' (was 'is_Active').
    item['fields']['is_active'] = True
    item['fields']['date_joined'] = datetime.datetime.today().strftime('%Y-%m-%dT%H:%M:%SZ')
    item['fields']['groups'] = []
    # Fixed: field name is 'user_permissions' (was 'user_premissions').
    item['fields']['user_permissions'] = []
    return item
def generate_pk(pk, count=None):
    """Yield the list of primary keys starting at *pk*.

    Fixed: in the original, the comprehension variable shadowed the *pk*
    parameter, so the argument was ignored and the sequence always started
    at 0. *count* defaults to the module-level num_of_items for backward
    compatibility with the old single-argument call.
    """
    if count is None:
        count = num_of_items
    yield list(range(pk, pk + int(count)))
# Build the fixture list, bumping the primary key per item, then dump it to
# data_fixtures.json in Django loaddata format.
items = []
for i in range(int(num_of_items)):
    items.append(create_item(pk))
    pk += 1
with open("data_fixtures.json", 'w') as write_file:
    json.dump(items, write_file)
|
import pygame
import os
pygame.init()
game_screen = pygame.display.set_mode(size=(480, 600))
# 1. Load the background image.
background = pygame.image.load("./images/background.png")
# 2. Blit (draw) the background onto the screen surface.
game_screen.blit(background, (0, 0))
# 3. Update the display.
hero = pygame.image.load("./images/me1.png")
game_screen.blit(hero, (200, 400))
pygame.display.update()
# Rect tracking the hero plane's position: (x, y, width, height).
hero_rect = pygame.Rect(200, 400, 102, 126)
# Game clock used to cap the frame rate.
clock = pygame.time.Clock()
while True:
    clock.tick(60)
    # Poll the event queue (also keeps the window responsive).
    event_list = pygame.event.get()
    if len(event_list) > 0:
        print(event_list)
    # Move the plane upward; wrap to the bottom once fully off-screen.
    hero_rect.y -= 1
    if hero_rect.y <= -126:
        hero_rect.y = 600
    # Redraw background, then hero, then flip the display.
    game_screen.blit(background, (0, 0))
    game_screen.blit(hero, hero_rect)
    pygame.display.update()
|
'''
作者:张斌
时间:2019.3.24
版本功能:简单线性回归的实现,为了使得建立的模型使得方差最小 从而获得回归线y=b1x+b0
'''
#简单线性回归:只有一个自变量 y=k*x+b 预测使 (y-y*)^2 最小
import numpy as np
def fitSLR(x, y):
    """Fit simple linear regression y = b0 + b1*x by least squares.

    The means are computed once up front (the original re-evaluated
    np.mean(x)/np.mean(y) inside the loop, making the fit O(n^2)).

    :param x: independent variable values (sequence of numbers)
    :param y: dependent variable values (same length as x)
    :return: (b0, b1) — intercept and slope
    """
    x_mean = np.mean(x)
    y_mean = np.mean(y)
    numerator = 0.0
    denominator = 0.0
    for xi, yi in zip(x, y):
        numerator += (xi - x_mean) * (yi - y_mean)
        denominator += (xi - x_mean) ** 2
    b1 = numerator / float(denominator)
    # Intercept from the point (mean(x), mean(y)) on the fitted line.
    b0 = y_mean - b1 * x_mean
    return b0, b1
# y= b0+x*b1
def prefict(x, b0, b1):
    """Predict the dependent variable for *x* with model y = b0 + b1*x.

    :param x: independent variable value to predict for
    :param b0: model intercept
    :param b1: model slope
    :return: predicted y value
    """
    prediction = b0 + x * b1
    return prediction
# Example: fit the model on 5 sample points, then predict at x = 6.
x=[1,3,2,1,3]
y=[14,24,18,17,27]
b0,b1=fitSLR(x, y)
print("intercept:{},slope:{}".format(b0,b1)) # intercept and slope
x_predict = 6
y_predict = prefict(x_predict,b0,b1) # predict y at x = 6
print("当自变量x取{}时,y_predict:{}".format(x_predict,y_predict))
print("简单线性回归方程为:y={}x+{}".format(b1,b0))
"""
All core commands implemented in RAPyDo
"""
from importlib.util import module_from_spec, spec_from_file_location
from pathlib import Path
from types import ModuleType
from typing import Dict, Optional
from controller import PROJECT_DIR
BACKUP_MODULES: Dict[str, ModuleType] = {}
RESTORE_MODULES: Dict[str, ModuleType] = {}
PASSWORD_MODULES: Dict[str, ModuleType] = {}
TUNING_MODULES: Dict[str, ModuleType] = {}
def load_module(path: Path) -> Dict[str, ModuleType]:
    """Load every eligible *.py file in *path* and return {stem: module}.

    Files starting with '_', '|' or '.' are skipped (glob "[!_|.]*.py").
    A non-directory path yields an empty mapping.

    Originally implemented as:
      for c in commands_folder.glob("[!_|.]*.py"):
          import_module(f"controller.commands.{c.stem}")
    """
    modules: Dict[str, ModuleType] = {}
    if not path.is_dir():
        return modules
    for source in path.glob("[!_|.]*.py"):
        spec = spec_from_file_location(source.stem, source)
        if spec and spec.loader:
            # "_LoaderProtocol" has no attribute "exec_module"
            # https://issueexplorer.com/issue/python/typeshed/6163
            module = module_from_spec(spec)
            spec.loader.exec_module(module)
            modules[source.stem] = module
    return modules
def load_commands(project: Optional[str]) -> None:
    """Populate the per-category command registries, then load all commands.

    Built-in modules are loaded first; when *project* is given, its custom
    commands are layered on top. Finally the swarm- or compose-specific
    commands are loaded depending on the runtime configuration.
    """
    registries = {
        "backup_modules": BACKUP_MODULES,
        "restore_modules": RESTORE_MODULES,
        "password_modules": PASSWORD_MODULES,
        "tuning_modules": TUNING_MODULES,
    }
    # re-initialization needed for tests
    for registry in registries.values():
        registry.clear()
    commands_folder = Path(__file__).resolve().parent
    for subdir, registry in registries.items():
        registry.update(load_module(commands_folder.joinpath(subdir)))
    # Do custom commands exist?
    if project:
        custom_commands = PROJECT_DIR.joinpath(project, "commands")
        load_module(custom_commands)
        for subdir, registry in registries.items():
            registry.update(load_module(custom_commands.joinpath(subdir)))
    load_module(commands_folder)
    # Do not import outside, otherwise it will lead to a circular import:
    # cannot import name 'Configuration' from partially initialized module
    from controller.app import Configuration
    mode_folder = "swarm" if Configuration.swarm_mode else "compose"
    load_module(commands_folder.joinpath(mode_folder))
|
#update test cat's owner
#whom we will send messages
from azure.storage import TableService
import config
# Azure Table Storage client (legacy azure.storage SDK; Python 2 script).
table_service = TableService(account_name=config.ACC_NAME,
account_key=config.ACC_KEY)
# NOTE(review): one-off maintenance statements kept by the author:
#newMaster = {'masterID' : '188622142'}
#table_service.update_entity('bandcredentials', 'band','test', newMaster)
#table_service.insert_entity('bandcredentials', newMaster)
# Fetch the test cat's record (PartitionKey='band', RowKey='test') and print
# the owner/master id that messages will be sent to.
task = table_service.get_entity('bandcredentials', 'band', 'test')
print task.masterID
from __future__ import print_function
from googleapiclient.discovery import build
from httplib2 import Http
from oauth2client import file, client, tools
import smtplib
from email.mime.text import MIMEText
from time import gmtime, strftime, localtime, sleep
import random
# If modifying these scopes, delete the file token.json.
SCOPES = 'https://www.googleapis.com/auth/spreadsheets.readonly'
# The ID and range of a sample spreadsheet.
SPREADSHEET_ID = '1xtfkX5h9vFIx2XwVvVxIIENpBgKUapkavCO6h99aCKE'
RANGE_NAME = 'Schedule!A2:B'
#emails = ["kpark21@student.kis.or.kr"]
# Sender account. SECURITY NOTE(review): the password is hard-coded in source;
# it should be moved to an environment variable or secret store and rotated.
mail_user = "schedulebot@student.kis.or.kr"
password = "distcodegwlm"
print("started")
# NOTE(review): isSent/msg/student are never read below — confirm before removal.
isSent = True
msg = "test"
student = 0
# Sheet range holding (name, email, id) response rows.
formResponses = 'tempProj!A2:C'
def main(dayName):
    """Shows basic usage of the Sheets API.
    Prints values from a sample spreadsheet.

    Reads (name, email, id) rows from the formResponses range and emails each
    recipient their survey ID via Gmail SMTP.
    NOTE(review): *dayName* is never used in this function — confirm intent.
    """
    # The file token.json stores the user's access and refresh tokens, and is
    # created automatically when the authorization flow completes for the first
    # time.
    store = file.Storage('token.json')
    creds = store.get()
    if not creds or creds.invalid:
        flow = client.flow_from_clientsecrets('credentials.json', SCOPES)
        creds = tools.run_flow(flow, store)
    service = build('sheets', 'v4', http=creds.authorize(Http()))
    # Call the Sheets API
    sheet = service.spreadsheets()
    result = sheet.values().get(spreadsheetId=SPREADSHEET_ID, range=formResponses).execute()
    values = result.get('values',[])
    for email in values:
        # NOTE(review): the random value is immediately overwritten by the
        # sheet's stored id (email[2]) — the randint call is dead code.
        ranNum = random.randint(100000,999999)
        rec_email = email[1]
        ranNum = email[2]
        # NOTE(review): a new SMTP session is opened per recipient; one session
        # reused across the loop would be kinder to the server.
        server = smtplib.SMTP("smtp.googlemail.com:587")
        server.ehlo()
        server.starttls()
        server.login(mail_user, password)
        text = MIMEText("Dear "+email[0]+",\n\n"+"Thank you for your interest in participating in this survey. Your participation is greatly appreciated." +"\n\n"+"If you are under the age of 18, please have your parents sign this(https://docs.google.com/document/d/1bRGbqTWUNcw_ZfQ2zGk-oevF01M2YZaqFquieDL-Iu0/edit) consent letter and email schoe21@student.kis.or.kr before completing the survey. There is more important information in the form, so please read the form even if you do not have to fill one out. Completion of this form means you are agreeing to having your data be used in this study, which includes the survey data and your GPA. However, the researcher will not know your GPA. This form is not required if you are 18 or above (Korean or American Age)."+"\n\n"+"Your ID Number is ("+str(ranNum)+"). Please complete the form with this ID Number. " +"\n\n"+"Please complete the form and survey as soon as possible."+"\n\n"+"Thank you."+"\n\n\n"+"Powered by an unpaid Kevin Park")
        text['Subject'] = "Survey"
        text['To'] = rec_email
        text['From'] = mail_user
        server.sendmail(mail_user,rec_email,text.as_string())
        server.quit()
        # Throttle sends to one per second.
        sleep(1)
        #print("sent")
"""
def whatDayIsIt():
    store = file.Storage('token.json')
    creds = store.get()
    if not creds or creds.invalid:
        flow = client.flow_from_clientsecrets('credentials.json', SCOPES)
        creds = tools.run_flow(flow, store)
    service = build('sheets', 'v4', http=creds.authorize(Http()))
    sheet = service.spreadsheets()
    result = sheet.values().get(spreadsheetId=SPREADSHEET_ID, range=RANGE_NAME).execute()
    values = result.get('values',[])
    for day in values:
        if day[0] == date:
            main(day[1])
            #print (day[1])
        else:
            pass
"""
# Entry point: compute the day-of-year (currently unused by main("a")) and send.
if __name__ == '__main__':
    date = strftime("%j",localtime())
    # The schedule sheet stores day numbers without leading zeros.
    date = date.lstrip("0")
    print(date)
    main("a")
    #whatDayIsIt()
|
import csv as csv
import itertools
import statistics
from statistics import mean
import numpy as np
import matplotlib as matplt
import matplotlib.pyplot as plt
# Opens the CSV file, turns it into a list of lists with tabs as the delimiter
# (the file is tab-separated despite the .csv extension).
lol = list(csv.reader(open('S&P500_Returns_1926-2020.csv', 'rt'), delimiter='\t'))
# Reverses the order of the data to start in 1926, not in 2020.
lol.reverse()
# Average yearly return over the whole 94 years.
# NOTE(review): 94 is hard-coded; len(lol) would stay correct if the CSV grows.
totalReturns = 0
for row in lol:
    totalReturns += float(row[1])
totalReturns = totalReturns / 94
print("Average Gains over past 94 years: " + str(round(totalReturns,2)) + "%")
#function that gets the final value of an investment over the specified interval of years
def calculateReturns(initialAmount, startYear, endYear, lists):
    """Compound *initialAmount* over rows [startYear, endYear] of *lists*.

    Each row is (year, percent-return) with the percent in column 1; the
    indices are positions in *lists*, not calendar years.

    Removed the original try/except that wrapped only a bare `return` —
    it could never help and would have silently swallowed a real error
    (printing "Something Broke... Fix It" and returning None).

    :return: the final balance as a float
    """
    investmentAmount = initialAmount
    # islice's end is exclusive, so +1 keeps endYear's row in the window.
    for row in itertools.islice(lists, startYear, endYear + 1):
        percentChange = float(row[1])
        capitalGains = 1 + (percentChange / 100)
        investmentAmount = investmentAmount * capitalGains
    return investmentAmount
# Runs the rolling N-year simulation over every possible start year.
# List to hold the final balance of each rolling window.
dataFromEach = []
length_in_years = 30 #This is the number to change and it should be able to change the whole program
for i in range(len(lol)):
    if i <= (len(lol) - (length_in_years + 1)):
        dataFromEach.append(calculateReturns(17000,i,i + length_in_years,lol))
# NOTE(review): float() and round() below return new values that are discarded
# — this loop is a no-op (the values are already floats from calculateReturns).
for i in range(len(dataFromEach)):
    float(dataFromEach[i])
    round(dataFromEach[i],2)
# Summary statistics: min, max, mean and standard deviation of final balances.
print("Minimum Balance: " + str(f"{min(dataFromEach):,.2f}"))
print("Maximum Balance: " + str(f"{max(dataFromEach):,.2f}"))
print("Average Balance: " + str(f"{mean(dataFromEach):,.2f}"))
print("Standard Deviation: " + str(f"{np.std(dataFromEach):,.2f}"))
# X-axis labels: the END year of each rolling window.
# NOTE(review): 1956 assumes data starts 1926 and length_in_years == 30;
# changing length_in_years shifts these labels incorrectly.
years = []
for i in range(len(lol)-length_in_years):
    years.append((1956 + i))
# Data for plotting
t = years
s = dataFromEach
fig, ax = plt.subplots()
ax.plot(t, s)
# Keep the y axis out of scientific notation.
ax.ticklabel_format(useOffset=False)
ax.ticklabel_format(style='plain')
# Put thousands separators in the large numbers on the y axis.
ax.get_yaxis().set_major_formatter(
matplt.ticker.FuncFormatter(lambda x, p: format(float(x), ',')))
# Horizontal reference lines: total mortgage cost at 3% down and 20% down.
ax.axhline(y=182405.00)
ax.axhline(y=119566.02)
# Titles and labels.
ax.set(xlabel='time', ylabel='Investment value (after 30yrs)',
       title='Value of Investment portfolio started at $17,000')
ax.grid()
# Save and show the graph.
fig.savefig("test.png")
plt.show()
|
# -*- coding: utf-8 -*-
import regex as re
import pickle as pickle
import bisect, csv, codecs, bleach, json, operator, os, subprocess
import time as pytime
from collections import OrderedDict
from copy import deepcopy
import numpy as np
from sefaria.model import *
from sources.functions import post_link
from sefaria.system.exceptions import DuplicateRecordError
from sefaria.system.exceptions import InputError
from sefaria.utils import hebrew
import itertools
from linking_utilities.dibur_hamatchil_matcher import get_maximum_subset_dh
import logging, glob
def save_links_dicta(category, mesorat_hashas_name):
    """Save Dicta-generated parallel links from a JSON file into the link DB.

    Each entry is a pair of ref strings; '<d>' markers are stripped before
    saving. Duplicate links are counted and reported, not treated as errors.
    """
    mesorat_hashas = json.load(open(mesorat_hashas_name,'rb'))
    # NOTE(review): one-off Bava Batra filtering experiment kept by the author.
    """
    new_mesorat_hashas = []
    for l in mesorat_hashas:
        l[0] = l[0].replace(u'<d>',u'')
        l[1] = l[1].replace(u'<d>', u'')
        lr = [Ref(l[0]),Ref(l[1])]
        if lr[0] == None or lr[1] == None:
            continue
        if lr[0].index.title == 'Bava Batra' or lr[1].index.title == 'Bava Batra':
            new_mesorat_hashas += [l]
    objStr = json.dumps(new_mesorat_hashas, indent=4, ensure_ascii=False)
    with open('bava_batra.json', "w") as f:
        f.write(objStr.encode('utf-8'))
    mesorat_hashas = new_mesorat_hashas
    """
    num_dups = 0
    for link in mesorat_hashas:
        for i,l in enumerate(link):
            # Strip Dicta's '<d>' markers from both refs of the pair.
            link[i] = l.replace('<d>','')
        link_obj = {"auto": True, "refs": link, "anchorText": "", "generated_by": "mesorat_hashas.cs (Dicta) {}".format(category),
                    "type": "Automatic Mesorat HaShas"}
        try:
            Link(link_obj).save()
        except DuplicateRecordError:
            num_dups += 1
            pass # duplicates are expected on re-runs; just count them
    print("num dups {}".format(num_dups))
def save_links_post_request(category):
    """Re-post all auto Mesorat HaShas links generated for *category*."""
    query = {
        "generated_by": "mesorat_hashas.cs (Dicta) {}".format(category),
        "auto": True,
        "type": "Automatic Mesorat HaShas",
    }
    link_set = LinkSet(query)
    post_link([link.contents() for link in link_set])
# Alternative: derive stop words from the corpus frequency list instead.
#stop_words = [w[0] for w in json.load(open("word_counts.json", "rb"), encoding='utf8')[:100]]
# Hand-curated Hebrew stop words (names, "said"-formulas, particles) removed
# before matching so that boilerplate does not create false parallels.
stop_words = ['רב','רבי','בן','בר','בריה','אמר','כאמר','וכאמר','דאמר','ודאמר','כדאמר','וכדאמר','ואמר','כרב',
              'ורב','כדרב','דרב','ודרב','וכדרב','כרבי','ורבי','כדרבי','דרבי','ודרבי','וכדרבי',"כר'","ור'","כדר'",
              "דר'","ודר'","וכדר'",'א״ר','וא״ר','כא״ר','דא״ר','דאמרי','משמיה','קאמר','קאמרי','לרב','לרבי',
              "לר'",'ברב','ברבי',"בר'",'הא','בהא','הך','בהך','ליה','צריכי','צריכא','וצריכי','וצריכא','הלל','שמאי']
# Hermeneutic boilerplate phrases removed wholesale before tokenizing.
stop_phrases = ['למה הדבר דומה','כלל ופרט וכלל','אלא כעין הפרט','מה הפרט','כלל ופרט','אין בכלל','אלא מה שבפרט']
#stop_words = []
def base_tokenizer(base_str):
    """Normalize a Hebrew text segment into a list of comparison tokens.

    Steps: strip HTML; remove short parenthesized spans that contain a known
    book title (treated as citations); replace maqaf with a space; drop Latin
    characters and stop phrases; then per word remove vavs, non-leading yuds
    and non-letter characters, and drop stop words and empty tokens.
    """
    base_str = base_str.strip()
    base_str = bleach.clean(base_str, tags=[], strip=True)
    for match in re.finditer(r'\(.*?\)', base_str):
        # Short parenthesized spans containing a recognized title are treated
        # as citations and removed entirely.
        if library.get_titles_in_string(match.group()) and len(match.group().split()) <= 5:
            base_str = base_str.replace(match.group(), "")
    # base_str = re.sub(ur"(?:\(.*?\)|<.*?>)", u"", base_str)
    base_str = re.sub(r'־',' ',base_str)  # maqaf (Hebrew hyphen) -> space
    base_str = re.sub(r'[A-Za-z]','',base_str)
    for phrase in stop_phrases:
        base_str = base_str.replace(phrase,'')
    word_list = re.split(r"\s+", base_str)
    word_list = [re.sub(r'\P{L}','',re.sub(r'((?<!^)\u05D9)','',re.sub(r'ו','',w))) for w in word_list if w not in stop_words] #remove non-leading yuds and all vuvs
    word_list = [w for w in word_list if len(w.strip()) > 0]
    return word_list
def generate_dicta_input(category):
    """Write tokenized text files (one per book of *category*) for Dicta.

    For each book: a flat token string goes to dicta_input_<category>/, and a
    pickled text_index_map (word offsets -> segment refs) goes to
    dicta_text_index_map/ so matches can be mapped back to refs later.
    """
    mesechtot_names = get_texts_from_category(category)
    #mesechtot_names = ["Shabbat"]
    for mes in mesechtot_names:
        index = library.get_index(mes)
        # Tanakh uses a plain-text version so cantillation does not interfere.
        vtitle = "Tanach with Text Only" if category == "Tanakh" else None
        mes_tim = index.text_index_map(base_tokenizer,strict=False, lang="he", vtitle=vtitle)
        mes_list = index.nodes.traverse_to_list(lambda n,_: TextChunk(n.ref(), "he", vtitle=vtitle).ja().flatten_to_array() if not n.children else [])
        mes_str_array = [w for seg in mes_list for w in base_tokenizer(seg)]
        mes_str = " ".join(mes_str_array)
        with codecs.open("dicta_input_{}/{}.txt".format(category.lower(),mes),'wb',encoding='utf8') as f:
            f.write(mes_str)
        pickle.dump(mes_tim, open('dicta_text_index_map/{}.pkl'.format(mes), 'wb'))
        print('Done with {}'.format(mes))
def generate_dicta_output(category):
    """Parse Dicta's ComparisonResults_*.txt files into Sefaria link JSON.

    Reads word-index matches from dicta_output/, maps word positions back to
    Refs via the pickled text_index_maps, discards matches that are subsets
    of larger matches, and writes mesorat_hashas_<category>.json plus a
    variant that retains the word indexes.

    Fixes vs. original (Python 3): pickle files must be opened in binary
    mode, and files receiving encoded bytes must be opened "wb".
    """
    mesechtot_names = get_texts_from_category(category)
    #mesechtot_names = ["Shabbat"]
    mesechtot_names.sort() #put them in alphabetical order
    mesorat_hashas_dicta = {}
    pickle_jar = {}
    # Parse the raw Dicta output: groups of "book [start-end]: words" lines
    # separated by blank lines; "***" starts a new group.
    for fname in glob.glob("dicta_output/ComparisonResults_*.txt"):
        with codecs.open(fname,'r',encoding='utf8') as f:
            curr_match = []
            curr_match_content = []
            for line in f:
                if "***" in line:
                    curr_match = []
                    curr_match_content = []
                elif len(line.strip()) == 0 or (len(line.strip()) == 1 and line.strip()[0] == '\uFEFF'):
                    # End of a group: record every pair within the group.
                    combos = list(itertools.combinations(list(zip(curr_match,curr_match_content)),2))
                    for combo in combos:
                        combo = sorted(combo,key=lambda x: x[0])
                        combo_mes = [c[0] for c in combo]
                        combo_content = [c[1] for c in combo]
                        mesorah_key = '|'.join(combo_mes)
                        mesorah_value = '|'.join(combo_content)
                        if mesorah_key not in mesorat_hashas_dicta:
                            mesorat_hashas_dicta[mesorah_key] = ''
                        mesorat_hashas_dicta[mesorah_key] = mesorah_value
                else:
                    temp_match = line.split(':')[0]
                    temp_match_content = line.split(':')[1]
                    curr_match.append(temp_match)
                    curr_match_content.append(temp_match_content)
    for mes in mesechtot_names:
        print(mes)
        # Fixed: pickle requires binary mode in Python 3 (was text mode).
        pickle_jar[mes] = pickle.load(open('dicta_text_index_map/{}.pkl'.format(mes), 'rb'))
    temp_mesorat_hashas = []
    # Convert each "book [start-end]" word span into a Ref (or Ref range).
    for key in mesorat_hashas_dicta:
        word_indexes = key.split('|')
        content = mesorat_hashas_dicta[key].split('|')
        match = []
        match_index = []
        is_bad_match = False
        for iwi,wi in enumerate(word_indexes):
            mes = wi.split(' ')[0]
            temp_word_index = wi.split(' ')[1]
            start = int(temp_word_index.split('-')[0][1:]) #get rid of brackets
            end = int(temp_word_index.split('-')[1][:-1])
            #print "ORIG:{}MES:{}S:{}E:{}--".format(wi,mes,start,end)
            mes_ref_list = pickle_jar[mes][1]
            mes_ind_list = pickle_jar[mes][0]
            # bisect maps a word offset to the segment that contains it.
            start_ref = mes_ref_list[bisect.bisect_right(mes_ind_list, start)-1]
            end_ref = mes_ref_list[bisect.bisect_right(mes_ind_list, end)-1]
            if start_ref == end_ref:
                ref = start_ref
            else:
                try:
                    ref = start_ref.to(end_ref)
                except AssertionError:
                    print(start_ref, end_ref, start, end)
                    is_bad_match = True
                    break
            match.append(str(ref)) # + u' ||| ' + content[iwi])
            match_index.append([start,end])
        if is_bad_match:
            continue
        matchref = [Ref(match[0]),Ref(match[1])]
        temp_mesorat_hashas.append({"match":matchref,"match_index":match_index})
    #remove matches that are subsets
    # Index every segment touched by a match so subset detection is cheap.
    intersection_map = {}
    for matchobj in temp_mesorat_hashas:
        matchref = matchobj['match']
        for i,m in enumerate(matchref):
            mrange = m.range_list()
            for r in mrange:
                r_str = r.normal()
                if r_str not in intersection_map:
                    intersection_map[r_str] = []
                intersection_map[r_str].append((matchref[i],matchref[int(i == 0)]))
    # filter out all-to-all if you're doing corpus-to-corpus
    if isinstance(category, list) and len(category) >= 2:
        cat1texts = get_texts_from_category(category[0])
        cat2texts = get_texts_from_category(category[1])
        def corp2corp_filter(link):
            # Keep only cross-corpus links (one side in each category).
            link_ref = link['match']
            return (link_ref[0].book in cat1texts and link_ref[1].book in cat2texts) or \
                   (link_ref[0].book in cat2texts and link_ref[1].book in cat1texts)
        temp_mesorat_hashas = list(filter(corp2corp_filter, temp_mesorat_hashas))
    mesorat_hashas = []
    mesorat_hashas_with_indexes = []
    num_dups = 0
    # Drop any match whose both sides are contained in some larger match.
    for matchobj in temp_mesorat_hashas:
        matchref = matchobj['match']
        is_subset = False
        intersected2 = []
        intersected1 = intersection_map[matchref[0].starting_ref().normal()]
        for m_inter in intersected1:
            if not (m_inter[0] == matchref[0] and m_inter[1] == matchref[1]) and m_inter[0].contains(matchref[0]):
                intersected2.append(m_inter[1])
        for m_inter in intersected2:
            if m_inter.contains(matchref[1]):
                is_subset = True
                num_dups += 1
                break
        if not is_subset:
            match = [matchref[0].normal().replace("<d>",""),matchref[1].normal().replace("<d>","")]
            mesorat_hashas.append(match)
            mesorat_hashas_with_indexes.append({'match':match,'match_index':matchobj['match_index']})
    category = category if isinstance(category, str) else '-'.join(category)
    print("Num Subsets: {}".format(num_dups))
    #objStr = json.dumps(mesorat_hashas_dicta, indent=4, ensure_ascii=False)
    #with open('dicta_mesorat_hashas_{}.json'.format(category), "w") as f:
    #    f.write(objStr.encode('utf-8'))
    objStr = json.dumps(mesorat_hashas, indent=4, ensure_ascii=False)
    # Fixed: the file receives encoded bytes, so it must be opened "wb"
    # (writing bytes to a text-mode file raises TypeError in Python 3).
    with open('mesorat_hashas_{}.json'.format(category), "wb") as f:
        f.write(objStr.encode('utf-8'))
    objStr = json.dumps(mesorat_hashas_with_indexes, indent=4, ensure_ascii=False)
    with open('mesorat_hashas_indexes_{}.json'.format(category), "wb") as f:
        f.write(objStr.encode('utf-8'))
def find_extra_spaces():
    """Print Berakhot segments whose Hebrew text has leading/trailing whitespace."""
    for tractate in ["Berakhot"]:
        for segment in library.get_index(tractate).all_segment_refs():
            raw = segment.text("he").text
            if raw != raw.strip():
                print(segment, raw)
def find_gemara_stopwords():
    """Count word frequencies across the Babylonian Talmud and dump the top ~999.

    Jerusalem Talmud, Ritva and Rif texts are excluded. Output goes to
    word_counts.json, most frequent first.
    """
    mesechtot_names = [name for name in library.get_indexes_in_category("Talmud") if
                       not "Jerusalem " in name and not "Ritva " in name and not "Rif " in name]
    word_counts = {}
    for mes in mesechtot_names:
        mes_tc = Ref(mes).text("he")
        mes_str = mes_tc.ja().flatten_to_string()
        mes_str_array = mes_str.split()
        for w in mes_str_array:
            if w not in word_counts:
                word_counts[w] = 0
            word_counts[w] += 1
    # Sort ascending by count, then slice in reverse for the top ~999 words.
    sorted_wc = sorted(list(word_counts.items()), key=operator.itemgetter(1))
    objStr = json.dumps(sorted_wc[-1:-1000:-1], indent=4, ensure_ascii=False)
    with open('word_counts.json', "wb") as f:
        f.write(objStr.encode('utf-8'))
def count_matches(mesorat_hashas_name):
    """Print the number of unique (unordered) link pairs in the JSON file."""
    mesorat_hashas = json.load(open(mesorat_hashas_name, 'rb'))
    matches = {}
    for pair in mesorat_hashas:
        # Sort so (a, b) and (b, a) collapse to the same key.
        canonical = tuple(sorted(pair))
        matches[canonical] = matches.get(canonical, 0) + 1
    print(len(matches))
class Mesorah_Match_Ref:
    """An ordered pair of Refs representing one parallel-passage match.

    The two refs are canonically sorted by order_id, so (a, b) and (b, a)
    compare equal. NOTE(review): defines __eq__ without __hash__, making
    instances unhashable in Python 3 — fine for sorting and lists, not for
    sets or dict keys.
    """
    def __init__(self,a,b):
        """
        :param str a: ref string (any '<d>' markers are stripped)
        :param str b: ref string (any '<d>' markers are stripped)
        """
        a = a.replace("<d>","")
        b = b.replace("<d>","")
        yo = sorted((Ref(a),Ref(b)), key=lambda r: r.order_id())
        self.a = yo[0]
        self.b = yo[1]
    def __eq__(self, other):
        return self.a == other.a and self.b == other.b
    def __lt__(self, other):
        # Lexicographic: compare on a first, tie-break on b.
        if self.a != other.a:
            return self.a.order_id() < other.a.order_id()
        else:
            return self.b.order_id() < other.b.order_id()
    def __gt__(self, other):
        if self.a != other.a:
            return self.a.order_id() > other.a.order_id()
        else:
            return self.b.order_id() > other.b.order_id()
    def normal(self):
        """Return the pair as a list of normalized ref strings."""
        return [self.a.normal(), self.b.normal()]
def compare_mesorat_hashas(compare_a_name, compare_b_name):
    """Report links present in file B but missing from file A.

    Both files hold lists of [ref, ref] pairs. The pairs are wrapped in
    Mesorah_Match_Ref (canonically ordered) and sorted, then a merge-style
    scan collects B-entries absent from A into mesorat_hashas_comparison.json.

    Fixes vs. original: `sorted(..., cmp=...)` — the `cmp` keyword was
    removed in Python 3; Mesorah_Match_Ref's __lt__/__gt__/__eq__ implement
    exactly the old comparator, so plain sorted() is equivalent. Also the
    output file now opens "wb" since encoded bytes are written to it.
    """
    compare_a = json.load(open(compare_a_name,'rb'))
    compare_b = json.load(open(compare_b_name,'rb'))
    compare_a_mmr = sorted(Mesorah_Match_Ref(m[0], m[1]) for m in compare_a)
    compare_b_mmr = sorted(Mesorah_Match_Ref(m[0], m[1]) for m in compare_b)
    inbnota = []
    j = 0
    for i,m in enumerate(compare_b_mmr):
        if i % 1000 == 0:
            print("({}/{})".format(i,len(compare_b)))
        # Advance the A cursor until it is >= the current B match.
        while compare_a_mmr[j] < m and j < len(compare_a_mmr) - 1:
            j += 1
        if compare_a_mmr[j] > m:
            # A skipped past m without matching it: m exists only in B.
            inbnota += [m.normal()]
    print("Num in B not in A: {}".format(len(inbnota)))
    objStr = json.dumps(inbnota, indent=4, ensure_ascii=False)
    with open('mesorat_hashas_comparison.json', "wb") as f:
        f.write(objStr.encode('utf-8'))
def filter_close_matches(mesorat_hashas_name):
    """Collapse clusters of near-identical links that share a source segment.

    For each source ref, targets within max_cluster_dist of each other are
    grouped; only the earliest (by order_id) target of each cluster is kept.
    Links whose two sides are themselves within max_cluster_dist are dropped.
    Writes the surviving links to mesorat_hashas_clustered_9_words.json.
    """
    max_cluster_dist = 20
    filter_only_talmud = False
    mesorat_hashas = json.load(open(mesorat_hashas_name,'rb'))
    new_mesorat_hashas = set()
    seg_map = {}
    all_bad_links = set()
    # Group every link under its earlier (by order_id) side.
    for l in mesorat_hashas:
        if Ref(l[0]).order_id() < Ref(l[1]).order_id():
            r = l[0]
            ir = 0
        else:
            r = l[1]
            ir = 1
        other_r = Ref(l[int(ir == 0)])
        if r not in seg_map:
            seg_map[r] = set()
        seg_map[r].add((Ref(r),other_r))
    m = len(list(seg_map.items()))
    for iseg, (strr, rset) in enumerate(seg_map.items()):
        rray = list(rset)
        if iseg % 100 == 0:
            print("{}/{}".format(iseg,m))
        n = len(rray)
        # Pairwise distance matrix between this segment's targets;
        # -1 marks "no measurable distance" (different books, errors).
        dist_mat = np.zeros((n,n))
        for i in range(n):
            for j in range(i+1,n):
                if i == j:
                    dist_mat[i,j] = 0
                else:
                    try:
                        dist_mat[i,j] = rray[i][1].distance(rray[j][1])
                    except Exception:
                        dist_mat[i,j] = -1
        clusters = []
        non_clustered = set()
        clustered_indexes = set()
        # Greedy single-link clustering of targets within max_cluster_dist.
        for i in range(n):
            for j in range(i+1,n):
                if dist_mat[i,j] <= max_cluster_dist and dist_mat[i,j] != -1 and (rray[i][1].type == 'Talmud' or not filter_only_talmud):
                    #we've found an element in a cluster!
                    #figure out if a cluster already exists containing one of these guys
                    found = False
                    for c in clusters:
                        if rray[i][1] in c or rray[j][1] in c:
                            c.add(rray[i])
                            c.add(rray[j])
                            clustered_indexes.add(i)
                            clustered_indexes.add(j)
                            found = True
                            break
                    if not found:
                        c = set()
                        c.add(rray[i])
                        c.add(rray[j])
                        clustered_indexes.add(i)
                        clustered_indexes.add(j)
                        clusters += [c]
        for ir, r in enumerate(rray):
            if ir not in clustered_indexes:
                non_clustered.add(r)
        #if len(clusters) + len(non_clustered) > 5:
        #    print list(non_clustered)[0]
        for c in clusters:
            # Keep only the earliest target of each cluster; the rest are
            # remembered as bad links so they can be filtered at the end.
            other_r = None
            for temp_other_r in c:
                if other_r is None or temp_other_r[1].order_id() < other_r[1].order_id():
                    other_r = temp_other_r
            c.remove(other_r)
            for temp_other_r in c:
                temp_link = tuple(sorted((str(temp_other_r[0]), str(temp_other_r[1])), key=lambda r: Ref(r).order_id()))
                all_bad_links.add(temp_link)
            temp_link = tuple(sorted((str(other_r[0]),str(other_r[1])),key=lambda r: Ref(r).order_id()))
            # make sure temp_link isn't within max_dist of itself
            try:
                ref_obj1 = Ref(temp_link[0])
                ref_obj2 = Ref(temp_link[1])
                if (ref_obj1.type == 'Talmud' and ref_obj2.type == 'Talmud') or not filter_only_talmud:
                    temp_dist = ref_obj1.distance(ref_obj2,max_cluster_dist)
                else:
                    temp_dist = -1
            except Exception:
                temp_dist = -1
            if temp_dist == -1: # they're far away from each other
                new_mesorat_hashas.add(temp_link)
        for other_r in non_clustered:
            temp_link = tuple(sorted((str(other_r[0]),str(other_r[1])),key=lambda r: Ref(r).order_id()))
            # make sure temp_link isn't within max_dist of itself
            try:
                ref_obj1 = Ref(temp_link[0])
                ref_obj2 = Ref(temp_link[1])
                if (ref_obj1.type == 'Talmud' and ref_obj2.type == 'Talmud') or not filter_only_talmud:
                    temp_dist = ref_obj1.distance(ref_obj2,max_cluster_dist)
                else:
                    temp_dist = -1
            except Exception:
                temp_dist = -1
            if temp_dist == -1: # they're far away from each other
                new_mesorat_hashas.add(temp_link)
    # Final pass: drop any survivor that was flagged bad under another segment.
    filtered_mesorat_hashas = []
    for l in new_mesorat_hashas:
        if l not in all_bad_links:
            lray = list(l)
            filtered_mesorat_hashas += [lray]
        else:
            print(l)
    print("Old: {} New: {} Difference: {}".format(len(mesorat_hashas),len(new_mesorat_hashas),len(mesorat_hashas)-len(new_mesorat_hashas)))
    objStr = json.dumps(filtered_mesorat_hashas, indent=4, ensure_ascii=False)
    with open('mesorat_hashas_clustered_9_words.json', "w") as f:
        f.write(objStr.encode('utf-8'))
def filter_pasuk_matches(category, mesorat_hashas_name):
    """Drop matches whose overlapping text is mostly a quoted pasuk.

    Tokenizes both sides of every match, marks the words covered by linked
    Tanakh segments, and rejects the match when fewer than
    ``num_nonpasuk_match_words`` non-pasuk words remain on either side.
    Kept matches go to 'mesorat_hashas_pasuk_filtered.json', rejected ones to
    'mesorat_hashas_pasuk_filtered_bad.json'.
    """
    def bible_tokenizer(s):
        # Split on whitespace after normalizing maqaf (U+05BE) to a space.
        words = re.split(r'\s+',re.sub('\u05be', ' ',s))
        words = [w for w in words if not ('[' in w and ']' in w) and w != ''] # strip out kri
        return words
    def talmud_tokenizer(s):
        # Drop short parenthesized spans that contain citation titles.
        for match in re.finditer(r'\(.*?\)', s):
            if library.get_titles_in_string(match.group()) and len(match.group().split()) <= 5:
                s = s.replace(match.group(), "")
        for phrase in stop_phrases:
            s = s.replace(phrase, '')
        words = [w for w in re.split(r'\s+',s) if w not in stop_words and w != '']
        return words
    num_nonpasuk_match_words = 4  # min surviving non-pasuk words per side
    mesorat_hashas = json.load(open(mesorat_hashas_name, 'rb'))
    mesechtot_names = get_texts_from_category(category)
    # Per-text word-offset lists produced by the dicta preprocessing step.
    pickle_jar = {}
    for mes in mesechtot_names:
        pickle_jar[mes] = pickle.load(open('dicta_text_index_map/{}.pkl'.format(mes)))
    matches = {}
    for l in mesorat_hashas:
        tup_l = tuple(sorted(l['match']))
        if tup_l not in matches:
            try:
                matches[tup_l] = (Ref(tup_l[0]), Ref(tup_l[1]),l['match_index'])
            except InputError:
                pass
    #mesorat_hashas = [{'match':['Berakhot 4a:1-3','Berakhot 5a:1-3'],'match_index':[[1,2],[3,4]]}]
    bible_set_cache = {}
    text_array_cache = {}
    bible_array_cache = {}
    new_mesorat_hashas = []
    bad_mesorat_hashas = []
    for ildict, (match_str_tup, (ref1, ref2, inds)) in enumerate(matches.items()):
        if ildict % 100 == 0:
            print("{}/{}--------------------------------------------".format(ildict,len(mesorat_hashas)))
        bad_match = False
        m = ref1.index.title
        ind_list = pickle_jar[m][0]
        for ir,rr in enumerate([ref1,ref2]):
            try:
                str_r = str(rr)
            except UnicodeEncodeError:
                continue
            if str_r not in text_array_cache:
                tt = talmud_tokenizer(rr.text("he").ja().flatten_to_string())
                text_array_cache[str_r] = tt
                biblset = rr.linkset().filter("Tanakh")
                bible_set_cache[str_r] = biblset
            else:
                tt = text_array_cache[str_r]
                biblset = bible_set_cache[str_r]
            # Translate global word indexes into offsets within this segment.
            # NOTE(review): the local `os` shadows the os module inside this loop.
            s = ind_list[bisect.bisect_right(ind_list, inds[ir][0]) - 1]
            os = inds[ir][0] - s
            oe = inds[ir][1] - s
            match_len = oe-os + 1
            tb = {yo: 0 for yo in range(os,oe+1)}  # word index -> covered-by-pasuk flag
            tt_slice = tt[os:oe+1]
            for bl in biblset:
                try:
                    if not Ref(bl.refs[1]).is_segment_level():
                        #print bl.refs[1]
                        continue
                    bt = bible_tokenizer(Ref(bl.refs[1]).text('he','Tanach with Text Only').as_string()) if bl.refs[1] not in bible_array_cache else bible_array_cache[bl.refs[1]]
                except InputError as e:
                    print(e)
                    print("This ref is problematic {} on this Talmud ref {}".format(bl.refs[1],str(rr)))
                    continue
                bs,be = get_maximum_subset_dh(tt_slice,bt,threshold=85)
                if bs != -1 and be != -1:
                    for ib in range(bs+os,be+os):
                        tb[ib] = 1
            #e = bisect.bisect_right(ind_list, inds[ir][1])-1
            num_pasuk = sum(tb.values())
            if match_len - num_pasuk < num_nonpasuk_match_words:
                bad_match = True
                break
        if not bad_match:
            new_mesorat_hashas.append(list(match_str_tup))
        else:
            bad_mesorat_hashas.append(list(match_str_tup))
    print(bad_mesorat_hashas)
    print("Old: {} New: {} Difference: {}".format(len(mesorat_hashas), len(new_mesorat_hashas),
                                                  len(mesorat_hashas) - len(new_mesorat_hashas)))
    objStr = json.dumps(bad_mesorat_hashas, indent=4, ensure_ascii=False)
    # NOTE(review): bytes written to text-mode handles -- Py2-style; fails on Py3.
    with open('mesorat_hashas_pasuk_filtered_bad.json', "w") as f:
        f.write(objStr.encode('utf-8'))
    objStr = json.dumps(new_mesorat_hashas, indent=4, ensure_ascii=False)
    with open('mesorat_hashas_pasuk_filtered.json', "w") as f:
        f.write(objStr.encode('utf-8'))
def remove_mishnah_talmud_dups(mesorat_hashas_name):
    """Remove links that duplicate an existing 'mishnah in talmud' link."""
    mesorat_hashas = json.load(open(mesorat_hashas_name, 'rb'))
    # Materialize the known Mishnah-in-Talmud pairs once, as Ref tuples.
    mishnah_pairs = [
        (Ref(ms.refs[0]), Ref(ms.refs[1]))
        for ms in LinkSet({'type': 'mishnah in talmud'}).array()
    ]
    new_mesorat_hashas = []
    for link in mesorat_hashas:
        try:
            refs = sorted([Ref(link[0]), Ref(link[1])], key=lambda x: x.order_id())
        except InputError:
            continue
        if refs[0] is None or refs[1] is None:
            continue
        # A link is redundant when a Mishnah->Talmud pair overlaps a known
        # mishnah-in-talmud link on both sides.
        is_bad_link = (
            refs[0].type == 'Mishnah'
            and refs[1].type == 'Talmud'
            and any(refs[0].overlaps(m0) and refs[1].overlaps(m1)
                    for m0, m1 in mishnah_pairs)
        )
        if not is_bad_link:
            new_mesorat_hashas.append(link)
    print("Old: {} New: {} Difference: {}".format(len(mesorat_hashas), len(new_mesorat_hashas),
                                                  len(mesorat_hashas) - len(new_mesorat_hashas)))
    objStr = json.dumps(new_mesorat_hashas, indent=4, ensure_ascii=False)
    with open('mesorat_hashas_mishnah_filtered.json', "w") as f:
        f.write(objStr.encode('utf-8'))
def dicta_all_to_all(base_path, comp_path, aaa, min_thresh, char_inds):
    """Run dicta's FindAllSimilarPassages (mono) over the given corpora.

    Clears previous *.txt output in dicta_output/, runs the tool there, and
    restores the working directory afterwards.

    :param base_path: base corpus directory (relative to dicta_output's parent)
    :param comp_path: comparison corpus directory
    :param aaa: value for -allagainstall
    :param min_thresh: value for -minthreshold
    :param char_inds: value for -charindicies
    """
    os.chdir('dicta_output/')
    try:
        for f in glob.glob("*.txt"):
            os.remove(f)
        cmd = "mono FindAllSimilarPassages.exe -basepath=../{} -comppath=../{} -allagainstall={} -minthreshold={} -charindicies={}".format(base_path, comp_path, aaa, min_thresh, char_inds)
        process = subprocess.Popen(cmd.split(), stdin=subprocess.PIPE)
        # BUG FIX: the pipe is in bytes mode (no text=True), so passing the
        # str "" raised TypeError on Python 3 -- send empty *bytes* instead.
        output, error = process.communicate(b"")
        #print output
    finally:
        # Restore the working directory even if the tool or cleanup fails.
        os.chdir('../')
def get_texts_from_category(category):
    """Resolve a category name (or a list of names) to a list of index titles.

    Known categories: "Talmud" (Bavli only), "Mishnah"/"Tosefta"/"Tanakh",
    "All" (a broad midrash+talmud set), "Debug" (just Berakhot).  Unknown
    categories contribute nothing.
    """
    categories = [category] if isinstance(category, str) else category
    text_names = []
    for cat in categories:
        if cat == "Talmud":
            # Bavli only: skip Yerushalmi and the Ritva/Rif commentaries.
            excluded = ("Jerusalem ", "Ritva ", "Rif ")
            text_names += [
                name for name in library.get_indexes_in_category("Talmud")
                if not any(token in name for token in excluded)
            ]
        elif cat in ("Mishnah", "Tosefta", "Tanakh"):
            text_names += library.get_indexes_in_category(cat)
        elif cat == "All":
            # Individually-named midrashim first, then whole categories.
            text_names += ["Mekhilta d'Rabbi Yishmael", 'Seder Olam Rabbah','Sifra' ,'Mekhilta DeRabbi Shimon Bar Yochai','Sifrei Bamidbar','Megillat Taanit','Otzar Midrashim','Pirkei DeRabbi Eliezer','Pesikta D\'Rav Kahanna','Tanna Debei Eliyahu Rabbah','Tanna debei Eliyahu Zuta','Pesikta Rabbati']
            for c in ['Bavli', 'Mishnah', 'Tosefta', 'Midrash Rabbah']:
                text_names += library.get_indexes_in_category(c)
        elif cat == "Debug":
            text_names.append("Berakhot")
    return text_names
def filter_pasuk_matches2(cat1, cat2):
    """Second-pass pasuk filter driven by precomputed link-index files.

    Keeps a ``cat1`` match only when, after masking out words covered by
    Tanakh quotations (looked up in the ``cat2`` index file), at least 4
    non-pasuk words remain on *each* side.  Writes survivors to
    'mesorat_hashas_pasuk_filtered2.json' and rejects to
    'mesorat_hashas_pasuk_filtered_bad2.json'.
    """
    class TanakhLink:
        # One Tanakh quotation plus the word-index span(s) it covers in each
        # quoting ref.  Equality/hash are by the pasuk's canonical order id.
        def __init__(self, tref, to_ref, tref_index):
            self.tref = tref
            self.oref = Ref(tref)
            self.original_to_ref = to_ref
            self.to_ref_map = {
                to_ref: tref_index
            }
        def __eq__(self, other):
            return self.__hash__() == other.__hash__()
        def __hash__(self):
            return self.oref.order_id().__hash__()
    cat1 = cat1 if isinstance(cat1, str) else '-'.join(cat1)
    cat2 = cat2 if isinstance(cat2, str) else '-'.join(cat2)
    mes_shas1 = json.load(open('mesorat_hashas_indexes_{}.json'.format(cat1), 'rb'))
    mes_shas2 = json.load(open('mesorat_hashas_indexes_{}.json'.format(cat2), 'rb'))
    def get_tanakh_links(r1, r2):
        # Collect pesukim linked from r1 and r2.  Prefer pesukim quoted by
        # BOTH sides; otherwise fall back to long (>6 word) one-sided quotes.
        tan1 = {}
        tan2 = {}
        for m in mes_shas2:
            if m['match'][0] == r1:
                tan1[m['match'][1]] = TanakhLink(m['match'][1], r1, m['match_index'][0])
            elif m['match'][1] == r1:
                tan1[m['match'][0]] = TanakhLink(m['match'][0], r1, m['match_index'][1])
            if m['match'][0] == r2:
                tan2[m['match'][1]] = TanakhLink(m['match'][1], r2, m['match_index'][0])
            elif m['match'][1] == r2:
                tan2[m['match'][0]] = TanakhLink(m['match'][0], r2, m['match_index'][1])
        inter_keys = set(tan1.keys()).intersection(set(tan2.keys()))
        inter = []
        if len(inter_keys) == 0:
            for temp_tan in list(tan1.values()):
                temp_map = temp_tan.to_ref_map[temp_tan.original_to_ref]
                if temp_map[1] - temp_map[0] + 1 > 6:
                    inter += [temp_tan]
            for temp_tan in list(tan2.values()):
                temp_map = temp_tan.to_ref_map[temp_tan.original_to_ref]
                if temp_map[1] - temp_map[0] + 1 > 6:
                    inter += [temp_tan]
        else:
            # Merge the two sides' span maps onto a single TanakhLink.
            for temp_int in inter_keys:
                temp_tan1 = tan1[temp_int]
                temp_tan2 = tan2[temp_int]
                temp_tan1.to_ref_map[temp_tan2.original_to_ref] = temp_tan2.to_ref_map[temp_tan2.original_to_ref]
                inter += [temp_tan1]
        return inter
    def inner_pasuk_filter(m):
        # True when the match keeps >= 4 non-pasuk words on both sides.
        tan_links = get_tanakh_links(m['match'][0], m['match'][1])
        is_bad_match = False
        for i, r in enumerate(m['match']):
            ind_dict = {ind: 0 for ind in range(m['match_index'][i][0],m['match_index'][i][1] + 1)}
            for temp_tan in tan_links:
                try:
                    temp_map = temp_tan.to_ref_map[r]
                except KeyError:
                    continue
                for j in range(temp_map[0],temp_map[1]+1):
                    try:
                        ind_dict[j] = 1
                    except KeyError:
                        pass
            num_in_pasuk = sum([v for k, v in list(ind_dict.items())])
            num_in_match = m['match_index'][i][1] - m['match_index'][i][0] + 1
            if num_in_match - num_in_pasuk < 4:
                is_bad_match = True
                break
        return not is_bad_match
    new_mesorat_hashas = []
    bad_mesorat_hashas = []
    for i, m in enumerate(mes_shas1):
        if i % 1000 == 0:
            print("{}/{}".format(i,len(mes_shas1)))
        if inner_pasuk_filter(m):
            new_mesorat_hashas += [m['match']]
        else:
            bad_mesorat_hashas += [m['match']]
    print(bad_mesorat_hashas)
    print("Old: {} New: {} Difference: {}".format(len(mes_shas1), len(new_mesorat_hashas),
                                                  len(mes_shas1) - len(new_mesorat_hashas)))
    objStr = json.dumps(bad_mesorat_hashas, indent=4, ensure_ascii=False)
    # NOTE(review): bytes written to text-mode handles -- Py2-style; fails on Py3.
    with open('mesorat_hashas_pasuk_filtered_bad2.json', "w") as f:
        f.write(objStr.encode('utf-8'))
    objStr = json.dumps(new_mesorat_hashas, indent=4, ensure_ascii=False)
    with open('mesorat_hashas_pasuk_filtered2.json', "w") as f:
        f.write(objStr.encode('utf-8'))
def sort_n_save(mesorat_hashas_name):
    """Sort a mesorat-hashas link file into canonical ref order, in place.

    Loads the JSON list of [ref_a, ref_b] pairs, sorts the corresponding
    Mesorah_Match_Ref objects, and rewrites the same file.
    """
    mesorat_hashas = json.load(open(mesorat_hashas_name, 'rb'))
    # BUG FIX: sorted() lost its `cmp` keyword in Python 3.  The removed
    # comparator only delegated to the objects' own < / > operators, so the
    # natural sort order of Mesorah_Match_Ref is exactly equivalent.
    sorted_match_refs = sorted(Mesorah_Match_Ref(m[0], m[1]) for m in mesorat_hashas)
    sorted_mh = [[mmr.a.normal(), mmr.b.normal()] for mmr in sorted_match_refs]
    objStr = json.dumps(sorted_mh, indent=4, ensure_ascii=False)
    with open(mesorat_hashas_name, "wb") as f:
        f.write(objStr.encode('utf-8'))
# --- Pipeline driver ---------------------------------------------------------
# Stages of the mesorat-hashas build are run by hand: uncomment exactly the
# stage needed.  The active call compares the Dicta-derived link set against
# the Sefaria-derived one.
#make_mesorat_hashas()
#minify_mesorat_hashas()
#find_most_quoted()
#save_links()
#clean_mesorat_hashas()
#find_bad_bad_gemaras()
#generate_dicta_input("Debug")
#dicta_all_to_all('dicta_input_debug/', '', True, 9, False)
#generate_dicta_output(["Debug"])
#filter_pasuk_matches2("Talmud", ["Tanakh", "Talmud"])
#filter_pasuk_matches('Debug','mesorat_hashas_indexes_Debug.json')
#filter_close_matches('mesorat_hashas_pasuk_filtered.json')
#remove_mishnah_talmud_dups('mesorat_hashas_clustered_9_words.json')
#save_links_dicta("All", 'mesorat_hashas_mishnah_filtered_ALL_READY_FOR_PROD.json')
compare_mesorat_hashas('mesorat_hashas_clustered_DICTA.json', 'mesorat_hashas_clustered_SEFARIA.json')
#save_links_post_request("All")
#count_matches('mesorat_hashas_mishnah_filtered.json')
#sort_n_save('mesorat_hashas_clustered_SEFARIA.json')
#sort_n_save('mesorat_hashas_clustered_DICTA.json')
|
import requests
from bs4 import BeautifulSoup
import pandas as pd
# Archive dimensions to crawl: tag x year x month x day-of-month.
tags = ['covid19', 'coronavirus', 'pandemic', 'covid-19-crisis', 'quarantine']
years = ['2020']
months = ['03', '04']
# Zero-padded day strings '01'..'31' (Medium's archive URLs require padding).
days = ['{:02d}'.format(i) for i in range(1, 32)]
# Accumulates one record per scraped article link.
link_data = []
def retrieve_urls():
    """Scrape Medium tag-archive pages for article links and save them.

    Iterates every tag/year/month/day combination, collects each article
    card's title, post id and link into the module-level ``link_data``, and
    writes the result to medium_links.csv.  Cards missing a title or
    read-more button are skipped.
    """
    for tag in tags:
        for year in years:
            for month in months:
                for day in days:
                    url = 'https://medium.com/tag/'+tag+'/archive/'+year+'/'+month+'/'+day
                    page = requests.get(url)
                    soup = BeautifulSoup(page.content, 'lxml')
                    for article in soup.find_all('div', class_='cardChromeless'):
                        read_more_section = article.find('a', class_='button')
                        try:
                            title = article.find('h3').contents[0]
                            post_id = read_more_section.attrs['data-post-id']
                            link = read_more_section.attrs['href']
                        # BUG FIX: the old bare `except:` swallowed *every*
                        # error (including KeyboardInterrupt).  Only the
                        # lookup failures of an incomplete card are expected.
                        except (AttributeError, KeyError, IndexError):
                            continue
                        link_data.append({'post_id': post_id,
                                          'tag': tag,
                                          'day': day,
                                          'month': month,
                                          'year': year,
                                          'article_title': title,
                                          'link': link
                                          })
    df = pd.DataFrame(link_data)
    df.to_csv('medium_links.csv')
# Kick off the scrape (network-heavy; writes medium_links.csv as a side effect).
retrieve_urls()
# Link Format: https://medium.com/tag/covid-19-crisis/archive/2020/03/06
# 'https://medium.com/tag/'+tag+'/archive/'+year+'/'+month+'/'+day
from flask import Flask
from flask import jsonify
from flask import request
from flask_cors import CORS
from flask_pymongo import PyMongo
from werkzeug.security import generate_password_hash
from werkzeug.security import check_password_hash
from string import punctuation
from Backend.Algorithm import generateTags
from Backend.Algorithm import get_tokens
from Backend.ConfluenceConnector import get_content
from Backend.ConfluenceConnector import check_Connection
from Backend.ConfluenceConnector import uploadTo_confluence
#install neccessary packages (including package _future_)
# Initializes the REST API as a Flask app.
app = Flask(__name__)
# Allows cross-domain data transfer (required for data transfer between localhosts).
CORS(app)
# Database name; MongoDB creates 'projectdb' on first use if it doesn't exist.
app.config['MONGO_DBNAME'] = 'projectdb'
# Default MongoDB URI used to connect to the local DB.
app.config['MONGO_URI'] = 'mongodb://localhost:27017/projectdb'
mongo = PyMongo(app)
# Module-level credential state for Confluence access, set by /api/user/login.
# NOTE(review): process-wide globals mean only one logged-in user at a time.
globalUser = ''
password = ''
url = ''
space = None
#After server is started it will show this message.
@app.route('/')
def hello_server():
    """Health-check endpoint confirming the API is reachable."""
    return 'Server is up and running.'
#------User Methods-----#
#@Deprecated
#Register: creates a new user, hashes the password and saves it to DB. Returns new user name as JSON.
@app.route('/api/user/register', methods=['POST'])
def add_user():
    """Register a new user: hash the password, store the user, echo the name.

    Expects form fields 'name' and 'password'; returns {'result': {'name': ...}}.
    """
    users = mongo.db.users
    # BUG FIX: the old code looped `for key in request.form` and then did
    # key['name'] -- but iterating a form yields *strings*, so indexing one
    # with 'name' raised TypeError.  Read the fields directly instead.
    name = request.form.get('name', '')
    password = request.form.get('password', '')
    encryptedPassword = generate_password_hash(password)
    # insert_one replaces the deprecated Collection.insert (removed in pymongo 4).
    new_user_id = users.insert_one({'name': name, 'password': encryptedPassword}).inserted_id
    u = users.find_one({'_id': new_user_id})
    output = {'name': u['name']}
    return jsonify({'result': output})  # Flask attaches HTTP 200 by default
#Input: url, space key, username, password
#Output: username
#Registration: when a new user tries to login.
#Login: authenticates the user.
#Saves credentials to connect to Confluence.
@app.route('/api/user/login', methods=['POST'])
def login_user():
    """Authenticate a user and stash Confluence credentials.

    Expects 'url', 'username', 'password' and optionally 'space' in the
    request values.  Upserts the (hashed) credentials, verifies the password,
    checks Confluence connectivity, and returns the user name -- or a 401
    JSON error on failure.
    """
    users = mongo.db.users
    global url
    url = request.values['url']
    global space
    if 'space' in request.values:
        print('space key found')
        space = request.values['space']
    else:
        space = None
    global globalUser
    globalUser = request.values['username']
    global password
    password = request.values['password']
    encryptedPassword = generate_password_hash(password)
    users.update_one({'name': globalUser},
                     {'$set': {'name': globalUser, 'password': encryptedPassword}}, upsert=True)
    user = users.find_one({'name': globalUser})
    hashedPassword = user['password']  # hashed password as stored in the DB
    if check_password_hash(hashedPassword, password):  # validate input against stored hash
        #api = ConfluenceAPI(globalUser, password, url)
        # BUG FIX: the old code tested `if check_Connection:` -- the *function
        # object*, which is always truthy -- so connectivity was never checked.
        # Actually invoke it.  (Assumes a no-argument signature that reads the
        # module-level credentials -- TODO confirm against ConfluenceConnector.)
        if check_Connection():
            output = {'name': user['name']}
            return jsonify({'name': output})
    # BUG FIX: a Flask view must return a valid response; the old code
    # returned False (or fell through to None), which crashes with a 500.
    return jsonify({'error': 'authentication failed'}), 401
#------Confluence Methods-----#
#Input: document ID
#Output: generated tags
#Lists all documents from the DB related to logged in user.
@app.route('/api/confluencedata', methods=['GET', 'POST'])
def get_all_confluenceData():
    """List every stored Confluence document belonging to the logged-in user."""
    docs = mongo.db.confluencedata
    output = [
        {'documentId': d['documentId'], 'title': d['title'], 'date': d['date'],
         'body': d['body'], 'tags': d['tags']}
        for d in docs.find({'name': globalUser})
    ]
    return jsonify({'result': output})  # every return carries an HTTP status code
#Input: document ID
#Output: document
#Searches database and returns only matching ID and document
@app.route('/api/confluencedata/search/<Id>', methods=['GET'])
def get_document_by_Id(Id):
    """Return the single stored document matching *Id*, or a not-found message."""
    doc = mongo.db.confluencedata.find_one({'documentId': Id})
    # Default to the not-found message; replaced when a document exists.
    output = "No document with ID: " + Id + " found."
    if doc:
        output = {'documentId': doc['documentId'], 'title': doc['title'],
                  'date': doc['date'], 'body': doc['body'], 'tags': doc['tags']}
    return jsonify({'result': output})
#Input: username, pasword, url, space
#Output: downloaded docuements
#Downloads data through Confluence API using the given login parameters and saves everything (data is binded to a user).
@app.route('/api/confluencedata/download', methods=['GET'])
def download_confluenceData():
    """Download pages from Confluence and upsert them for the current user.

    Uses the credentials captured at login; restricts to the stored space key
    when one was given.  Returns the raw Confluence documents as JSON.
    """
    confluenceData = mongo.db.confluencedata
    if space is not None:
        documents = get_content(globalUser, password, url, space)
    else:
        documents = get_content(globalUser, password, url)
    # Map the Confluence JSON onto our DB schema (one record per document).
    for doc in documents:
        documentId = doc['id']
        title = doc['title']
        date = doc['history']['createdDate'][0:10]  # YYYY-MM-DD prefix of the ISO timestamp
        body = doc['body']['storage']['value']
        labels = [i['label'] for i in doc['metadata']['labels']['results']]
        stringLabels = ', '.join(labels)
        # BUG FIX: records were stored under a hard-coded developer e-mail,
        # so no other account could ever see its own downloads; bind each
        # document to the logged-in user instead.
        confluenceData.update_one(
            {'documentId': documentId},
            {'$set': {'documentId': documentId, 'title': title, 'date': date,
                      'body': body, 'tags': stringLabels, 'name': globalUser}},
            upsert=True)
    return jsonify(documents)
#------Tagging Methods-----#
#Input: document ID
#Output: generated tags
#Uses the tag function in Algorithm.py to generate new labels for the document.
tagged_text =''
@app.route('/api/confluencedata/tag', methods=['POST'])
def tag_document():
    """Generate suggested tags for one stored document via generateTags."""
    doc_id = request.values['docId']
    doc = mongo.db.confluencedata.find_one({'documentId': doc_id})
    if doc:
        output = generateTags(doc['title'], doc['body'])
    else:
        output = "No document with ID: " + doc_id + " was found."
    return jsonify({'tags': output})
#Input: document ID, original tags, newly generated tags
#Output: filtered tags which are saved to the DB
#Adds filtered new tags saved to the DB after comparing if certain tags already exists or not.
@app.route('/api/confluencedata/save/tag', methods=['POST'])
def save_tag():
    """Merge newly generated tags into a document's stored tag string.

    Expects 'docId', 'originalTags' and 'newTags' in the request values.
    Tokenizes both tag strings, keeps only the genuinely new tokens, appends
    them to the stored tags, and returns the resulting tag string.
    """
    confluencedata = mongo.db.confluencedata
    doc_id = request.values['docId']
    originalTags = request.values['originalTags']
    newTags = request.values['newTags']
    # Strip surrounding punctuation from every word; drop tokens that vanish.
    newTags = ' '.join(word.strip(punctuation) for word in newTags.split()
                       if word.strip(punctuation))
    s = confluencedata.find_one({'documentId': doc_id})
    originalTagsToken = get_tokens(originalTags)
    newTagsToken = get_tokens(newTags)
    print(newTagsToken)
    if s:
        newUniqueTags = [w for w in newTagsToken if w not in originalTagsToken]
        print(newUniqueTags)
        original_tags = ''.join(s['tags'])
        print(original_tags)
        # BUG FIX: the old test was `len(newUniqueTags) > 1`, which silently
        # discarded the case of exactly ONE new tag; any non-empty list of
        # new tags should be saved.
        if newUniqueTags:
            newUniqueTagsString = ', '.join(newUniqueTags)
            if original_tags:
                complete_tags = original_tags + ', ' + newUniqueTagsString
            else:
                complete_tags = newUniqueTagsString
            confluencedata.update_one({'documentId': doc_id}, {'$set': {'tags': complete_tags}})
            #uploadTo_confluence(complete_tags, doc_id, globalUser, password, url)
            output = complete_tags
        else:
            output = original_tags
    else:
        output = "Tags could not be saved for document with ID: " + doc_id
    return jsonify({'tags': output})
# Run the development server (with the debug reloader) when executed directly.
if __name__ == '__main__':
    app.run(debug=True)
|
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 5 13:08:01 2015
@author: franciscojavierarceo
"""
import os
import pandas as pd
from gensim import corpora, models, similarities
from collections import defaultdict
os.chdir("/Users/franciscojavierarceo/")
df1 = pd.read_csv("NYTimesdata.csv")
df1.head()
frequency = defaultdict(int)
documents = df1["Title"].values
stoplist = set("for a of the and to in".split())
texts = [
[word for word in document.lower().split() if word not in stoplist]
for document in documents
]
texts
for text in texts:
for token in text:
frequency[token] += 1
texts = [[token for token in text if frequency[token] > 1] for text in texts]
from pprint import pprint # pretty-printer
pprint(texts)
dictionary = corpora.Dictionary(texts)
print(dictionary)
print(dictionary.token2id)
new_doc = "Human computer interaction"
new_vec = dictionary.doc2bow(new_doc.lower().split())
new_vec
print(new_vec)
corpus = [dictionary.doc2bow(text) for text in texts]
corpora.MmCorpus.serialize("/tmp/deerwester.mm", corpus)
print(corpus)
class MyCorpus(object):
    """Memory-friendly corpus: streams the CSV one line (document) at a time."""

    def __iter__(self):
        # assume there's one document per line, tokens separated by whitespace
        # BUG FIX: the original `open(...)` handle was never closed; a context
        # manager releases it when iteration finishes or is abandoned.
        with open("NYTimesdata.csv") as source:
            for line in source:
                yield dictionary.doc2bow(line.lower().split())
corpus_memory_friendly = MyCorpus()  # doesn't load the corpus into memory!
print(corpus_memory_friendly)
for vector in corpus_memory_friendly:  # load one vector into memory at a time
    print(vector)
from gensim import corpora, matutils
corpora.MmCorpus.serialize("/tmp/corpus.mm", corpus)
# BUG FIX: the original referenced the bare name `gensim` (only submodules
# were ever imported) and an undefined `scipy_sparse_matrix`, so both lines
# raised NameError.  Convert the corpus to a scipy CSC matrix first, then
# demonstrate the round-trip back, as the gensim tutorial intends.
scipy_csc_matrix = matutils.corpus2csc(corpus)
corpus = matutils.Sparse2Corpus(scipy_csc_matrix)
|
from PIL import Image
#func for create each bar
def plotBar(image, width, color, heigth=60):
    """Create a solid-color bar (one strip of the barcode).

    `image` is the PIL Image *module* (callers pass `Image`), not an Image
    instance -- only its `new` factory is used.  `heigth` [sic] is the bar
    height in pixels; the parameter name is kept for keyword compatibility.
    """
    return image.new("RGB", (width, heigth), color)
#main function
def dna_to_barcode(path):
with open(path) as file:
dna_sequence = file.read()
#separating each nitrogenous bases
dna = [char for char in dna_sequence]
#setting the colors of each base
adenine_color = "rgb(0,248,0)" #green
thymine_color = "rgb(255,0,0)" #red
guanine_color = "rgb(0,0,0)" #black
cytosine_color = "rgb(0,0,248)" #blue
#create the full image
barcode = Image.new("RGB", (len(dna), 60))
#variable for save the next drawable point in canvas
pixels_counter = 1
for b in dna:
if pixels_counter < len(dna):
if b.lower() == 'a':
bar = plotBar(Image, pixels_counter, adenine_color)
elif b.lower() == 't':
bar = plotBar(Image, pixels_counter, thymine_color)
elif b.lower() == 'c':
bar = plotBar(Image, pixels_counter, cytosine_color)
elif b.lower() == 'g':
bar = plotBar(Image, pixels_counter, guanine_color)
barcode.paste(bar, (pixels_counter, 0))
pixels_counter += 1
barcode.show()
barcode.save('../output/barcode.png', 'PNG')
# Input file containing the raw DNA sequence.
path = '../docs/dna_sequence.txt'
dna_to_barcode(path)
|
from datetime import datetime, timedelta
from nonebot.log import logger
from tinydb import TinyDB
import time
from .bilireq import BiliReq
from .config import Config
from .dynamic import Dynamic
from .utils import safe_send, scheduler, get_path
# Per-uid timestamp of the newest dynamic already pushed (in-memory only;
# resets on restart, which re-baselines each user on the next poll).
last_time = {}
# TinyDB table recording every pushed dynamic, persisted to history.json.
dynamic_history = TinyDB(get_path('history.json'), encoding='utf-8').table("dynamic_history")
@scheduler.scheduled_job('cron', second='*/10', id='dynamic_sched')
async def dy_sched():
    """Dynamic-feed push job, fired every 10 seconds.

    Polls the next configured uid, compares fetched dynamics against
    ``last_time``, and pushes anything new (and less than 10 minutes old)
    to every subscribed target.  (The original docstring said "直播推送" /
    live push -- presumably copy-pasted from the live module.)
    """
    with Config() as config:
        uid = config.next_uid('dynamic')  # round-robin over configured users
        if not uid:
            return
        push_list = config.get_push_list(uid, 'dynamic')
        name = push_list[0]['name']
    logger.debug(f'爬取动态 {name}({uid})')
    b = BiliReq()
    dynamics = (await b.get_user_dynamics(uid)).get('cards', [])  # fetch the most recent ~12 dynamics
    # config['uid'][uid]['name'] = dynamics[0]['desc']['user_profile']['info']['uname']
    # await update_config(config)
    if len(dynamics) == 0:  # never posted, or everything deleted: nothing to do
        return
    if uid not in last_time:  # first poll for this user: just record the newest timestamp
        dynamic = Dynamic(dynamics[0])
        last_time[uid] = dynamic.time
        return
    for dynamic in dynamics[4::-1]:  # walk the 5 newest dynamics oldest-to-newest
        dynamic = Dynamic(dynamic)
        # Push only items newer than the last push AND no older than 10 minutes.
        if dynamic.time > last_time[uid] and dynamic.time > datetime.now().timestamp() - timedelta(minutes=10).seconds:
            try:
                await dynamic.get_screenshot()
            except AttributeError:
                return
            await dynamic.format()
            for sets in push_list:
                await safe_send(sets['bot_id'], sets['type'], sets['type_id'], dynamic.message)
            last_time[uid] = dynamic.time
            # Record the push in the persistent history table.
            dynamic_history.insert({'uid': uid, 'time': time.time()})
|
# coding: utf-8
"""Views related to administering postage."""
from __future__ import unicode_literals
import StringIO
import flask_login as login
# from flask.ext import login
import flask
from eisitirio import app
from eisitirio.database import db
from eisitirio.database import models
from eisitirio.helpers import login_manager
from eisitirio.helpers import unicode_csv
APP = flask.current_app#app.APP#DB = db.DB
from eisitirio.app import eisitiriodb as DB
ADMIN_POSTAGE = flask.Blueprint('admin_postage', __name__)
def get_postage_query(postage_type, unposted_only=True):
    """Build the base query for postage entries.

    Entries are paid and not cancelled, ordered by postage type then owner
    surname/forenames.  `postage_type` narrows to graduand-only ('graduand')
    or non-graduand ('posted') entries; any other value returns everything.
    When `unposted_only` is set, already-posted entries are excluded.
    """
    query = models.Postage.query.filter(
        models.Postage.paid == True  # pylint: disable=singleton-comparison
    ).join(
        models.Postage.owner
    ).filter(
        models.Postage.cancelled == False  # pylint: disable=singleton-comparison
    ).order_by(
        models.Postage.postage_type
    ).order_by(
        models.User.surname
    ).order_by(
        models.User.forenames
    )
    if unposted_only:
        query = query.filter(
            models.Postage.posted == False  # pylint: disable=singleton-comparison
        )
    # APP.config is only consulted inside the branches, matching the original
    # behaviour when no postage_type filter applies.
    if postage_type == 'graduand':
        return query.filter(
            models.Postage.postage_type ==
            APP.config['GRADUAND_POSTAGE_OPTION'].name
        )
    elif postage_type == 'posted':
        return query.filter(
            models.Postage.postage_type !=
            APP.config['GRADUAND_POSTAGE_OPTION'].name
        )
    return query
@ADMIN_POSTAGE.route('/admin/postage/<postage_type>')
@ADMIN_POSTAGE.route('/admin/postage')
@ADMIN_POSTAGE.route('/admin/postage/<postage_type>/page/<int:page>')
@ADMIN_POSTAGE.route('/admin/postage/page/<int:page>')
@login.login_required
@login_manager.admin_required
def postage_dashboard(postage_type=None, page=1):
    """Provide an interface for packing posted tickets."""
    entries = get_postage_query(postage_type).paginate(page=page)
    return flask.render_template(
        'admin_postage/postage_dashboard.html',
        postage_entries=entries,
        postage_type=postage_type,
        page=page
    )
@ADMIN_POSTAGE.route('/admin/postage/<int:postage_id>/mark_posted')
@login.login_required
@login_manager.admin_required
def mark_as_posted(postage_id):
    """Mark a postage entry as packed/posted.

    Refuses while any ticket in the entry is still unassigned.  Always
    redirects back to the referrer (or the postage dashboard).
    """
    postage = models.Postage.query.get_or_404(postage_id)
    # NOTE: get_or_404 aborts with a 404 itself, so the old
    # `if not postage` error branch could never execute and was removed.
    if not all(ticket.collected for ticket in postage.tickets):
        flask.flash('Not all of the tickets for this entry have been assigned.',
                    'error')
    else:
        postage.posted = True
        DB.session.commit()
        flask.flash('Entry marked as packed/posted.', 'success')
    return flask.redirect(flask.request.referrer or
                          flask.url_for('admin_postage.postage_dashboard'))
@ADMIN_POSTAGE.route('/admin/postage/export/<unposted_only>')
@ADMIN_POSTAGE.route('/admin/postage/export/<unposted_only>/<postage_type>')
@login.login_required
@login_manager.admin_required
def export_postage(unposted_only, postage_type=None):
    """Export postage entries as CSV.

    Exports the statistics used to render the graphs as a CSV file.
    """
    csvdata = StringIO.StringIO()
    csvwriter = unicode_csv.UnicodeWriter(csvdata)
    header = [
        'user_id',
        'user_name',
        'postage_type',
        'address',
        'status',
        'num_tickets',
        'ticket_ids',
    ]
    csvwriter.writerow(header)
    entries = get_postage_query(postage_type, unposted_only == 'unposted')
    for postage in entries:
        address = postage.address if postage.address is not None else 'N/A'
        status = '{0}Posted/Packed'.format('' if postage.posted else 'Not ')
        ticket_ids = ';'.join(str(ticket.object_id) for ticket in postage.tickets)
        csvwriter.writerow([
            postage.owner.object_id,
            postage.owner.full_name,
            postage.postage_type,
            address,
            status,
            postage.tickets.count(),
            ticket_ids,
        ])
    # Rewind so send_file streams from the beginning of the buffer.
    csvdata.seek(0)
    return flask.send_file(csvdata, mimetype='text/csv', cache_timeout=900)
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-LOG 蓝鲸日志平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-LOG 蓝鲸日志平台 is licensed under the MIT License.
License for BK-LOG 蓝鲸日志平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import sys
from blueapps.conf.log import get_logging_config_dict
from blueapps.conf.default_settings import * # noqa
from django.utils.translation import ugettext_lazy as _
# 这里是默认的 INSTALLED_APPS,大部分情况下,不需要改动
# 如果你已经了解每个默认 APP 的作用,确实需要去掉某些 APP,请去掉下面的注释,然后修改
# INSTALLED_APPS = (
# 'bkoauth',
# # 框架自定义命令
# 'blueapps.contrib.bk_commands',
# 'django.contrib.admin',
# 'django.contrib.auth',
# 'django.contrib.contenttypes',
# 'django.contrib.sessions',
# 'django.contrib.sites',
# 'django.contrib.messages',
# 'django.contrib.staticfiles',
# # account app
# 'blueapps.account',
# )
# Add your project-specific apps here.
INSTALLED_APPS += (
    # django_jsonfield_backport must come first
    "django_jsonfield_backport",
    "django_prometheus",
    "rest_framework",
    "iam.contrib.iam_migration",
    "apps.iam",
    "apps.api",
    "apps.log_search",
    "apps.log_audit",
    "apps.log_databus",
    "apps.log_esquery",
    "apps.log_measure",
    "apps.log_trace",
    "apps.esb",
    "apps.bk_log_admin",
    "apps.grafana",
    "bk_monitor",
    "home_application",
    # pipeline task-engine apps
    "pipeline",
    "pipeline.log",
    "pipeline.engine",
    "pipeline.component_framework",
    "pipeline.django_signal_valve",
    "django_celery_beat",
    "django_celery_results",
    "apps.log_extract",
    "apps.feature_toggle",
)
# BKLOG backend-API mode: off by default; when enabled, backend-API sessions
# are kept in signed cookies instead of being written to the local cache.
# Fix: os.environ values are strings, so without int() a value of "0" would
# be truthy and wrongly enable API mode.
BKAPP_IS_BKLOG_API = int(os.environ.get("BKAPP_IS_BKLOG_API", 0))
if BKAPP_IS_BKLOG_API:
    SESSION_ENGINE = "django.contrib.sessions.backends.signed_cookies"
else:
    SESSION_ENGINE = "django.contrib.sessions.backends.cache"
INSTALLED_APPS += ("version_log",)
# Default middleware stack; usually no changes are needed.
# If you understand what each default MIDDLEWARE does and really need to
# remove entries or change their order, copy the tuple and adjust it.
MIDDLEWARE = (
    "django_prometheus.middleware.PrometheusBeforeMiddleware",
    # request instance provider
    "blueapps.middleware.request_provider.RequestProvider",
    "django.contrib.sessions.middleware.SessionMiddleware",
    "django.middleware.common.CommonMiddleware",
    "django.middleware.csrf.CsrfViewMiddleware",
    "django.contrib.auth.middleware.AuthenticationMiddleware",
    "django.contrib.messages.middleware.MessageMiddleware",
    "blueapps.middleware.xss.middlewares.CheckXssMiddleware",
    # clickjacking protection middleware, disabled by default
    # 'django.middleware.clickjacking.XFrameOptionsMiddleware',
    "django.middleware.security.SecurityMiddleware",
    # BlueKing static resource serving
    "whitenoise.middleware.WhiteNoiseMiddleware",
    # Auth middleware
    "blueapps.account.middlewares.BkJwtLoginRequiredMiddleware",
    "blueapps.account.middlewares.WeixinLoginRequiredMiddleware",
    "blueapps.account.middlewares.LoginRequiredMiddleware",
    # exception middleware
    "blueapps.core.exceptions.middleware.AppExceptionMiddleware",
    # custom middleware
    "django.middleware.locale.LocaleMiddleware",
    "apps.middlewares.CommonMid",
    "apps.middleware.user_middleware.UserLocalMiddleware",
    "django_prometheus.middleware.PrometheusAfterMiddleware",
)
# 所有环境的日志级别可以在这里配置
# LOG_LEVEL = 'INFO'
# ===============================================================================
# 静态资源配置
# ===============================================================================
# 静态资源文件(js,css等)在APP上线更新后, 由于浏览器有缓存,
# 可能会造成没更新的情况. 所以在引用静态资源的地方,都把这个加上
# Django 模板中:<script src="/a.js?v={{ STATIC_VERSION }}"></script>
# mako 模板中:<script src="/a.js?v=${ STATIC_VERSION }"></script>
# 如果静态资源修改了以后,上线前改这个版本号即可
#
STATIC_VERSION = "1.0"
STATICFILES_DIRS = [os.path.join(BASE_DIR, "static")]
# ==============================================================================
# SENTRY相关配置
# ==============================================================================
SENTRY_DSN = os.environ.get("SENTRY_DSN")
if SENTRY_DSN:
RAVEN_CONFIG = {
"dsn": SENTRY_DSN,
}
# ==============================================================================
# CELERY相关配置
# ==============================================================================
# CELERY toggle: set to True to use celery and add these two lines to the
# project's Procfile:
# worker: python manage.py celery worker -l info
# beat: python manage.py celery beat -l info
# When not using celery, set this to False and remove the celery entries
# from the Procfile.
IS_USE_CELERY = True
# celery worker concurrency, defaults to 2; can be overridden via an
# environment variable or the Procfile
CELERYD_CONCURRENCY = os.getenv("BK_CELERYD_CONCURRENCY", 2)
# NOTE(review): pickle task serialization is only safe while the broker is
# fully trusted — confirm before exposing the broker.
CELERY_TASK_SERIALIZER = "pickle"
CELERY_ACCEPT_CONTENT = ["pickle"]
# Modules containing @task-decorated functions that celery must import.
CELERY_IMPORTS = (
    "apps.log_search.tasks.bkdata",
    "apps.log_search.tasks.async_export",
    "apps.log_search.tasks.project",
    "apps.log_search.handlers.index_set",
    "apps.log_search.tasks.mapping",
    "apps.log_databus.tasks.collector",
    "apps.log_databus.tasks.itsm",
    "apps.log_databus.tasks.bkdata",
    "apps.log_databus.tasks.archive",
    "apps.log_measure.tasks.report",
    "apps.log_extract.tasks",
)
# load logging settings
if RUN_VER != "open":
    LOGGING = get_logging_config_dict(locals())
    LOGGING["handlers"]["root"]["encoding"] = "utf-8"
    LOGGING["handlers"]["component"]["encoding"] = "utf-8"
    LOGGING["handlers"]["mysql"]["encoding"] = "utf-8"
    LOGGING["handlers"]["blueapps"]["encoding"] = "utf-8"
if not IS_LOCAL:
    # JSON log records for non-local environments.
    # Fix: the OTel fields previously used "$(" instead of the "%(" logging
    # placeholder syntax (and lacked a separating space after %(message)s),
    # so otelTraceID/otelSpanID were emitted as literal text.
    logging_format = {
        "()": "pythonjsonlogger.jsonlogger.JsonFormatter",
        "fmt": (
            "%(levelname)s %(asctime)s %(pathname)s %(lineno)d "
            "%(funcName)s %(process)d %(thread)d %(message)s "
            "%(otelTraceID)s %(otelSpanID)s %(otelServiceName)s"
        ),
    }
    LOGGING["formatters"]["verbose"] = logging_format
# Optional UDP log shipping: when enabled, all loggers send JSON records to a
# remote UDP collector instead of local files.
BKLOG_UDP_LOG = os.getenv("BKAPP_UDP_LOG", "off") == "on"
if BKLOG_UDP_LOG:
    LOG_UDP_SERVER_HOST = os.getenv("BKAPP_UDP_LOG_SERVER_HOST", "")
    LOG_UDP_SERVER_PORT = int(os.getenv("BKAPP_UDP_LOG_SERVER_PORT", 0))
    LOG_LEVEL = os.getenv("LOG_LEVEL", "INFO")
    LOGGING = {
        "version": 1,
        "formatters": {
            "json": {
                "()": "pythonjsonlogger.jsonlogger.JsonFormatter",
                # Fix: the OTel fields previously used "$(" instead of the
                # "%(" logging placeholder syntax, so trace/span IDs were
                # emitted as literal text instead of being interpolated.
                "fmt": (
                    "%(levelname)s %(asctime)s %(pathname)s %(lineno)d "
                    "%(funcName)s %(process)d %(thread)d %(message)s "
                    "%(otelTraceID)s %(otelSpanID)s %(otelServiceName)s"
                ),
            }
        },
        "handlers": {
            "udp": {
                "formatter": "json",
                "class": "apps.utils.log.UdpHandler",
                "host": LOG_UDP_SERVER_HOST,
                "port": LOG_UDP_SERVER_PORT,
            },
            "stdout": {
                "class": "logging.StreamHandler",
                "formatter": "json",
                "stream": sys.stdout,
            },
        },
        "loggers": {
            "django": {"handlers": ["udp"], "level": "INFO", "propagate": True},
            "django.server": {
                "handlers": ["udp"],
                "level": LOG_LEVEL,
                "propagate": True,
            },
            "django.request": {
                "handlers": ["udp"],
                "level": "ERROR",
                "propagate": True,
            },
            "django.db.backends": {
                "handlers": ["udp"],
                "level": LOG_LEVEL,
                "propagate": True,
            },
            # the root logger, used for the whole project
            "root": {"handlers": ["udp"], "level": LOG_LEVEL, "propagate": True},
            # component (ESB) call logs
            "component": {
                "handlers": ["udp"],
                "level": LOG_LEVEL,
                "propagate": True,
            },
            "celery": {"handlers": ["udp"], "level": LOG_LEVEL, "propagate": True},
            # other loggers...
            # blueapps
            "blueapps": {
                "handlers": ["udp"],
                "level": LOG_LEVEL,
                "propagate": True,
            },
            # regular app logs
            "app": {"handlers": ["udp"], "level": LOG_LEVEL, "propagate": True},
        },
    }
# OpenTelemetry trace export configuration
OTLP_TRACE = os.getenv("BKAPP_OTLP_TRACE", "off") == "on"
OTLP_GRPC_HOST = os.getenv("BKAPP_OTLP_GRPC_HOST", "http://localhost:4317")
OTLP_BK_DATA_ID = int(os.getenv("BKAPP_OTLP_BK_DATA_ID", 1000))
# ===============================================================================
# 项目配置
# ===============================================================================
BK_PAAS_HOST = os.environ.get("BK_PAAS_HOST", "")
# ESB API调用前辍
PAAS_API_HOST = BK_PAAS_HOST
BK_PAAS_INNER_HOST = os.environ.get("BK_PAAS_INNER_HOST", BK_PAAS_HOST)
BK_CC_HOST = BK_PAAS_HOST.replace("paas", "cmdb")
BKDATA_URL = BK_PAAS_HOST
MONITOR_URL = ""
BK_DOC_URL = "https://bk.tencent.com/docs/"
BK_DOC_QUERY_URL = "https://bk.tencent.com/docs/document/5.1/90/3822/"
BK_FAQ_URL = "https://bk.tencent.com/s-mart/community"
# 计算平台文档地址
BK_DOC_DATA_URL = ""
BK_HOT_WARM_CONFIG_URL = (
"https://www.elastic.co/guide/en/elasticsearch/reference/master/modules-cluster.html#shard-allocation-awareness"
)
# bulk_request limit
BULK_REQUEST_LIMIT = int(os.environ.get("BKAPP_BULK_REQUEST_LIMIT", 500))
# redis_version
REDIS_VERSION = int(os.environ.get("BKAPP_REDIS_VERSION", 2))
# 该配置需要等待SITE_URL被patch掉才能正确配置,因此放在patch逻辑后面
GRAFANA = {
"HOST": os.getenv("BKAPP_GRAFANA_URL", ""),
"PREFIX": "{}grafana/".format(os.getenv("BKAPP_GRAFANA_PREFIX", SITE_URL)),
"ADMIN": (os.getenv("BKAPP_GRAFANA_ADMIN_USERNAME", "admin"), os.getenv("BKAPP_GRAFANA_ADMIN_PASSWORD", "admin")),
"PROVISIONING_CLASSES": ["apps.grafana.provisioning.Provisioning", "apps.grafana.provisioning.TraceProvisioning"],
"PERMISSION_CLASSES": ["apps.grafana.permissions.BizPermission"],
}
# 是否可以跨业务创建索引集
Index_Set_Cross_Biz = False
CONF_PATH = os.path.abspath(__file__)
PROJECT_ROOT = os.path.dirname(os.path.dirname(CONF_PATH))
# BASE_DIR = os.path.dirname(PROJECT_ROOT)
PYTHON_BIN = os.path.dirname(sys.executable)
INIT_SUPERUSER = []
DEBUG = False
SHOW_EXCEPTION_DETAIL = False
# 敏感参数
SENSITIVE_PARAMS = ["app_code", "app_secret", "bk_app_code", "bk_app_secret", "auth_info"]
# esb模块中esb允许转发的接口
ALLOWED_MODULES_FUNCS = {
"apps.log_databus.views.collector_views": {"tail": "tail"},
"apps.log_databus.views.storage_views": {"connectivity_detect": "connectivity_detect"},
}
# esb模块中转发meta接口的传发设置
META_ESB_FORWARD_CONFIG = {
"create_es_snapshot_repository": {
"iam_key": "cluster_id",
"target_call": "create_es_snapshot_repository",
"iam_actions": ["manage_es_source"],
"iam_resource": "es_source",
},
"modify_es_snapshot_repository": {
"iam_key": "cluster_id",
"target_call": "modify_es_snapshot_repository",
"iam_actions": ["manage_es_source"],
"iam_resource": "es_source",
},
"delete_es_snapshot_repository": {
"iam_key": "cluster_id",
"target_call": "delete_es_snapshot_repository",
"iam_actions": ["manage_es_source"],
"iam_resource": "es_source",
},
"verify_es_snapshot_repository": {"is_view_permission": True, "target_call": "verify_es_snapshot_repository"},
}
# resf_framework
REST_FRAMEWORK = {
"DATETIME_FORMAT": "%Y-%m-%d %H:%M:%S",
"EXCEPTION_HANDLER": "apps.generic.custom_exception_handler",
"SEARCH_PARAM": "keyword",
"DEFAULT_RENDERER_CLASSES": ("rest_framework.renderers.JSONRenderer",),
}
# 是否同步业务
USING_SYNC_BUSINESS = True
# ==============================================================================
# 国际化相关配置
# ==============================================================================
# 时区
USE_TZ = True
TIME_ZONE = "Asia/Shanghai"
# 数据平台后台时区
TRANSFER_TIME_ZONE = "GMT"
DATAAPI_TIME_ZONE = "Etc/GMT-8"
BKDATA_DATETIME_FORMAT = "%Y-%m-%d %H:%M:%S%z"
# admin时间显示
DATETIME_FORMAT = "Y-m-d H:i:s"
# 翻译
USE_I18N = True
USE_L10N = True
LANGUAGE_CODE = "zh-cn"
LOCALEURL_USE_ACCEPT_LANGUAGE = True
LANGUAGES = (("en", "English"), ("zh-cn", "简体中文"))
LANGUAGE_SESSION_KEY = "blueking_language"
LANGUAGE_COOKIE_NAME = "blueking_language"
# 设定使用根目录的locale
LOCALE_PATHS = (os.path.join(PROJECT_ROOT, "locale"),)
# ===============================================================================
# Authentication
# ===============================================================================
AUTH_USER_MODEL = "account.User"
AUTHENTICATION_BACKENDS = (
"blueapps.account.backends.BkJwtBackend",
"blueapps.account.backends.UserBackend",
"django.contrib.auth.backends.ModelBackend",
)
REDIRECT_FIELD_NAME = "c_url"
# 验证登录的cookie名
BK_COOKIE_NAME = "bk_token"
BK_SUPPLIER_ACCOUNT = os.getenv("BKAPP_BK_SUPPLIER_ACCOUNT", "")
# 数据库初始化 管理员列表
ADMIN_USERNAME_LIST = ["admin"]
SYSTEM_USE_API_ACCOUNT = "admin"
# 权限
AUTH_TYPE = "RBAC"
ACTION_PROJECT_MANAGE = "project.manage"
ACTION_PROJECT_RETRIEVE = "project.retrieve"
ACTION_INDEX_SET_RETRIEVE = "index_set.retrieve"
ACTION_INDEX_SET_MANAGE = "project.manage"
ACTION_DATABUS_MANAGE = "project.manage"
ACTION_EXTRACT_MANAGE = "project.manage"
ACTION_MEASURE = "project.manage"
# 数据平台鉴权方式
BKDATA_DATA_APP_CODE = os.getenv("BKAPP_BKDATA_DATA_APP_CODE", APP_CODE)
BKDATA_DATA_TOKEN_ID = os.getenv("BKAPP_BKDATA_DATA_TOKEN_ID", 0)
BKDATA_DATA_TOKEN = os.getenv("BKAPP_BKDATA_DATA_TOKEN", "")
# 登录窗口大小
IFRAME_HEIGHT = int(os.getenv("BKAPP_IFRAME_HEIGHT", 400))
IFRAME_WIDTH = int(os.getenv("BKAPP_IFRAME_WIDTH", 400))
# ===============================================================================
# FeatureToggle 特性开关:以内部版为准,其它版本根据需求调整
# 此配置以V4.2.X企业版做为默认配置,其它版本按需进行调整
# ===============================================================================
FEATURE_TOGGLE = {
# 菜单:apps.log_search.handlers.meta.MetaHandler.get_menus
# 索引集管理-数据源
"scenario_log": os.environ.get("BKAPP_FEATURE_SCENARIO_LOG", "on"), # 采集
"scenario_bkdata": "on", # 数据平台
"scenario_es": os.environ.get("BKAPP_FEATURE_SCENARIO_ES", "on"), # 第三方ES:依赖采集接入
# 日志采集-字段类型
"es_type_object": os.environ.get("BKAPP_ES_TYPE_OBJECT", "on"),
"es_type_nested": os.environ.get("BKAPP_ES_TYPE_NESTED", "off"),
# 是否使用数据平台token鉴权
"bkdata_token_auth": os.environ.get("BKAPP_BKDATA_TOKEN_AUTH", "off"),
# 提取cos链路开关
"extract_cos": os.environ.get("BKAPP_EXTRACT_COS", "off"),
# 采集接入ITSM
"collect_itsm": os.environ.get("BKAPP_COLLECT_ITSM", "off"),
# 自定义指标上报
"monitor_report": os.environ.get("BKAPP_MONITOR_REPORT", "on"),
}
SAAS_MONITOR = "bk_monitorv3"
SAAS_BKDATA = "bk_dataweb"
# 前端菜单配置
MENUS = [
{"id": "retrieve", "name": _("检索"), "feature": "on", "icon": ""},
{
"id": "trace",
"name": _("调用链"),
"feature": "on",
"icon": "",
"children": [
{"id": "trace_list", "name": _("调用链列表"), "feature": "on", "icon": ""},
{"id": "trace_detail", "name": _("调用链详情"), "feature": "on", "icon": ""},
],
},
{"id": "extract", "name": _("日志提取"), "feature": "on", "icon": ""},
{"id": "monitor", "name": _("监控策略"), "feature": "on", "icon": ""},
{
"id": "dashboard",
"name": _("仪表盘"),
"feature": "on" if GRAFANA["HOST"] else "off",
"icon": "",
"children": [
{"id": "create_dashboard", "name": _("新建仪表盘"), "feature": "on", "icon": ""},
{"id": "create_folder", "name": _("新建目录"), "feature": "on", "icon": ""},
{"id": "import_dashboard", "name": _("导入仪表盘"), "feature": "on", "icon": ""},
],
},
{
"id": "manage",
"name": _("管理"),
"feature": "on",
"icon": "",
"children": [
{
"id": "manage_access",
"name": _("日志接入"),
"feature": "on",
"icon": "",
"keyword": _("接入"),
"children": [
{
"id": "log_collection",
"name": _("日志采集"),
"feature": "on",
"scenes": "scenario_log",
"icon": "document",
},
{
"id": "bk_data_collection",
"name": _("计算平台"),
"feature": FEATURE_TOGGLE["scenario_bkdata"],
"scenes": "scenario_bkdata",
"icon": "calculation-fill",
},
{
"id": "es_collection",
"name": _("第三方ES"),
"feature": "on",
"scenes": "scenario_es",
"icon": "elasticsearch",
},
{"id": "custom_collection", "name": _("自定义接入"), "feature": "off", "icon": ""},
],
},
{
"id": "log_clean",
"name": _("日志清洗"),
"feature": "on",
"icon": "",
"keyword": _("清洗"),
"children": [
{
"id": "clean_list",
"name": _("清洗列表"),
"feature": "on",
"scenes": "scenario_log",
"icon": "info-fill--2",
},
{
"id": "clean_templates",
"name": _("清洗模板"),
"feature": "on",
"icon": "moban",
},
],
},
{
"id": "log_archive",
"name": _("日志归档"),
"feature": "on",
"icon": "",
"keyword": "归档",
"children": [
{
"id": "archive_repository",
"name": _("归档仓库"),
"feature": "on",
"icon": "new-_empty-fill",
},
{
"id": "archive_list",
"name": _("归档列表"),
"feature": "on",
"icon": "audit-fill",
},
{
"id": "archive_restore",
"name": _("归档回溯"),
"feature": "on",
"icon": "withdraw-fill",
},
],
},
{
"id": "manage_extract_strategy",
"name": _("日志提取"),
"icon": "",
"keyword": _("提取"),
"feature": os.environ.get("BKAPP_FEATURE_EXTRACT", "on"),
"children": [
{"id": "manage_log_extract", "name": _("日志提取配置"), "feature": "on", "icon": "cc-log"},
{"id": "extract_link_manage", "name": _("提取链路管理"), "feature": "on", "icon": "assembly-line-fill"},
],
},
{
"id": "log_archive",
"name": _("日志归档"),
"feature": "off",
"icon": "",
"children": [{"id": "log_archive_conf", "name": _("日志归档"), "feature": "off", "icon": ""}],
},
{
"id": "trace_track",
"name": _("全链路追踪"),
"feature": os.environ.get("BKAPP_FEATURE_TRACE", "on"),
"icon": "",
"keyword": "trace",
"children": [
{
"id": "collection_track",
"name": _("采集接入"),
"feature": "off",
"scenes": "scenario_log",
"icon": "",
},
{
"id": "bk_data_track",
"name": _("计算平台"),
"feature": FEATURE_TOGGLE["scenario_bkdata"],
"scenes": "scenario_bkdata",
"icon": "cc-cabinet",
},
{"id": "bk_data_track", "name": _("第三方ES"), "feature": "off", "scenes": "scenario_es", "icon": ""},
{"id": "sdk_track", "name": _("SDK接入"), "feature": "off", "icon": ""},
],
},
{
"id": "es_cluster_status",
"name": _("ES集群"),
"feature": "on",
"icon": "",
"keyword": _("集群"),
"children": [{"id": "es_cluster_manage", "name": _("集群管理"), "feature": "on", "icon": "cc-influxdb"}],
},
{
"id": "manage_data_link",
"name": _("设置"),
"feature": os.environ.get("BKAPP_FEATURE_DATA_LINK", "on"),
"icon": "",
"keyword": _("设置"),
"children": [
{"id": "manage_data_link_conf", "name": _("采集链路管理"), "feature": "on", "icon": "log-setting"}
],
},
],
},
]
# TAM
TAM_AEGIS_KEY = os.environ.get("BKAPP_TAM_AEGIS_KEY", "")
# 任务过期天数
EXTRACT_EXPIRED_DAYS = int(os.getenv("BKAPP_EXTRACT_EXPIRED_DAYS", 1))
# windows系统名称列表
WINDOWS_OS_NAME_LIST = [
os_name.lower() for os_name in os.getenv("BKAPP_WINDOWS_OS_NAME", "xserver,windows").split(",") if os_name
]
# 中转服务器配置
EXTRACT_FILE_PATTERN_CHARACTERS = os.getenv("BKAPP_EXTRACT_FILE_PATTERN_CHARACTERS", r"():@\[\]a-zA-Z0-9._/*-~")
EXTRACT_SAAS_STORE_DIR = os.getenv("BKAPP_EXTRACT_SAAS_STORY_DIR", "/data/app/code/USERRES")
EXTRACT_TRANSIT_EXPIRED = int(os.getenv("BKAPP_EXTRACT_TRANSIT_EXPIRED", 60 * 5))
EXTRACT_DISTRIBUTION_DIR = os.getenv("BKAPP_EXTRACT_DISTRIBUTION_DIR", "/data/bk_log_extract/distribution/")
EXTRACT_COS_DOMAIN = os.getenv("BKAPP_EXTRACT_COS_DOMAIN")
# 最大打包文件大小限制 单位为Mb 默认2048Mb
EXTRACT_PACK_MAX_FILE_SZIE_LIMIT = int(os.getenv("BKAPP_EXTRACT_PACK_MAX_FILE_SZIE_LIMIT", 2048))
# 同时下载的文件数量限制
CSTONE_DOWNLOAD_FILES_LIMIT = int(os.getenv("BKAPP_CSTONE_DOWNLOAD_FILES_LIMIT", 10))
# 过期pipeline任务超时时间设定
PIPELINE_TASKS_EXPIRED_TIME = os.getenv("BKAPP_PIPELINE_TASKS_EXPIRED_TIME", 24)
# Windows 机器JOB执行账户
WINDOWS_ACCOUNT = os.getenv("BKAPP_WINDOWS_ACCOUNT", "system")
# pipeline 配置
from pipeline.celery.settings import CELERY_QUEUES as PIPELINE_CELERY_QUEUES
from pipeline.celery.settings import CELERY_ROUTES as PIPELINE_CELERY_ROUTES
CELERY_ROUTES = PIPELINE_CELERY_ROUTES
CELERY_QUEUES = PIPELINE_CELERY_QUEUES
# ===============================================================================
# databus
# ===============================================================================
TABLE_ID_PREFIX = "bklog"
ES_DATE_FORMAT = os.environ.get("BKAPP_ES_DATE_FORMAT", "%Y%m%d")
ES_SHARDS_SIZE = int(os.environ.get("BKAPP_ES_SHARDS_SIZE", 30))
ES_SLICE_GAP = int(os.environ.get("BKAPP_ES_SLICE_GAP", 60))
ES_SHARDS = int(os.environ.get("BKAPP_ES_SHARDS", 3))
ES_REPLICAS = int(os.environ.get("BKAPP_ES_REPLICAS", 1))
ES_STORAGE_DEFAULT_DURATION = int(os.environ.get("BKAPP_ES_STORAGE_DURATION", 7))
ES_PRIVATE_STORAGE_DURATION = int(os.environ.get("BKAPP_ES_PRIVATE_STORAGE_DURATION", 365))
ES_PUBLIC_STORAGE_DURATION = int(os.environ.get("BKAPP_ES_PUBLIC_STORAGE_DURATION", 7))
# Public cluster storage capacity limit
ES_STORAGE_CAPACITY = os.environ.get("BKAPP_ES_STORAGE_CAPACITY", 0)
# ES compatibility mode: off by default, toggled via environment variable
ES_COMPATIBILITY = int(os.environ.get("BKAPP_ES_COMPATIBILITY", 0))
# Scroll-based export: off by default, toggled via environment variable.
# Fix: os.environ values are strings, so the previous default of False made
# ANY set value truthy (even "false"/"off"); parse the flag explicitly.
FEATURE_EXPORT_SCROLL = os.environ.get("BKAPP_FEATURE_EXPORT_SCROLL", "").lower() in ("1", "true", "on")
# BCS
PAASCC_APIGATEWAY = ""
# 日志采集器配置
# 日志文件多久没更新则不再读取
COLLECTOR_CLOSE_INACTIVE = 86400
# 行日志一次上报条数
COLLECTOR_ROW_PACKAGE_COUNT = 100
# 段日志一次上报条数
COLLECTOR_SECTION_PACKAGE_COUNT = 10
# 系统支持的清洗类型
COLLECTOR_SCENARIOS = os.environ.get("BKAPP_COLLECTOR_SCENARIOS", "row,section").split(",")
# 接入指引
COLLECTOR_GUIDE_URL = os.environ.get("BKAPP_COLLECTOR_GUIDE_URL", "")
# ITSM接入服务ID
COLLECTOR_ITSM_SERVICE_ID = int(os.environ.get("BKAPP_COLLECTOR_ITSM_SERVICE_ID", 0))
ITSM_LOG_DISPLAY_ROLE = "LOG_SEARCH"
BLUEKING_BK_BIZ_ID = int(os.environ.get("BKAPP_BLUEKING_BK_BIZ_ID", 2))
BKMONITOR_CUSTOM_PROXY_IP = os.environ.get(
"BKAPP_BKMONITOR_CUSTOM_PROXY_IP", "http://report.bkmonitorv3.service.consul:10205"
)
# ===============================================================================
# EsQuery
# ===============================================================================
ES_QUERY_ACCESS_LIST: list = ["bkdata", "es", "log"]
ES_QUERY_TIMEOUT = int(os.environ.get("BKAPP_ES_QUERY_TIMEOUT", 55))
# ESQUERY 查询白名单,直接透传
ESQUERY_WHITE_LIST = [
"bk_log_search",
"hippogriff-4",
"bk_bklog",
"bk_monitor",
"bk_bkmonitor",
"bk_monitorv3",
"bk_bkmonitorv3",
"log-trace",
"log-search-4",
"bkmonitorv3",
"bk-log-search",
"gem3",
"data",
"dataweb",
]
# ===============================================================================
# Demo business configuration
# ===============================================================================
BIZ_ACCESS_URL = os.getenv("BKAPP_BIZ_ACCESS_URL", "")
DEMO_BIZ_ID = int(os.getenv("BKAPP_DEMO_BIZ_ID") or 0)
# NOTE(review): bool() on the raw env string means ANY non-empty value —
# including "false" or "0" — enables demo-biz editing; confirm intended.
DEMO_BIZ_EDIT_ENABLED = bool(os.getenv("BKAPP_DEMO_BIZ_EDIT_ENABLED", ""))
# ==============================================================================
# Debug only: set this environment variable as needed to lift CORS limits
# ==============================================================================
# NOTE(review): BKAPP_CORS_ENABLED == "off" *enables* the permissive CORS
# setup below — the flag name reads inverted; confirm the intent with ops.
if os.getenv("BKAPP_CORS_ENABLED", "on") == "off":
    # allow all hosts
    CORS_ORIGIN_ALLOW_ALL = True
    MIDDLEWARE += ("corsheaders.middleware.CorsMiddleware",)
    # cookies will be allowed to be included in cross-site HTTP requests
    CORS_ALLOW_CREDENTIALS = True
# ==============================================================================
# consul
# ==============================================================================
CONSUL_CLIENT_CERT_FILE_PATH = os.getenv("CONSUL_CLIENT_CERT_FILE_PATH")
CONSUL_CLIENT_KEY_FILE_PATH = os.getenv("CONSUL_CLIENT_KEY_FILE_PATH")
CONSUL_SERVER_CA_CERT_PATH = os.getenv("CONSUL_SERVER_CA_CERT_PATH")
CONSUL_HTTPS_PORT = os.getenv("CONSUL_HTTPS_PORT")
# ==============================================================================
# kafka
# ==============================================================================
# 默认kafka域名,若提供了,则不再使用metadata返回的域名。
# 用于 SaaS 没有 consul 域名解析的情况。需要手动给出
DEFAULT_KAFKA_HOST = os.getenv("BKAPP_DEFAULT_KAFKA_HOST")
# ==============================================================================
# redis
# ==============================================================================
# 默认开启
USE_REDIS = os.getenv("BKAPP_USE_REDIS", "on") == "on"
REDIS_HOST = os.getenv("BKAPP_REDIS_HOST", os.getenv("REDIS_HOST", "127.0.0.1"))
REDIS_PORT = int(os.getenv("BKAPP_REDIS_PORT", os.getenv("REDIS_PORT", 6379)))
REDIS_PASSWD = os.getenv("BKAPP_REDIS_PASSWORD", os.getenv("REDIS_PASSWORD", ""))
REDIS_MODE = os.getenv("BKAPP_REDIS_MODE", os.getenv("BK_BKLOG_REDIS_MODE", "single"))
if REDIS_MODE == "single" and BKAPP_IS_BKLOG_API:
REDIS_HOST = os.getenv("BK_BKLOG_REDIS_HOST", os.getenv("REDIS_HOST", ""))
REDIS_PORT = int(os.getenv("BK_BKLOG_REDIS_PORT", os.getenv("REDIS_PORT", 6379)))
REDIS_PASSWD = os.getenv("BK_BKLOG_REDIS_PASSWORD", os.getenv("REDIS_PASSWORD", ""))
if REDIS_MODE == "sentinel":
REDIS_PASSWD = os.getenv("BK_BKLOG_REDIS_PASSWORD", os.getenv("REDIS_PASSWORD", ""))
REDIS_SENTINEL_HOST = os.getenv("BK_BKLOG_REDIS_SENTINEL_HOST", "")
REDIS_SENTINEL_PORT = int(os.getenv("BK_BKLOG_REDIS_SENTINEL_PORT", 26379))
REDIS_SENTINEL_MASTER_NAME = os.getenv("BK_BKLOG_REDIS_SENTINEL_MASTER_NAME", "mymaster")
REDIS_SENTINEL_PASSWORD = os.getenv("BK_BKLOG_REDIS_SENTINEL_MASTER_PASSWORD", "")
# BKLOG 后台QOS配置
BKLOG_QOS_USE = os.getenv("BKAPP_QOS_USE", "on") == "on"
BKLOG_QOS_LIMIT_APP = [
"bk_monitor",
"bk_bkmonitor",
"bk_monitorv3",
"bk_bkmonitorv3",
"bkmonitorv3",
]
# 窗口时间 单位分钟
BKLOG_QOS_LIMIT_WINDOW = int(os.getenv("BK_BKLOG_QOS_LIMIT_WINDOW", 5))
# 窗口内超时次数
BKLOG_QOS_LIMIT = int(os.getenv("BK_BKLOG_QOS_LIMIT", 3))
# 达到窗口内限制次数屏蔽时间 单位分钟
BKLOG_QOS_LIMIT_TIME = int(os.getenv("BK_BKLOG_QOS_LIMIT_TIME", 5))
# ajax请求401返回plain信息
IS_AJAX_PLAIN_MODE = True
# ==============================================================================
# Templates
# ==============================================================================
# mako template dir
MAKO_TEMPLATE_DIR = [os.path.join(PROJECT_ROOT, directory) for directory in ["static/dist", "templates"]]
VUE_INDEX = "index.html"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [os.path.join(PROJECT_ROOT, "templates"), os.path.join(PROJECT_ROOT, "static/dist/")],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
# the context to the templates
"django.contrib.messages.context_processors.messages",
"django.contrib.auth.context_processors.auth",
"django.template.context_processors.request",
"django.template.context_processors.csrf",
"apps.utils.context_processors.mysetting", # 自定义模版context,可在页面中使用STATIC_URL等变量
"django.template.context_processors.i18n",
],
"debug": DEBUG,
},
},
]
# ==============================================================================
# Cache
# ==============================================================================
CACHES = {
"redis": {
"BACKEND": "django_prometheus.cache.backends.redis.RedisCache",
"LOCATION": f"redis://{REDIS_HOST}:{REDIS_PORT}/0",
"OPTIONS": {"CLIENT_CLASS": "django_redis.client.DefaultClient", "PASSWORD": REDIS_PASSWD},
"KEY_PREFIX": APP_CODE,
"VERSION": REDIS_VERSION,
},
"db": {
"BACKEND": "django.core.cache.backends.db.DatabaseCache",
"LOCATION": "django_cache",
"OPTIONS": {"MAX_ENTRIES": 100000, "CULL_FREQUENCY": 10},
},
"dummy": {"BACKEND": "django.core.cache.backends.dummy.DummyCache"},
"locmem": {"BACKEND": "django.core.cache.backends.locmem.LocMemCache"},
}
CACHES["default"] = CACHES["db"]
CACHES["login_db"] = CACHES["db"]
if USE_REDIS:
CACHES["default"] = CACHES["redis"]
CACHES["login_db"] = CACHES["redis"]
if BKAPP_IS_BKLOG_API and REDIS_MODE == "sentinel" and USE_REDIS:
DJANGO_REDIS_CONNECTION_FACTORY = "apps.utils.sentinel.SentinelConnectionFactory"
CACHES["redis_sentinel"] = {
"BACKEND": "django_prometheus.cache.backends.redis.RedisCache",
"LOCATION": f"redis://{REDIS_SENTINEL_MASTER_NAME}?is_master=1",
"OPTIONS": {
"CLIENT_CLASS": "apps.utils.sentinel.SentinelClient",
"PASSWORD": REDIS_PASSWD,
"SENTINELS": [
(
REDIS_SENTINEL_HOST,
REDIS_SENTINEL_PORT,
)
],
"SENTINEL_KWARGS": {"password": REDIS_SENTINEL_PASSWORD},
},
"KEY_PREFIX": APP_CODE,
}
CACHES["default"] = CACHES["redis_sentinel"]
CACHES["login_db"] = CACHES["redis_sentinel"]
"""
以下为框架代码 请勿修改
"""
IS_CELERY = False
# Detect whether this process was launched as a celery worker/beat.
if "celery" in sys.argv:
    IS_CELERY = True

# celery settings
if IS_USE_CELERY:
    CELERY_ENABLE_UTC = True
    CELERYBEAT_SCHEDULER = "django_celery_beat.schedulers:DatabaseScheduler"

# remove disabled apps
if locals().get("DISABLED_APPS"):
    INSTALLED_APPS = locals().get("INSTALLED_APPS", [])
    DISABLED_APPS = locals().get("DISABLED_APPS", [])
    INSTALLED_APPS = [_app for _app in INSTALLED_APPS if _app not in DISABLED_APPS]
    # Settings whose entries may have been contributed by a disabled app.
    _keys = (
        "AUTHENTICATION_BACKENDS",
        "DATABASE_ROUTERS",
        "FILE_UPLOAD_HANDLERS",
        "MIDDLEWARE",
        "PASSWORD_HASHERS",
        "TEMPLATE_LOADERS",
        "STATICFILES_FINDERS",
        "TEMPLATE_CONTEXT_PROCESSORS",
    )
    import itertools
    # Strip every setting entry whose dotted path starts with a disabled app.
    for _app, _key in itertools.product(DISABLED_APPS, _keys):
        if locals().get(_key) is None:
            continue
        locals()[_key] = tuple([_item for _item in locals()[_key] if not _item.startswith(_app + ".")])
|
import numpy as np
import csv
from matplotlib import pyplot as plt
lines = []
np_lines = []
# Load the benchmark results. Column layout (';'-separated):
# size; complexity; function; time; score; extra — first row is the header.
with open("results_moons.csv", "r") as file:
    reader = csv.reader(file, delimiter=";")
    for l, row in enumerate(reader):
        if l != 0:
            # lines.append(row)
            # typed copy of the row (ints/floats parsed)
            lines.append(
                [int(row[0]), int(row[1]), row[2],
                 float(row[3]), float(row[4]), row[5]])
            # raw string rows, stacked into a numpy array below
            np_lines.append(row)
        else:
            fields = row
# All-string 2D array: the filtering below compares columns as strings.
np_lines = np.array(np_lines)
# print(lines)
print(np_lines)
print(fields)
# Display name and plot colour for each clustering routine found in the CSV.
name_function = {
    'call_CSP': 'CSP',
    'PLNE': 'PLNE',
    'call_GA': 'GA',
    'run_cop_kmeans': 'Cop-kmeans',
}
dico_color = {
    'call_CSP': 'red',
    'PLNE': 'blue',
    'call_GA': 'green',
    'run_cop_kmeans': 'orange',
}
# print(np_lines[:,2])
# Per-row colour/name lookups driven by the "function" column.
map_colors = [dico_color[func] for func in np_lines[:, 2]]
map_name = [name_function[func] for func in np_lines[:, 2]]
# GRAPH 1 TIME IN FUNCTION OF PB SIZE
# selected_complexity = '40'
# for function, name in name_function.items():
# filtered = np_lines[np_lines[:,2]==function]
# filtered = filtered[filtered[:,1]==selected_complexity]
# if function == 'call_GA':
# filtered_ga = filtered[filtered[:,3].astype(float)< 1500]
# filtered = filtered_ga
# plt.plot(filtered[:,0].astype(int),
# filtered[:,3].astype(float),
# dico_color[function], label=name)
# plt.xlabel("Number of points", fontsize=10)
# plt.ylabel("Time(s)", fontsize=10)
# plt.grid(True)
# plt.legend()
#GRAPH 2 TIME IN FUNCTION OF NUMBER OF CONSTRAINT
# selected_size = '200'
# for function, name in name_function.items():
# filtered = np_lines[np_lines[:,2]==function]
# filtered = filtered[filtered[:,0]==selected_size]
# if function == 'call_GA':
# filtered_ga = filtered[filtered[:,3].astype(float)< 1500]
# filtered = filtered_ga
# plt.plot(filtered[:,1].astype(int),
# filtered[:,3].astype(float),
# dico_color[function], label=name)
# plt.xlabel("Percentage of connections", fontsize=10)
# plt.ylabel("Time(s)", fontsize=10)
# plt.grid(True)
# plt.legend()
# GRAPH 3 SCORE IN FUNCTION OF PB SIZE
# selected_complexity = '40'
# for function, name in name_function.items():
# filtered = np_lines[np_lines[:,2]==function]
# filtered = filtered[filtered[:,1]==selected_complexity]
# if function == 'call_GA':
# filtered_ga = filtered[filtered[:,3].astype(float)< 1500]
# filtered = filtered_ga
# plt.plot(filtered[:,0].astype(int),
# filtered[:,4].astype(float),
# dico_color[function], label=name)
# plt.xlabel("Number of points", fontsize=10)
# plt.ylabel("Minimized Maximum Diameter", fontsize=10)
# plt.grid(True)
# plt.legend()
# GRAPH 4 SCORE IN FUNCTION OF NUMBER OF CONSTRAINT
# String comparison: np_lines holds raw CSV strings, hence '200' not 200.
selected_size = '200'
for function, name in name_function.items():
    # rows for this algorithm at the selected problem size
    filtered = np_lines[np_lines[:, 2] == function]
    filtered = filtered[filtered[:, 0] == selected_size]
    if function == 'call_GA':
        # drop GA runs that exceeded the 1500 s budget (timeouts)
        filtered_ga = filtered[filtered[:, 3].astype(float) < 1500]
        filtered = filtered_ga
    plt.plot(filtered[:, 1].astype(int),
             filtered[:, 4].astype(float),
             dico_color[function], label=name)
plt.xlabel("Percentage of connections", fontsize=10)
plt.ylabel("Minimized Maximum Diameter", fontsize=10)
plt.grid(True)
plt.legend()
plt.show()
|
#Author: Adrien Michaud
import sys
sys.path.append("../Config/")
import GlobalsVars as v
import arff
import os
import subprocess
import time
import numpy as np
import sys
import scipy as sp
import timeit
import cPickle
from scipy import signal
from sklearn import linear_model
#Used to create the tab containing all datas
def initTabData():
    """Build the nested prediction/gold-standard container.

    Layout: datas[part][nDim] holds one list per emotion dimension, and for
    the prediction parts ('dev', 'test', 'cccs') each dimension additionally
    gets one empty list per descriptor (modality).
    """
    allParts = ('dev', 'test', 'cccs', 'gstrain', 'gsdev', 'gstest')
    predParts = ('dev', 'test', 'cccs')
    datas = dict((part, []) for part in allParts)
    for nDim in range(len(v.eName)):
        for part in allParts:
            datas[part].append([])
        for _ in range(len(v.desc)):
            for part in predParts:
                datas[part][nDim].append([])
    return datas
#End initTabData
#Save an object on the disk using cPickle module
def saveObject(obj, addr):
    """Serialize *obj* to the file at *addr* (binary cPickle dump)."""
    with open(addr, "wb") as handle:
        cPickle.dump(obj, handle)
#End saveObject
#Restaure an object saved on the disk using cPickle module
def restaurObject(addr):
    """Load and return the cPickle-serialized object stored at *addr*."""
    with open(addr, "rb") as handle:
        return cPickle.load(handle)
#End restaurObject
#Augment the tab to take context
def tabContext(datas, cMode, cSize):
    """Return a context-augmented copy of *datas*.

    Each element is replaced by a window of cSize neighbouring elements,
    taken to the left, to the right, or centred depending on *cMode*
    ("left" / "right" / anything else = centred). Window indices are
    clamped to the valid range, so edge elements are repeated.
    """
    augmented = []
    half = int(cSize / 2)
    last = len(datas) - 1
    for i in range(len(datas)):
        window = []
        for j in range(cSize):
            if cMode == "left":
                pos = i - j
            elif cMode == "right":
                pos = i + cSize - j
            else:
                pos = i + half - j
            # clamp to the tab bounds (edge padding)
            window.append(datas[min(max(pos, 0), last)])
        augmented.append(window)
    return augmented
#End tabContext
#Cut a tab to a size given
def cutTab(tab, size):
    """Shrink *tab* to *size* entries, sampling v.nbFPart evenly spaced chunks.

    The hard-coded 9 matches the fold count used elsewhere in the pipeline.
    Returns *tab* unchanged when it already has the requested length.
    """
    total = len(tab)
    if total == size:
        return tab
    perChunk = int(size / 9)
    shrunk = []
    for chunk in range(v.nbFPart):
        start = int(total / 9) * chunk
        for offset in range(perChunk):
            # the -1 reproduces the original indexing (first pick is start-1)
            shrunk.append(tab[start + offset - 1])
    return shrunk
#End cutTab
#Used to uniformize tab
def cutTabs(datas, part):
    """Truncate every gold-standard and prediction tab to a common length.

    The target length is the smallest gold-standard tab over all emotion
    dimensions and all parts in *part*; every tab is then cut to that size
    with cutTab. Mutates and returns *datas*.
    """
    #First we uniformize the GS: find the minimum gold-standard length
    minSize = 0
    for nDim in range(len(v.eName)):
        for s in part:
            # minSize == 0 means "not initialised yet"
            if (minSize > len(datas['gs'+s][nDim]) or minSize == 0):
                minSize = len(datas['gs'+s][nDim])
    oneF = int(minSize/9)  # NOTE(review): computed but never used here
    #We cut all tab to reach this size
    for nDim in range(len(v.eName)):
        for s in part:
            #Gold Standard Tab
            datas['gs'+s][nDim] = cutTab(datas['gs'+s][nDim], minSize)
            #Predictions tab
            for nMod in range(len(v.desc)):
                datas[s][nDim][nMod] = cutTab(datas[s][nDim][nMod], minSize)
    return datas
#End cutTabs
#Used to resample the tab
def resamplingTab(tab, size):
    """Resample *tab* to *size* points (Fourier-domain scipy resample).

    Returns *tab* untouched when it already has the requested length;
    otherwise returns a numpy array of length *size*.
    """
    if len(tab) == size:
        return tab
    return signal.resample(tab, size)
#End resamplingTab
#Calculus of CCC
def cccCalc(pred,ref):
    """Concordance Correlation Coefficient between `pred` and `ref`.

    With v.cccMode == 0 the CCC is computed on the whole sequences;
    otherwise the sequences are split into v.nbFPart parts and the mean
    of the per-part CCCs is returned.  Returns 0.0 (with a console
    message) when the two sequences differ in length.
    NaN values are ignored via the nan-aware numpy reductions.
    """
    if len(pred) != len(ref):
        print("Size of pred and ref are not the same")
        return 0.0
    def _ccc(p, r):
        # Lin's CCC: 2*cov / (var_p + var_r + (mean_p - mean_r)^2)
        pMean = np.nanmean(p)
        rMean = np.nanmean(r)
        pVar = np.nanvar(p)
        rVar = np.nanvar(r)
        cov = np.nanmean(np.multiply(p - pMean, r - rMean))
        return (2*cov)/(pVar+rVar+pow((pMean-rMean),2))
    if v.cccMode == 0:
        return _ccc(pred, ref)
    # Per-part CCC, averaged (integer division: Python 2 semantics)
    oneF = len(pred)/v.nbFPart
    partCccs = []
    for i in range(v.nbFPart):
        lo = i*oneF
        partCccs.append(_ccc(pred[lo:lo+oneF-1], ref[lo:lo+oneF-1]))
    return np.nanmean(partCccs)
#End cccCalc
#Remove the column that are not necessary in ARFF
def removeColArff(arff):
    """Delete from an ARFF dict every attribute listed in v.removedColArff,
    together with the matching column of arff['data'].  Mutates and returns
    `arff`.

    Fixes: removed the unused local `lenght` (typo; assigned twice, never
    read) and the stray semicolon; the per-attribute scan of the removal
    list is replaced by a one-time set lookup.
    """
    removed_names = set(str(name) for name in v.removedColArff)
    ind = 0
    while ind < len(arff['attributes']):
        if str(arff['attributes'][ind][0]) in removed_names:
            del(arff['attributes'][ind])
            arff['data'] = np.delete(arff['data'], ind, 1)
            # do not advance ind: the next attribute shifted into this slot
        else:
            ind += 1
    return arff
#End removeColArff
#Returning the multimodal prediction according to coef
def predMulti(coef, preds, nDim, funcType, cSize):
    """Fuse per-modality predictions into one multimodal prediction.

    preds is indexed as preds[dim][mod][frame][contextSize].
    funcType == 0: monodimensional fusion with a flat coefficient vector;
    otherwise: multidimensional fusion with one coefficient vector per
    emotion dimension (coef[dim][...]).
    Returns a list with one fused value per frame.

    Fix: removed the unused local `nbMod` in the multidimensional branch.
    """
    pred = []
    for i in range(len(preds[nDim][0])):
        p = 0
        if (funcType == 0):
            for nMod in range(len(preds[nDim])):
                for size in range(cSize):
                    # NOTE(review): this gives ind == nMod*(size+1), which is
                    # inconsistent with the (nMod*cSize)+size layout used in
                    # the branch below -- TODO confirm it is intentional.
                    ind = size*nMod+nMod
                    p += coef[ind]*preds[nDim][nMod][i][size]
        else:
            for dim in range(len(v.eName)):
                for nMod in range(len(preds[nDim])):
                    for size in range(cSize):
                        ind = (nMod*cSize)+size
                        p += coef[dim][ind]*preds[dim][nMod][i][size]
        pred.append(p)
    return pred
#End predMulti
#Put to 0 NaN values in ARFF
def arffNan(arff):
    """Replace missing values ("?", None, NaN) with 0.0 in arff['data'],
    in place, and return the mutated ARFF dict.

    BUG FIX: the original tested `val[ind] == np.nan`, which is ALWAYS
    False because NaN is not equal to anything (including itself), so
    real NaN cells were never zeroed.  We now use np.isnan on float
    cells (np.float64 subclasses float, so it is covered too).
    """
    for ind, att in enumerate(arff['attributes']):
        for val in arff['data']:
            cell = val[ind]
            if cell == "?" or cell is None or (isinstance(cell, float) and np.isnan(cell)):
                val[ind] = 0.0
    return arff
#End arffNan
#Put to NaN ? or None values in ARFF:
def arffToNan(arff):
    """Replace missing-value markers ("?", None) with np.nan in
    arff['data'], in place, and return the mutated ARFF dict.

    Fix: use the identity test `is None` instead of `== None` (PEP 8).
    """
    for ind, att in enumerate(arff['attributes']):
        for val in arff['data']:
            if val[ind] == "?" or val[ind] is None:
                val[ind] = np.nan
    return arff
#End arffToNan
#Load and prepare files for the unimodal prediction
def unimodalPredPrep(wSize, wStep, nMod):
    """Load the normalised feature ARFFs of modality nMod for every
    partition in v.part, zero their missing values, and resample each to
    the frame count of the training file at step v.tsp.

    Returns (feats, trainLen) where feats maps partition name to a numpy
    array of features and trainLen is the reference frame count.
    """
    # Reference length: number of lines in the training file at step v.tsp
    refPath = v.descNorm[nMod]+"train_"+str(wSize)+"_"+str(v.tsp)+".arff"
    trainLen = len(arff.load(open(refPath, "rb"))['data'])
    feats = {}
    for s in v.part:
        loaded = arff.load(open(v.descNorm[nMod]+s+"_"+str(wSize)+"_"+str(wStep)+".arff", "rb"))
        cleaned = arffNan(loaded)               # missing values -> 0
        asArray = np.array(cleaned['data'])
        feats[s] = resamplingTab(asArray, trainLen)  # align to train length
    return feats, trainLen
#End unimodalPredPrep
def isInt(string, limit):
    """Return True iff `string` is exactly the decimal representation of
    an integer in [0, limit) -- e.g. "03" and "-1" are rejected."""
    return any(string == str(i) for i in range(limit))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.