index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
995,400 | 8eaf022b1520c7f589cabeb2838cac21f68b7aa0 | import rhocoul
import sys
import os
from math import sqrt, exp, cos, sin, pi, log, log10, acos
import numpy as np
def run(tau,xkappa,z,D):
import Cusp
#du00 = Cusp.du00dBeta(tau,1./xkappa,z,D,1e-9)
#print 'du00', du00
u00 = Cusp.u00(tau,1./xkappa,z,D,1e-9)
print 0.0, u00
kmax = 10
lmax = 80
nmax = 100
rMin = 0.1
L = 6.0
rMax = L/2.
rN = 30
gridType = "LINEAR"
if gridType == "LOG":
rGrid = np.logspace(log10(rMin), log10(rMax), num=rN, endpoint=True)
elif gridType == "LINEAR":
rGrid = np.linspace(rMin, rMax, num=rN, endpoint=True)
elif gridType == "OPT":
rGrid = [rMin]
dr = 2.*(rMax-rMin)/rN
a = 10.
f = [-a]
for i in range(1,rN):
f.append(f[i-1] + 2.*a/rN)
rGrid.append(rGrid[i-1] + (1. - (1./(exp(f[i])+1.)))*dr)
rGrid.append(rMax)
iFourXkappaTau = 1./(4.*xkappa*tau)
iFourPiXkappaTauToHalfD = 1./(4.*pi*xkappa*tau)**(D/2.)
urrs = []
f = open('Vr.dat','w')
g = open('Vr0.dat','w')
for r in rGrid:
ra = np.array((r,0.,0.))
rb = np.array((r,0.,0.))
r1 = sqrt(np.dot(ra,ra))
r2 = sqrt(np.dot(rb,rb))
if (r1 != 0 and r2 != 0):
theta = acos(np.dot(ra,rb)/(r1*r2))
else:
theta = 0
s2 = r1*r1 + r2*r2 - 2*r1*r2*cos(theta)
coul = rhocoul.RhoCoul(r1,r2,theta,tau,xkappa,z,kmax,lmax,nmax,D)
free = iFourPiXkappaTauToHalfD * exp(-s2*iFourXkappaTau)
#print coul, free
urr = -log(coul/free)
print r, urr
f.write('%f %f\n'%(r,urr/(xkappa*z*tau)))
g.write('%f %f\n'%(r,u00/(xkappa*z*tau)))
f.close()
g.close()
def usage():
print "Usage: %s tau lam1 lam2 Z1Z2 D" % os.path.basename(sys.argv[0])
sys.exit(2)
def main(argv=None):
if argv is None:
argv = sys.argv
if "-h" in argv or "--help" in argv:
usage()
try:
tau = float(sys.argv[1])
lam1 = float(sys.argv[2])
lam2 = float(sys.argv[3])
Z1Z2 = float(sys.argv[4])
D = int(sys.argv[5])
lam = lam1*lam2/(lam1 + lam2)
m1 = 1./(2.*lam1)
m2 = 1./(2.*lam2)
m12 = m1*m2/(m1+m2)
lam12 = 1./(2.*m12)
xkappa = lam12
z = Z1Z2/xkappa
except:
usage()
run(tau,xkappa,z,D)
if __name__ == "__main__":
sys.exit(main())
|
995,401 | 8fa814b46f70c9dd4fcb6ca5b5427f37265572b1 | from django.contrib import admin
from .models import Event, Entry
# Register your models here.
# Adds the model to the admin page.
admin.site.register(Event)
admin.site.register(Entry)
|
995,402 | 951f4c86c7b3cc9d92bb22a9e8259af2abbd5be3 | import sys
def calc_comb(items, n):
if n==0: yield []
else:
for i in xrange(len(items)):
for cc in calc_comb(items[i+1:],n-1):
yield [items[i]]+cc
readfile = "A-small-attempt1.in"
writefile = readfile + ".out"
open_read_file = open(readfile,'r')
open_write_file = open(writefile,'w')
num_cases = int(open_read_file.readline())
#case = 0
#for line in open_read_file.readlines():
# if firstline == 0:
# firstline = 1
# num_cases = int(line)
# else:
# case = case + 1
for case in range(num_cases):
[n,A,B,C,D,X0,Y0,M] = open_read_file.readline().split()
n = int(n)
A = int(A)
B = int(B)
C = int(C)
D = int(D)
X0 = int(X0)
Y0 = int(Y0)
M = int(M)
count = 0
X = X0
Y = Y0
tree_list = [[X,Y]]
for index in range(n-1):
X = (A * X + B)% M
Y = (C * Y + D) % M
tree_list.append([X,Y])
# print tree_list
comb_list = calc_comb(tree_list,3)
for comb in comb_list:
x_cent = (comb[0][0] + comb[1][0] + comb [2][0])/3.0
y_cent = (comb[0][1] + comb[1][1] + comb [2][1])/3.0
# print comb
# print x_cent, y_cent
if int(x_cent) == x_cent:
if int(y_cent) == y_cent:
count = count + 1
# print "count = ", count
print "case = ", case
open_write_file.write("Case #")
open_write_file.write(str(case+1))
open_write_file.write(": ")
open_write_file.write(str(count))
open_write_file.write('\n')
open_write_file.close()
|
995,403 | af686d25dbe95010c6558396edeaf55cf7597967 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
from main import main
main(revisions=["issue596-base", "issue596-v1"])
|
995,404 | f27f842f0d27371b36d73ed09f34e8be28528548 | def junta_nome_sobrenome(nomes, sobrenomes):
lista_final = []
for i in nomes:
lista_final += nomes[i] #,sobrenomes[i] |
995,405 | 4da7699eecb5027563375136a6ad364db815532e | import numpy as np
import gdal, os, sys, glob, random
import pylab as pl
def read_drainage_efficiency(self):#, PLOT, FIGURE, DISTRIBUTION):
"""
The purpose of this module is to read (input) the drainage efficiency
of each predisposed element.
If the fractional area of cohorts is > 0.0, then there will be
an assigned drainage efficiency (Below or Above)
If there is no input file, drainage efficiency will be randomly assigned.
However, since we are working with Barrow, the probabibilty of the
drainage efficiency of being 'below' the drainage efficiency threshold
is set to 0.85.
"""
print ' Reading drainage efficiency'
self.drainage_efficiency = {}
drainage = np.zeros(self.ATTM_nrows * self.ATTM_ncols)
for i in range(0, self.ATTM_nrows * self.ATTM_ncols):
if self.ATTM_Total_Fractional_Area[i] > 0.0 :
if self.Terrestrial['Drainage_Efficiency_Distribution'].lower() == 'random':
chance = random.random()
if chance > self.Terrestrial['Drainage_Efficiency_Random_Value']:
self.drainage_efficiency[i] = 'above'
drainage[i] = 1.
else:
self.drainage_efficiency[i] = 'below'
drainage[i] = 2. # redundant, but explicit
elif self.Terrestrial['Drainage_Efficiency_Distribution'].lower() == 'above':
self.drainage_efficiency[i] = 'above'
drainage[i] = 1.
elif self.Terrestrial['Drainage_Efficiency_Distribution'].lower() == 'below':
self.drainage_efficiency[i] = 'below'
drainage[i] = 2.
else:
self.drainage_efficiency[i] = 'none'
drainage[i] =0.
print ' done.'
print ' '
# ==================================================
# Create desired output files, figures, and plots
# ==================================================
if self.Terrestrial['Drainage_Efficiency_Figure'].lower() == 'yes':
# -------------------------
# Move to output directory
# -------------------------
if self.Simulation_area.lower() == 'barrow':
os.chdir(self.control['Run_dir']+self.Output_directory+'/Barrow')
# -----------------------
# Create desired output
# -----------------------
drainage = np.reshape(drainage, [self.ATTM_nrows, self.ATTM_ncols])
fig = pl.figure()
pl.imshow(drainage, interpolation='nearest', cmap='bone')
pl.colorbar( extend = 'max', shrink = 0.92)
pl.title('Drainage efficiency')
pl.savefig('./Initialization/Drainage_efficiency.png', format = 'png')
drainage.tofile('./Initialization/Drainage_efficiency.bin')
pl.close()
os.chdir(self.control['Run_dir'])
|
995,406 | 8259820462931a9f1906181b7c803765054978a9 | import numpy as np
N=7
M=np.zeros((N,7),dtype=int)
print(M)
$( function () {
$('button#btn-json').bind('click' , function () {
$.getJSON('/background_process', {
proglang: $('input[name="proglang"]').val(),
} , function (data) {
$("#result").text(data.result);
var dados = data.result;
console.log(dados)
});
return false
});
});
<a href="/R_diario">Registo Diario</a>
<a href="/padrao">Padroes diarios</a>
<a href="/alvo">Tempo no alvo</a> |
995,407 | b3fb6149043b55dcac9cf00aef9649c60e9020ea | """
We will hash every position on the board, this way we don't utilize too much memory as we play large games (will take too long)
RUN this to generate Hashes & replace in your zobrist.py file
"""
import random
from dlgo.gotypes import Player, Point
def to_python(player_state):
if player_state is None:
return 'None'
if player_state == Player.black:
return Player.black
return Player.white
MAX63 = 0x7fffffffffffffff
table = {}
empty_board = 0
for row in range(1,20):
for col in range(1,20):
for state in (Player.black, Player.white):
code = random.randint(0, MAX63) # generates hash
table[Point(row,col), state] = code # stores hash in dictionary
print('from dlgo.gotypes import Player, Point')
print('')
print("__all__ = ['HASH_CODE', 'EMPTY_BOARD']")
print('')
print('HASH_CODE = {')
for (pt, state), hash_code in table.items():
print(' (%r, %s): %r,' % (pt, to_python(state), hash_code))
print('}')
print('')
print('EMPTY_BOARD = %d' % (empty_board,))
|
995,408 | 422763be6bf7a10383e60934dd117b238604b3e7 |
# coding: utf-8
import pandas as pd
import datetime as dt
import requests
import time
result = pd.read_html('https://btc.com/block?date=2016-01-01')
result = pd.DataFrame(result[0])
result = result.rename({0:'Height',1:'Relayed By',2:'Tx Count',3:'Stripped Size(B)',4:'Size(B)',5:'Weight',
6:'Avg Fee Per Tx',7:'Reward',8:'Time',9:'Block Version'},axis=1)
result = result.drop(0,axis=0)
date = []
for i in range(0,991):
day_time = (dt.datetime(2016,1,2)+dt.timedelta(days=i)).strftime('%Y-%m-%d')
date.append(day_time)
for datetime in date:
try:
url = 'https://btc.com/block?date='+datetime
data = pd.read_html(requests.get(url, headers={'User-agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36'}).text)
blockchain_data = pd.DataFrame(data[0])
blockchain_data = blockchain_data.rename({0:'Height',1:'Relayed By',2:'Tx Count',3:'Stripped Size(B)',4:'Size(B)',5:'Weight',
6:'Avg Fee Per Tx',7:'Reward',8:'Time',9:'Block Version'},axis=1)
blockchain_data = blockchain_data.drop(0,axis=0)
result = pd.merge(result,blockchain_data,how='outer')
print('finish:%s' %datetime)
except:
print('No table found time is:%s' %datetime)
time.sleep(60)
result.to_csv('blockchain_data.csv')
|
995,409 | b6f71f9026f34e4ce2856ea1893dd25ec2c9d6cb | import matplotlib.pyplot as plt
import numpy as np
from optmize import *
fig, axes = plt.subplots(nrows=1,ncols=2, figsize=(12,5))
all_data = loadCsv('./test.20.log')
all_data = [np.random.normal(0, std, 100) for std in range(6, 10)]
#fig = plt.figure(figsize=(8,6))
axes[0].violinplot(all_data,
showmeans=False,
showmedians=True
)
axes[0].set_title('violin plot')
axes[1].boxplot(all_data,
)
axes[1].set_title('box plot')
# adding horizontal grid lines
for ax in axes:
ax.yaxis.grid(True)
ax.set_xticks([y+1 for y in range(len(all_data))], )
ax.set_xlabel('xlabel')
ax.set_ylabel('ylabel')
plt.setp(axes, xticks=[y+1 for y in range(len(all_data))],
xticklabels=['abc', 'pso', 'pso-basic', 'tsfcm'],
)
plt.show() |
995,410 | 47cd2e23dfefc330b0b212e588047d18af507c96 | """The bluetooth integration matchers."""
from __future__ import annotations
from dataclasses import dataclass
from fnmatch import translate
from functools import lru_cache
import re
from typing import TYPE_CHECKING, Final, TypedDict
from lru import LRU # pylint: disable=no-name-in-module
from homeassistant.loader import BluetoothMatcher, BluetoothMatcherOptional
from .models import BluetoothServiceInfoBleak
if TYPE_CHECKING:
from collections.abc import MutableMapping
from bleak.backends.scanner import AdvertisementData
MAX_REMEMBER_ADDRESSES: Final = 2048
ADDRESS: Final = "address"
CONNECTABLE: Final = "connectable"
LOCAL_NAME: Final = "local_name"
SERVICE_UUID: Final = "service_uuid"
SERVICE_DATA_UUID: Final = "service_data_uuid"
MANUFACTURER_ID: Final = "manufacturer_id"
MANUFACTURER_DATA_START: Final = "manufacturer_data_start"
class BluetoothCallbackMatcherOptional(TypedDict, total=False):
"""Matcher for the bluetooth integration for callback optional fields."""
address: str
class BluetoothCallbackMatcher(
BluetoothMatcherOptional,
BluetoothCallbackMatcherOptional,
):
"""Callback matcher for the bluetooth integration."""
@dataclass(frozen=False)
class IntegrationMatchHistory:
"""Track which fields have been seen."""
manufacturer_data: bool
service_data: set[str]
service_uuids: set[str]
def seen_all_fields(
previous_match: IntegrationMatchHistory, advertisement_data: AdvertisementData
) -> bool:
"""Return if we have seen all fields."""
if not previous_match.manufacturer_data and advertisement_data.manufacturer_data:
return False
if advertisement_data.service_data and (
not previous_match.service_data
or not previous_match.service_data.issuperset(advertisement_data.service_data)
):
return False
if advertisement_data.service_uuids and (
not previous_match.service_uuids
or not previous_match.service_uuids.issuperset(advertisement_data.service_uuids)
):
return False
return True
class IntegrationMatcher:
"""Integration matcher for the bluetooth integration."""
def __init__(self, integration_matchers: list[BluetoothMatcher]) -> None:
"""Initialize the matcher."""
self._integration_matchers = integration_matchers
# Some devices use a random address so we need to use
# an LRU to avoid memory issues.
self._matched: MutableMapping[str, IntegrationMatchHistory] = LRU(
MAX_REMEMBER_ADDRESSES
)
self._matched_connectable: MutableMapping[str, IntegrationMatchHistory] = LRU(
MAX_REMEMBER_ADDRESSES
)
def async_clear_address(self, address: str) -> None:
"""Clear the history matches for a set of domains."""
self._matched.pop(address, None)
self._matched_connectable.pop(address, None)
def _get_matched_by_type(
self, connectable: bool
) -> MutableMapping[str, IntegrationMatchHistory]:
"""Return the matches by type."""
return self._matched_connectable if connectable else self._matched
def match_domains(self, service_info: BluetoothServiceInfoBleak) -> set[str]:
"""Return the domains that are matched."""
device = service_info.device
advertisement_data = service_info.advertisement
matched = self._get_matched_by_type(service_info.connectable)
matched_domains: set[str] = set()
if (previous_match := matched.get(device.address)) and seen_all_fields(
previous_match, advertisement_data
):
# We have seen all fields so we can skip the rest of the matchers
return matched_domains
matched_domains = {
matcher["domain"]
for matcher in self._integration_matchers
if ble_device_matches(matcher, service_info)
}
if not matched_domains:
return matched_domains
if previous_match:
previous_match.manufacturer_data |= bool(
advertisement_data.manufacturer_data
)
previous_match.service_data |= set(advertisement_data.service_data)
previous_match.service_uuids |= set(advertisement_data.service_uuids)
else:
matched[device.address] = IntegrationMatchHistory(
manufacturer_data=bool(advertisement_data.manufacturer_data),
service_data=set(advertisement_data.service_data),
service_uuids=set(advertisement_data.service_uuids),
)
return matched_domains
def ble_device_matches(
matcher: BluetoothCallbackMatcher | BluetoothMatcher,
service_info: BluetoothServiceInfoBleak,
) -> bool:
"""Check if a ble device and advertisement_data matches the matcher."""
device = service_info.device
if (address := matcher.get(ADDRESS)) is not None and device.address != address:
return False
if matcher.get(CONNECTABLE, True) and not service_info.connectable:
return False
advertisement_data = service_info.advertisement
if (
service_uuid := matcher.get(SERVICE_UUID)
) is not None and service_uuid not in advertisement_data.service_uuids:
return False
if (
service_data_uuid := matcher.get(SERVICE_DATA_UUID)
) is not None and service_data_uuid not in advertisement_data.service_data:
return False
if (
manfacturer_id := matcher.get(MANUFACTURER_ID)
) is not None and manfacturer_id not in advertisement_data.manufacturer_data:
return False
if (manufacturer_data_start := matcher.get(MANUFACTURER_DATA_START)) is not None:
manufacturer_data_start_bytes = bytearray(manufacturer_data_start)
if not any(
manufacturer_data.startswith(manufacturer_data_start_bytes)
for manufacturer_data in advertisement_data.manufacturer_data.values()
):
return False
if (local_name := matcher.get(LOCAL_NAME)) is not None and (
(device_name := advertisement_data.local_name or device.name) is None
or not _memorized_fnmatch(
device_name,
local_name,
)
):
return False
return True
@lru_cache(maxsize=4096, typed=True)
def _compile_fnmatch(pattern: str) -> re.Pattern:
"""Compile a fnmatch pattern."""
return re.compile(translate(pattern))
@lru_cache(maxsize=1024, typed=True)
def _memorized_fnmatch(name: str, pattern: str) -> bool:
"""Memorized version of fnmatch that has a larger lru_cache.
The default version of fnmatch only has a lru_cache of 256 entries.
With many devices we quickly reach that limit and end up compiling
the same pattern over and over again.
Bluetooth has its own memorized fnmatch with its own lru_cache
since the data is going to be relatively the same
since the devices will not change frequently.
"""
return bool(_compile_fnmatch(pattern).match(name))
|
995,411 | 116e2c11c7ae1f59ba16c44db05cebae2e7bdcb3 | from django.apps import AppConfig
class DbtemplateConfig(AppConfig):
name = 'db_template'
|
995,412 | 8a45f93bbb3bfd8bbbe9cc5a7a4d4468eafb39da | from selenium import webdriver
from selenium.webdriver import ActionChains
from selenium.webdriver.common.by import By
from webdriver_manager.chrome import ChromeDriverManager
import time
driver = webdriver.Chrome(ChromeDriverManager().install())
driver.implicitly_wait(10)
driver.set_page_load_timeout(20)
driver.maximize_window()
driver.get("https://jqueryui.com/resources/demos/droppable/default.html")
time.sleep(3)
drag_source = driver.find_element(By.ID, "draggable")
drop_destination = driver.find_element(By.ID, "droppable")
act_chains = ActionChains(driver)
#act_chains.drag_and_drop(drag_source, drop_destination).perform()
act_chains\
.click_and_hold(drag_source)\
.move_to_element(drop_destination)\
.release()\
.perform()
driver.quit()
|
995,413 | aec77e3fcdd9c54e32afcfed6691d872e21f50bc |
from matplotlib.colors import ListedColormap
from numpy import nan, inf
# Used to reconstruct the colormap in pycam02ucs.cm.viscm
parameters = {'xp': [-3.4830597643097576, 0.6289831887553419, -17.483059764309758, -36.149726430976415, -10.119506350699233, -1.5386153198653005],
'yp': [-16.277777777777771, -46.80680359435172, -12.0, -2.6666666666666572, -4.590430134624931, 5.8888888888888857],
'min_Jp': 15.1681957187,
'max_Jp': 98.6544342508}
cm_data = [[ 0.06597739, 0.12386005, 0.24948116],
[ 0.06865758, 0.1266325 , 0.25557624],
[ 0.07132312, 0.12939515, 0.26166391],
[ 0.07396584, 0.13214058, 0.2677948 ],
[ 0.07658629, 0.13486916, 0.27396904],
[ 0.07919242, 0.13758843, 0.28014206],
[ 0.08176925, 0.14028419, 0.28640499],
[ 0.08433407, 0.14297318, 0.29265629],
[ 0.08687299, 0.14564215, 0.29898019],
[ 0.08939564, 0.14830082, 0.30531941],
[ 0.09189721, 0.15094484, 0.311703 ],
[ 0.09437767, 0.1535746 , 0.31813071],
[ 0.09684032, 0.15619402, 0.32458157],
[ 0.09927797, 0.15879644, 0.33109756],
[ 0.10169939, 0.16139154, 0.33762358],
[ 0.10409298, 0.1639684 , 0.34422672],
[ 0.10647003, 0.16653949, 0.35083602],
[ 0.10881765, 0.16909286, 0.35752405],
[ 0.11114624, 0.1716403 , 0.36422476],
[ 0.11344526, 0.17417268, 0.37099405],
[ 0.11572015, 0.17669703, 0.37779407],
[ 0.11796711, 0.17921142, 0.38463932],
[ 0.12018172, 0.18171361, 0.39154591],
[ 0.12237222, 0.18421369, 0.39845983],
[ 0.12451836, 0.18669519, 0.40547913],
[ 0.12663654, 0.18917624, 0.41250469],
[ 0.12871435, 0.19164859, 0.41958831],
[ 0.13074695, 0.19411179, 0.42673806],
[ 0.13274084, 0.19657524, 0.43390608],
[ 0.13468282, 0.19903239, 0.44113668],
[ 0.13656674, 0.2014837 , 0.44843501],
[ 0.13839602, 0.20393744, 0.4557622 ],
[ 0.14016368, 0.20639447, 0.46312314],
[ 0.14185409, 0.20885126, 0.47055044],
[ 0.14345986, 0.21131167, 0.47803591],
[ 0.14497642, 0.2137823 , 0.485557 ],
[ 0.146391 , 0.21626634, 0.49311434],
[ 0.14768864, 0.218768 , 0.50070638],
[ 0.14885173, 0.22129285, 0.50832846],
[ 0.14985965, 0.22384818, 0.51597154],
[ 0.1506884 , 0.2264434 , 0.52362079],
[ 0.15131023, 0.22909058, 0.53125369],
[ 0.15168425, 0.23180303, 0.53885457],
[ 0.15175702, 0.23459819, 0.54640144],
[ 0.15150762, 0.23750367, 0.55379602],
[ 0.15085272, 0.24054407, 0.56100954],
[ 0.149778 , 0.24375297, 0.56790391],
[ 0.1482413 , 0.24715977, 0.57437788],
[ 0.14626774, 0.25078506, 0.58029534],
[ 0.14393556, 0.25462901, 0.58555075],
[ 0.14135408, 0.25866955, 0.59010335],
[ 0.13864079, 0.26286871, 0.59397852],
[ 0.13589174, 0.26718438, 0.59724867],
[ 0.13319563, 0.27157423, 0.60000635],
[ 0.13058114, 0.2760103 , 0.60234289],
[ 0.12806719, 0.28047115, 0.60433583],
[ 0.12569199, 0.28493491, 0.60605594],
[ 0.12343537, 0.28939604, 0.60755039],
[ 0.12131237, 0.29384382, 0.60886431],
[ 0.1193314 , 0.29827105, 0.61003449],
[ 0.11747335, 0.3026784 , 0.61108156],
[ 0.11574105, 0.30706267, 0.61202824],
[ 0.11415261, 0.31141804, 0.61290024],
[ 0.11269166, 0.31574724, 0.61370617],
[ 0.11135538, 0.32005035, 0.61445721],
[ 0.11014377, 0.32432716, 0.6151638 ],
[ 0.10905648, 0.32857778, 0.61583463],
[ 0.10809281, 0.3328026 , 0.61647697],
[ 0.10725185, 0.33700217, 0.61709693],
[ 0.10653241, 0.34117717, 0.61769964],
[ 0.10593314, 0.34532836, 0.61828945],
[ 0.1054525 , 0.34945656, 0.61887003],
[ 0.10508879, 0.35356261, 0.61944452],
[ 0.10484013, 0.35764738, 0.62001559],
[ 0.10470452, 0.36171173, 0.62058554],
[ 0.10467981, 0.36575652, 0.62115635],
[ 0.10476371, 0.36978259, 0.62172971],
[ 0.10495382, 0.37379076, 0.62230709],
[ 0.10524762, 0.37778184, 0.62288978],
[ 0.10564252, 0.3817566 , 0.62347886],
[ 0.1061358 , 0.38571579, 0.62407531],
[ 0.10672471, 0.38966014, 0.62467996],
[ 0.10740642, 0.39359035, 0.62529352],
[ 0.10817806, 0.39750711, 0.62591662],
[ 0.10903675, 0.40141105, 0.62654981],
[ 0.10998202, 0.40530231, 0.62719516],
[ 0.11100819, 0.40918206, 0.62785128],
[ 0.11211222, 0.41305092, 0.62851837],
[ 0.11329127, 0.41690943, 0.62919672],
[ 0.11454253, 0.42075813, 0.62988653],
[ 0.11586327, 0.42459754, 0.63058799],
[ 0.11725083, 0.42842816, 0.63130122],
[ 0.1187026 , 0.43225046, 0.63202632],
[ 0.12021702, 0.4360647 , 0.63276403],
[ 0.12179033, 0.43987161, 0.6335134 ],
[ 0.1234202 , 0.44367161, 0.63427436],
[ 0.12510445, 0.4474651 , 0.6350469 ],
[ 0.12684102, 0.45125247, 0.63583095],
[ 0.12862799, 0.45503408, 0.63662642],
[ 0.13046351, 0.45881028, 0.6374332 ],
[ 0.13234563, 0.46258148, 0.63825092],
[ 0.13427263, 0.46634802, 0.63907929],
[ 0.13624311, 0.4701102 , 0.63991816],
[ 0.13825578, 0.47386832, 0.64076733],
[ 0.14030945, 0.47762263, 0.64162657],
[ 0.14240307, 0.48137341, 0.64249562],
[ 0.14453572, 0.48512091, 0.64337422],
[ 0.14670608, 0.48886546, 0.64426159],
[ 0.14891319, 0.49260737, 0.64515709],
[ 0.15115711, 0.49634669, 0.64606096],
[ 0.15343746, 0.50008362, 0.64697283],
[ 0.15575396, 0.50381833, 0.64789234],
[ 0.15810648, 0.50755096, 0.64881909],
[ 0.160495 , 0.51128167, 0.64975267],
[ 0.16291965, 0.51501059, 0.65069263],
[ 0.16538066, 0.51873781, 0.6516385 ],
[ 0.16787709, 0.52246378, 0.65258827],
[ 0.17041079, 0.52618823, 0.65354289],
[ 0.17298246, 0.52991124, 0.65450185],
[ 0.17559291, 0.53363281, 0.65546464],
[ 0.1782431 , 0.53735298, 0.65643069],
[ 0.18093416, 0.54107173, 0.65739946],
[ 0.18366735, 0.54478903, 0.65837034],
[ 0.18644408, 0.54850484, 0.65934275],
[ 0.18926596, 0.55221906, 0.66031605],
[ 0.19213472, 0.5559316 , 0.66128961],
[ 0.19505228, 0.55964231, 0.66226278],
[ 0.19802076, 0.56335105, 0.66323489],
[ 0.20104242, 0.5670576 , 0.66420528],
[ 0.20411976, 0.57076173, 0.66517325],
[ 0.20725543, 0.57446319, 0.66613812],
[ 0.21045232, 0.57816165, 0.6670992 ],
[ 0.2137135 , 0.58185676, 0.66805579],
[ 0.21704227, 0.58554813, 0.6690072 ],
[ 0.22044214, 0.58923531, 0.66995277],
[ 0.22391686, 0.59291779, 0.67089183],
[ 0.22747037, 0.59659503, 0.67182375],
[ 0.23110686, 0.6002664 , 0.67274791],
[ 0.23483073, 0.60393124, 0.67366376],
[ 0.2386466 , 0.60758879, 0.67457077],
[ 0.24255928, 0.61123826, 0.67546847],
[ 0.24657379, 0.61487875, 0.67635647],
[ 0.25069532, 0.61850931, 0.67723446],
[ 0.25492963, 0.62212933, 0.67809791],
[ 0.25928228, 0.62573723, 0.67895065],
[ 0.26375886, 0.62933183, 0.67979274],
[ 0.26836502, 0.63291182, 0.68062438],
[ 0.27310635, 0.63647586, 0.68144595],
[ 0.27799067, 0.64002284, 0.68225204],
[ 0.28302187, 0.64355084, 0.68304906],
[ 0.28820489, 0.64705829, 0.68383831],
[ 0.29354581, 0.65054365, 0.68461866],
[ 0.29905039, 0.65400523, 0.68538993],
[ 0.30471967, 0.65744127, 0.68615859],
[ 0.31055832, 0.66085014, 0.68692427],
[ 0.31656915, 0.66423017, 0.68768878],
[ 0.32274951, 0.66757995, 0.68845924],
[ 0.32910283, 0.67089793, 0.68923459],
[ 0.33562308, 0.67418311, 0.69002275],
[ 0.34230732, 0.67743451, 0.69082687],
[ 0.34914976, 0.68065146, 0.69165158],
[ 0.35614127, 0.68383379, 0.69250293],
[ 0.36327369, 0.68698145, 0.69338491],
[ 0.37053484, 0.69009497, 0.6943034 ],
[ 0.37791226, 0.69317522, 0.69526327],
[ 0.38539385, 0.69622324, 0.69626827],
[ 0.39296332, 0.69924082, 0.697324 ],
[ 0.4006094 , 0.70222938, 0.69843223],
[ 0.40831594, 0.70519122, 0.69959724],
[ 0.41607015, 0.7081284 , 0.7008209 ],
[ 0.42386201, 0.71104276, 0.70210342],
[ 0.43167256, 0.71393771, 0.70344961],
[ 0.43949947, 0.71681422, 0.70485557],
[ 0.44732849, 0.71967527, 0.70632366],
[ 0.45514951, 0.72252334, 0.70785413],
[ 0.46296115, 0.72535947, 0.70944335],
[ 0.47075349, 0.72818626, 0.71109189],
[ 0.47851694, 0.73100635, 0.71280025],
[ 0.48625412, 0.73382019, 0.71456376],
[ 0.49396102, 0.73662941, 0.716381 ],
[ 0.50163106, 0.73943622, 0.71825179],
[ 0.50925965, 0.74224248, 0.72017521],
[ 0.51685029, 0.74504841, 0.72214731],
[ 0.52440134, 0.74785523, 0.72416643],
[ 0.53191155, 0.75066408, 0.72623094],
[ 0.53938005, 0.753476 , 0.72833923],
[ 0.54680444, 0.75629239, 0.73049037],
[ 0.55418394, 0.75911426, 0.73268299],
[ 0.5615215 , 0.76194178, 0.73491438],
[ 0.56881727, 0.7647757 , 0.73718308],
[ 0.57607152, 0.76761672, 0.73948764],
[ 0.58328469, 0.7704655 , 0.74182668],
[ 0.5904573 , 0.77332263, 0.74419886],
[ 0.59758995, 0.77618869, 0.74660289],
[ 0.60468333, 0.77906419, 0.7490375 ],
[ 0.61173817, 0.78194964, 0.75150151],
[ 0.61875522, 0.7848455 , 0.75399375],
[ 0.62573528, 0.7877522 , 0.7565131 ],
[ 0.63267915, 0.79067016, 0.75905849],
[ 0.63958763, 0.79359978, 0.76162887],
[ 0.64646151, 0.79654145, 0.76422323],
[ 0.6533016 , 0.79949552, 0.76684059],
[ 0.66010867, 0.80246236, 0.76948001],
[ 0.66688348, 0.80544232, 0.77214056],
[ 0.67362676, 0.80843573, 0.77482134],
[ 0.68033922, 0.81144292, 0.77752145],
[ 0.68702155, 0.81446423, 0.78024002],
[ 0.69367439, 0.81749998, 0.7829762 ],
[ 0.7002975 , 0.82055075, 0.78572944],
[ 0.70689165, 0.82361681, 0.78849881],
[ 0.71345823, 0.82669826, 0.79128316],
[ 0.7199978 , 0.82979542, 0.79408159],
[ 0.72651088, 0.83290861, 0.7968932 ],
[ 0.73299796, 0.83603816, 0.79971707],
[ 0.73945951, 0.8391844 , 0.80255223],
[ 0.74589598, 0.84234767, 0.8053977 ],
[ 0.7523078 , 0.84552831, 0.80825242],
[ 0.75869517, 0.84872673, 0.81111536],
[ 0.76505775, 0.85194348, 0.81398568],
[ 0.77139709, 0.85517859, 0.81686163],
[ 0.77771359, 0.85843239, 0.81974187],
[ 0.78400765, 0.86170526, 0.82262493],
[ 0.79027967, 0.86499755, 0.82550921],
[ 0.79653012, 0.8683096 , 0.82839299],
[ 0.8027595 , 0.87164177, 0.83127437],
[ 0.80896806, 0.87499448, 0.83415141],
[ 0.8151567 , 0.87836796, 0.83702171],
[ 0.82132643, 0.88176244, 0.83988264],
[ 0.82747812, 0.88517819, 0.84273144],
[ 0.83361281, 0.88861542, 0.8455651 ],
[ 0.83973174, 0.89207431, 0.84838033],
[ 0.84583638, 0.89555495, 0.85117353],
[ 0.85192854, 0.89905734, 0.85394079],
[ 0.85801043, 0.90258135, 0.85667786],
[ 0.86408444, 0.90612679, 0.85938032],
[ 0.87015339, 0.9096933 , 0.86204351],
[ 0.87622047, 0.9132804 , 0.8646626 ],
[ 0.88228934, 0.9168874 , 0.86723268],
[ 0.88836402, 0.92051348, 0.86974885],
[ 0.89444909, 0.92415756, 0.87220619],
[ 0.90054949, 0.92781839, 0.87459995],
[ 0.90666975, 0.93149475, 0.87692634],
[ 0.91281478, 0.93518523, 0.87918186],
[ 0.91898934, 0.93888836, 0.88136368],
[ 0.92519787, 0.94260271, 0.88346979],
[ 0.93144429, 0.94632694, 0.88549899],
[ 0.93773184, 0.95005986, 0.88745092],
[ 0.94406341, 0.95380038, 0.88932539],
[ 0.95044015, 0.95754793, 0.89112376],
[ 0.95686223, 0.96130226, 0.89284794],
[ 0.963329 , 0.96506343, 0.8944999 ],
[ 0.96983887, 0.96883187, 0.89608183],
[ 0.97638938, 0.97260834, 0.89759591],
[ 0.98297732, 0.97639393, 0.89904421],
[ 0.98959887, 0.98019003, 0.90042859],
[ 0.99624974, 0.98399826, 0.90175064]]
test_cm = ListedColormap(cm_data, name=__file__)
if __name__ == "__main__":
import matplotlib.pyplot as plt
import numpy as np
try:
from viscm import viscm
viscm(test_cm)
except ImportError:
print("viscm not found, falling back on simple display")
plt.imshow(np.linspace(0, 100, 256)[None, :], aspect='auto',
cmap=test_cm)
plt.show()
|
995,414 | 0dd795a280628cb04f72ab0f5c90af0c9a2bbc08 | def assign(tokens_path, wiki_titles, wiki_tokens, var6, var7, var8, var9, var10, left, first=False):
import re,sys
from gensim import corpora
import math
from itertools import groupby
import multiprocessing
from nltk.stem.wordnet import WordNetLemmatizer
stemmer = WordNetLemmatizer()
from time import strftime
def clean2(myList): # function to remove duplicates from the list of links generated by mechanize
try:
last = myList[-1]
for i in range(len(myList)-2, -1, -1):
if last == myList[i]:
del myList[i]
else:
last = myList[i]
except IndexError:
pass
tokens_file = open(tokens_path+'tokens.txt').read()
str678 = re.compile('\.\.')
tokens_file = str678.sub('', tokens_file)
tokens_file = re.sub('/', ', ', tokens_file)
texts_all = tokens_file.split('\n')
texts_dict = {}
if left:
left = [int(l) for l in left]
for l in left:
texts_dict[l] = [w for w in texts_all[l].split(', ')]
else:
lefto = range(len(texts_all))
for l in lefto:
texts_dict[l] = [w for w in texts_all[l].split(', ')]
tit_tok_file = open(tokens_path+'title-tokens.txt').read()
tit_tok_file = re.sub('/', ', ', tit_tok_file)
tit_tok_all = tit_tok_file.split('\n')
tit_tok_dict = {}
if left:
left = [int(l) for l in left]
for l in left:
tit_tok_dict[l] = [w for w in tit_tok_all[l].split(', ')]
else:
lefto = range(len(tit_tok_all))
for l in lefto:
tit_tok_dict[l] = [w for w in tit_tok_all[l].split(', ')]
if texts_dict[max(texts_dict.keys())][0] == "" and tit_tok_dict[max(texts_dict.keys())][0] == "":
del texts_dict[len(texts_all)-1]
del tit_tok_dict[len(tit_tok_all)-1]
titles = open(wiki_titles).read()
tit_texts = open(wiki_tokens).read()
tit_texts = re.sub('/', ', ', tit_texts)
titles = titles.split('\n')
tit_texts = tit_texts.split('\n')
if titles[-1] == "" and tit_texts[-1] == "":
del titles[-1]
del tit_texts[-1]
#print len(titles)
#print len(tit_texts)
if titles[0] == "":
del titles[0]
del tit_texts[0]
#titles = [[w.lower()] for w in titles]
def clean(myList): # function to remove duplicates from the list of links generated by mechanize
myList2 = []
for m in myList:
myList2.append(m)
try:
myList2.sort(key=lambda x: x, reverse=True)
last = myList2[-1]
for i in range(len(myList2)-2, -1, -1):
if last == myList2[i]:
del myList2[i]
else:
last = myList2[i]
except IndexError:
pass
return myList2
def clean3(myList): # function to remove duplicates from the list of links generated by mechanize
try:
myList.sort(key=lambda x: x, reverse=True)
last = myList[-1]
for i in range(len(myList)-2, -1, -1):
if last == myList[i]:
del myList[i]
else:
last = myList[i]
except IndexError:
pass
def second_largest(numbers):
m1, m2 = 1, None
for x in numbers:
if x >= m1:
m1, m2 = x, m1
elif x > m2:
m2 = x
return m2
if first is True:
for nj in range(len(titles)):
gram = titles[nj]
if len(gram) > 2:
data = {}
index = open('docs/labels/index.txt', 'a')
grams = open('docs/labels/gram.txt', 'a')
texts_gram = open('docs/labels/text.txt', 'a')
match_co = open('docs/labels/match-co.txt', 'a')
score_f = open('docs/labels/score.txt', 'a')
gram_index = open('docs/labels/gram_index_for_check.txt', 'a')
print nj
print>>gram_index, nj
for numb in texts_dict.keys():
text = texts_dict.get(numb)
tok = tit_tok_dict.get(numb)
#print numb
if len(tok) >= 3:
if re.search('\+|\?|\!|\"|\'|\*|^\W|\(|\)|\=', gram):
gramu = "ABRACADABRA"
else:
gramu = gram
searc = re.search('^'+gramu+'\W|\W'+gramu+'\W|\W'+gramu+'$|^'+gramu+'$', ' '.join(tok), re.I)
if searc:
print text
print gram
print tok
print "+++++"
print>>index, numb
print>>grams, gram
print>>texts_gram, ', '.join(texts_dict.get(numb))
print>>match_co, "None"
print>>score_f, "None"
def processing(ver,num1,num2):
for nj in range(num1,num2):
gram = titles[nj]
if len(gram) > 2:
data = {}
index = open('docs/labels/index'+ver+'.txt', 'a')
grams = open('docs/labels/gram'+ver+'.txt', 'a')
texts_gram = open('docs/labels/text'+ver+'.txt', 'a')
match_co = open('docs/labels/match-co'+ver+'.txt', 'a')
score_f = open('docs/labels/score'+ver+'.txt', 'a')
gram_index = open('docs/labels/gram_index_for_check'+ver+'.txt', 'a')
print nj
print>>gram_index, nj
gram_list = [[w.lower()] for w in re.sub('_', ' ', gram).split(' ')]
content = [[t] for t in tit_texts[nj].split(', ') if t]
content.extend(gram_list)
dict_titles = corpora.dictionary.Dictionary(w for w in content)
diction = sorted([dict_titles.doc2bow(t) for t in content])
diction = [c[0][0] for c in diction]
diction_scores = [len(list(group)) for key, group in groupby(diction)]
diction_clean = list(sorted(set(sorted(diction))))
for nu in range(len(diction_clean)):
data[diction_clean[nu]] = diction_scores[nu]
gram_dict = sorted([dict_titles.doc2bow(t) for t in gram_list])
gram_dict = [c[0][0] for c in gram_dict]
gram_clean = list(sorted(set(sorted(gram_dict))))
for na in range(len(gram_clean)):
data[gram_clean[na]]+=data.get(gram_clean[na])
#print ids4words_titles
for numb in texts_dict.keys():
text = texts_dict.get(numb)
tok = tit_tok_dict.get(numb)
text2 = []
if int(var10) == 1:
text2.extend(text)
text2.extend(text)
text = text2
#print numb
if int(var6) == 1:
if len(tok) >= 3:
tok = [[stemmer.lemmatize(w)] for w in tok]
class TitleBigrams(object):
def __iter__(self):
for t in tok:
# assume there's one document per line, tokens separated by whitespace
yield dict_titles.doc2bow(t)
sample_titlecorp = list(TitleBigrams())
#print sample_titlecorp
sample_titlecorp = [w for w in sample_titlecorp if w]
title_ratio = len(sample_titlecorp) / len(tok)
if title_ratio > 0.34:
#print title_ratio
if len(text) >= 2:
if int(var7) == 1:
clean3(text)
text = [[w] for w in text]
class TextMatch(object):
def __iter__(self):
for w in text:
# assume there's one document per line, tokens separated by whitespace
yield dict_titles.doc2bow(w)
text_corp = list(TextMatch())
text_corp = [w[0][0] for w in text_corp if w]
text_corp = sorted(text_corp)
result = []
for k in text_corp:
result.append(data.get(k)+1)
for n in range(len(text)-len(result)):
result.append(1)
second_l = second_largest(clean(result))
if (max(result) <= second_l * 2 and max(result) > var8):
match_coef = sum([math.sqrt(s) for s in result]) / len(text)
if (match_coef > var9 and len(text_corp) > len(result) / 3):
print text
print result
print gram
print match_coef
print "+++++"
print>>index, numb
print>>grams, gram
print>>texts_gram, ', '.join(texts_dict.get(numb))
print>>match_co, match_coef
print>>score_f, sum(result) / len(text)
else:
if len(text) >= 2:
if int(var7) == 1:
clean3(text)
text = [[w] for w in text]
class TextMatch(object):
def __iter__(self):
for w in text:
# assume there's one document per line, tokens separated by whitespace
yield dict_titles.doc2bow(w)
text_corp = list(TextMatch())
text_corp = [w[0][0] for w in text_corp if w]
text_corp = sorted(text_corp)
result = []
for k in text_corp:
result.append(data.get(k)+1)
for n in range(len(text)-len(result)):
result.append(1)
second_l = second_largest(clean(result))
if (max(result) <= second_l * 2 and max(result) > var8):
match_coef = sum([math.sqrt(s) for s in result]) / len(text)
if (match_coef > var9 and len(text_corp) > len(result) / 3):
print text
print result
print gram
print match_coef
print "+++++"
print>>index, numb
print>>grams, gram
print>>texts_gram, ', '.join(texts_dict.get(numb))
print>>match_co, match_coef
print>>score_f, sum(result) / len(text)
else:
if len(text) >= 2:
if int(var7) == 1:
clean3(text)
text = [[w] for w in text]
class TextMatch(object):
def __iter__(self):
for w in text:
# assume there's one document per line, tokens separated by whitespace
yield dict_titles.doc2bow(w)
text_corp = list(TextMatch())
text_corp = [w[0][0] for w in text_corp if w]
text_corp = sorted(text_corp)
result = []
for k in text_corp:
result.append(data.get(k)+1)
for n in range(len(text)-len(result)):
result.append(1)
second_l = second_largest(clean(result))
if (max(result) <= second_l * 2 and max(result) > var8):
match_coef = sum([math.sqrt(s) for s in result]) / len(text)
if (match_coef > var9 and len(text_corp) > len(result) / 3):
print text
print result
print gram
print match_coef
print "+++++"
print>>index, numb
print>>grams, gram
print>>texts_gram, ', '.join(texts_dict.get(numb))
print>>match_co, match_coef
print>>score_f, sum(result) / len(text)
index.close()
grams.close()
texts_gram.close()
match_co.close()
score_f.close()
gram_index.close()
if sys.platform == 'win32' or sys.platform == 'cygwin':
processing("",0,len(titles))
else:
if multiprocessing.cpu_count() == 8:
multiprocessing.Process(target=processing, args=("1",0,int(len(titles)/8),))
multiprocessing.Process(target=processing, args=("2",int(len(titles)/8),int(len(titles)/8)*2,))
multiprocessing.Process(target=processing, args=("3",int(len(titles)/8)*2,int(len(titles)/8)*3,))
multiprocessing.Process(target=processing, args=("4",int(len(titles)/8)*3,int(len(titles)/8)*4,))
multiprocessing.Process(target=processing, args=("5",int(len(titles)/8)*4,int(len(titles)/8)*5,))
multiprocessing.Process(target=processing, args=("6",int(len(titles)/8)*5,int(len(titles)/8)*6,))
multiprocessing.Process(target=processing, args=("7",int(len(titles)/8)*6,int(len(titles)/8)*7,))
multiprocessing.Process(target=processing, args=("8",int(len(titles)/8)*7,len(titles),))
elif multiprocessing.cpu_count() == 4:
multiprocessing.Process(target=processing, args=("1",0,int(len(titles)/4),))
multiprocessing.Process(target=processing, args=("2",int(len(titles)/4),int(len(titles)/4)*2,))
multiprocessing.Process(target=processing, args=("3",int(len(titles)/4)*2,int(len(titles)/4)*3,))
multiprocessing.Process(target=processing, args=("4", int(len(titles)/4)*3,len(titles),))
elif multiprocessing.cpu_count() == 2:
multiprocessing.Process(target=processing, args=("1",0,int(len(titles)/2),))
multiprocessing.Process(target=processing, args=("2",int(len(titles)/2),len(titles),))
else:
processing("",0,len(titles)) |
995,415 | aca23e70d5525c175ee243b7d19eed9aa8ec683e | from collections import Counter
import pytest
from presidio_evaluator.evaluation import EvaluationResult, Evaluator
from tests.mocks import (
MockTokensModel,
)
@pytest.fixture(scope="session")
def scores():
results = Counter(
{
("O", "O"): 30,
("ANIMAL", "ANIMAL"): 4,
("ANIMAL", "O"): 2,
("O", "ANIMAL"): 1,
("PERSON", "PERSON"): 2,
}
)
model = MockTokensModel(prediction=None)
evaluator = Evaluator(model=model)
evaluation_result = EvaluationResult(results=results)
return evaluator.calculate_score([evaluation_result])
def test_to_confusion_matrix(scores):
entities, confmatrix = scores.to_confusion_matrix()
assert "O" in entities
assert "PERSON" in entities
assert "ANIMAL" in entities
assert confmatrix == [[4, 2, 0], [1, 30, 0], [0, 0, 2]]
def test_str(scores):
return_str = str(scores)
assert (
"PERSON 100.00% 100.00% 2"
in return_str
)
assert (
"ANIMAL 80.00% 66.67% 6"
in return_str
)
assert (
"PII 85.71% 75.00% 8" in return_str
)
|
995,416 | 7214a82504c7352f181bbc7485e3681fcd673264 | __author__ = 'Frederik Diehl'
# Code for the NN adapted from the breze library example.
# For Breze, see github.com/breze-no-salt/breze.
import cPickle
import gzip
import time
import numpy as np
import theano.tensor as T
import climin.schedule
import climin.stops
import climin.initialize
from breze.learn.mlp import Mlp
from breze.learn.data import one_hot
from apsis.models.parameter_definition import *
from apsis.assistants.lab_assistant import ValidationLabAssistant
from apsis.utilities.logging_utils import get_logger
logger = get_logger("apsis.demos.demo_MNIST_NN")
start_time = None
def load_MNIST():
datafile = 'mnist.pkl.gz'
# Load data.
with gzip.open(datafile,'rb') as f:
train_set, val_set, test_set = cPickle.load(f)
X, Z = train_set
VX, VZ = val_set
TX, TZ = test_set
Z = one_hot(Z, 10)
VZ = one_hot(VZ, 10)
TZ = one_hot(TZ, 10)
image_dims = 28, 28
return X, Z, VX, VZ, TX, TZ, image_dims
def do_one_eval(X, Z, VX, VZ, step_rate, momentum, decay, c_wd):
max_passes = 100
batch_size = 250
max_iter = max_passes * X.shape[0] / batch_size
n_report = X.shape[0] / batch_size
optimizer = 'rmsprop', {'step_rate': step_rate, 'momentum': momentum, 'decay': decay}
#optimizer = 'adam'
#optimizer = 'gd', {'steprate': 0.1, 'momentum': climin.schedule.SutskeverBlend(0.99, 250), 'momentum_type': 'nesterov'}
m = Mlp(784, [800], 10, hidden_transfers=['sigmoid'], out_transfer='softmax', loss='cat_ce',
optimizer=optimizer, batch_size=batch_size)
climin.initialize.randomize_normal(m.parameters.data, 0, 1e-1)
losses = []
weight_decay = ((m.parameters.in_to_hidden**2).sum()
+ (m.parameters.hidden_to_out**2).sum())
weight_decay /= m.exprs['inpt'].shape[0]
m.exprs['true_loss'] = m.exprs['loss']
c_wd = c_wd
m.exprs['loss'] = m.exprs['loss'] + c_wd * weight_decay
n_wrong = 1 - T.eq(T.argmax(m.exprs['output'], axis=1), T.argmax(m.exprs['target'], axis=1)).mean()
f_n_wrong = m.function(['inpt', 'target'], n_wrong)
stop = climin.stops.AfterNIterations(max_iter)
pause = climin.stops.ModuloNIterations(n_report)
start = time.time()
# Set up a nice printout.
keys = '#', 'seconds', 'loss', 'val loss', 'train emp', 'val emp'
max_len = max(len(i) for i in keys)
header = '\t'.join(i for i in keys)
#print header
#print '-' * len(header)
for i, info in enumerate(m.powerfit((X, Z), (VX, VZ), stop, pause)):
passed = time.time() - start
losses.append((info['loss'], info['val_loss']))
#img = tile_raster_images(fe.parameters['in_to_hidden'].T, image_dims, feature_dims, (1, 1))
#save_and_display(img, 'filters-%i.png' % i)
info.update({
'time': passed,
'train_emp': f_n_wrong(X, Z),
'val_emp': f_n_wrong(VX, VZ),
})
row = '%(n_iter)i\t%(time)g\t%(loss)g\t%(val_loss)g\t%(train_emp)g\t%(val_emp)g' % info
#print row
return info["val_emp"]
def do_evaluation(LAss, opt, X, Z, VX, VZ):
to_eval = LAss.get_next_candidate(opt)
step_rate = to_eval.params["step_rate"]
momentum = to_eval.params["momentum"]
decay = to_eval.params["decay"]
c_wd = to_eval.params["c_wd"]
result = do_one_eval(X, Z, VX, VZ, step_rate, momentum, decay, c_wd)
to_eval.result = result
LAss.update(opt, to_eval)
def demo_on_MNIST(random_steps, steps, cv=1):
X, Z, VX, VZ, TX, TZ, image_dims = load_MNIST()
param_defs = {
#"step_rate": MinMaxNumericParamDef(0, 1),
"step_rate": AsymptoticNumericParamDef(0, 1),
#"momentum": MinMaxNumericParamDef(0, 1),
"momentum": AsymptoticNumericParamDef(1, 0),
'decay': MinMaxNumericParamDef(0, 1),
"c_wd": MinMaxNumericParamDef(0, 1)
}
LAss = ValidationLabAssistant(cv=cv)
experiments = ["random_mnist", "bay_mnist_ei_L-BFGS-B"]#, "bay_mnist_ei_rand"]
LAss.init_experiment("random_mnist", "RandomSearch", param_defs, minimization=True)
#LAss.init_experiment("bay_mnist_ei_rand", "BayOpt", param_defs,
# minimization=True, optimizer_arguments=
# {"acquisition_hyperparams":{"optimization": "random"}})
global start_time
start_time = time.time()
#First, the random steps
for i in range(random_steps*cv):
print("%s\tBeginning with random initialization. Step %i/%i" %(str(time.time()-start_time), i, random_steps*cv))
do_evaluation(LAss, "random_mnist", X, Z, VX, VZ)
#clone
#LAss.init_experiment("bay_mnist_ei_L-BFGS-B", "BayOpt", param_defs, minimization=True)
LAss.clone_experiments_by_name(exp_name=experiments[0], new_exp_name=experiments[1],
optimizer="BayOpt",
optimizer_arguments={"initial_random_runs": random_steps})
#learn the rest
for i in range((steps-random_steps)*cv):
for opt in experiments:
print("%s\tBeginning with %s, step %i/%i" %(time.time() - start_time, opt, i+1+random_steps*cv, steps*cv))
do_evaluation(LAss, opt, X, Z, VX, VZ)
for opt in experiments:
logger.info("Best %s score: %s" %(opt, [x.result for x in LAss.get_best_candidates(opt)]))
print("Best %s score: %s" %(opt, [x.result for x in LAss.get_best_candidates(opt)]))
LAss.plot_result_per_step(experiments, title="Neural Network on MNIST.", plot_min=0.0, plot_max=1.0)
if __name__ == '__main__':
demo_on_MNIST(10, 30, 5) |
995,417 | 428b16b5cdb7360f6785e9c340abe741e21b4396 | '''
input: [array_int]
output: min_diff for 2 buckets
for each position in N inputs,
There're 2 states: in bucket 1 or not in bucket,
so run time cost will be: 2^n.
'''
class bucket_solution1:
def __init__(self, input_array):
self.array = input_array
self.total = sum(self.array)
print("total: ", self.total)
def findMinDiff_impl(self, size, bucket1_sum):
print("size: ", size, ", bucket1_sum: ", bucket1_sum)
if size == 0:
return abs(self.total - bucket1_sum - bucket1_sum)
return min(self.findMinDiff_impl(size - 1, bucket1_sum + self.array[size-1]),
self.findMinDiff_impl(size - 1, bucket1_sum))
def findMinDiff(self):
value = self.findMinDiff_impl(len(self.array), 0)
print("min_diff: ", value)
def main():
array = [1, 4, 13, 6]
bucket_solution1(array).findMinDiff()
if __name__ == '__main__':
main()
|
995,418 | 3c0fa987e9d743c8183e3e99f8744663a5745afb | from argh import arg
from six import iteritems
__author__ = 'thauser'
from pnc_cli.swagger_client import UsersApi
from pnc_cli.swagger_client import UserRest
from pnc_cli import utils
users_api = UsersApi(utils.get_api_client())
def user_exists(user_id):
existing = utils.checked_api_call(users_api, 'get_specific', id=user_id)
if existing:
return True
return False
def get_user_id_by_name(name):
users = users_api.get_all(q='username=='+name).content
if users:
user = users[0]
return user.id
return None
def get_user_id(id, name):
if id:
found_id = id
if not user_exists(id):
print("No User with ID {} exists.".format(id))
return
elif name:
found_id = get_user_id_by_name(name)
if not found_id:
print("No User with username {} exists.".format(name))
return
else:
print("Either a User's name or ID is required.")
return
return found_id
def create_user_object(**kwargs):
created = UserRest()
for key, value in iteritems(kwargs):
setattr(created, key, value)
return created
def list_users():
"""
List all Users
"""
response = utils.checked_api_call(users_api, 'get_all')
if response:
return response.content
@arg('-i', '--id', help='ID for the User to retrieve.')
@arg('-n', '--name', help='Username of the User to retrieve.')
def get_user(id=None, name=None):
"""
Get a specific User
"""
found_id = get_user_id(id, name)
if not found_id:
return
response = utils.checked_api_call(users_api, 'get_specific', id=found_id)
if response:
return response.content
@arg('username', help='Username for the new User.')
@arg('-e', '--email', help='Email address for the new User.')
@arg('-fn', '--first-name', help="User's first name.")
@arg('-ln', '--last-name', help="User's last name.")
def create_user(username, **kwargs):
"""
Create a new User
"""
user = create_user_object(username=username, **kwargs)
response = utils.checked_api_call(users_api, 'create_new', body=user)
if response:
return response.content
@arg('-i', '--id', help='ID of the User to update.')
@arg('-n', '--name', help='Username for the User to update.')
@arg('-u', '--username', help='New username for the User.')
@arg('-fn', '--first-name', help='New first name.')
@arg('-ln', '--last-name', help='New last name.')
@arg('-e', '--email', help='New email.')
def update_user(id=None, name=None, **kwargs):
found_id = get_user_id(id, name)
if not found_id:
return
to_update = users_api.get_specific(id=found_id).content
for key, value in iteritems(kwargs):
if value is not None:
setattr(to_update, key, value)
response = utils.checked_api_call(users_api, 'update', id=found_id, body=to_update)
if response:
return response.content
|
995,419 | a2f422ebdf558ca5cb4911d1d68045f83ae4cdcf | """
ABC048 A - AtCoder *** Contest
https://atcoder.jp/contests/abc048/tasks/abc048_a
"""
a,b,c = input().split()
print(a[0]+b[0]+c[0])
|
995,420 | 2b4306e57eda5de7755dda4d2e9349b72b8cb7f9 | # from selenium import webdriver
# driver = webdriver.Firefox(r'C:\\Users\\udos8\\Downloads\\geckodriver.exe')
# driver.get('https://www.ebay-kleinanzeigen.de/m-einloggen.html?targetUrl=/anzeigen/m-einloggen.html')
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support.expected_conditions import presence_of_element_located
with webdriver.Firefox() as driver:
driver.get("http://google.com/ncr")
driver.find_element_by_name("q").send_keys("cheese" + Keys.RETURN)
wait.until(presence_of_element_located((By.CSS_SELECTOR, "h3>a")))
results = driver.find_elements_by_css_selector("h3>a")
for i, result in results.iteritems():
print("#{}: {} ({})".format(i, result.text, result.get_property("href")))
wait = WebDriverWait(driver, 10) |
995,421 | 693401563a43a4f19f4737945740b42097157f42 | import pandas as pd
import numpy as np
import preproc as pre
cols = ["Date", "Open", "High", "Low", "Close", "Volume", "Name"]
names = [
"MMM",
"AXP",
"AAPL",
"BA",
"CAT",
"CVX",
"CSCO",
"KO",
"DIS",
"XOM",
"GE",
"GS",
"HD",
"IBM",
"INTC",
"JNJ",
"JPM",
"MCD",
"MRK",
"MSFT",
"NKE",
"PFE",
"PG",
"TRV",
"UTX",
"UNH",
"VZ",
"WMT",
"GOOGL",
"AMZN",
"AABA"
]
def detrend():
df = pre.preproc()
# detrends each stock time series using the difference method
for n in names:
df[n] = df[n].diff()
return df
if __name__ == "__main__":
detrend()
|
995,422 | 1fb91d88c54258bb7a479c4e87c91486388f50e7 | import numpy as np;
import scipy.linalg as linalg
def filterOutliers (Features, Dataset, numStdDevs):
NewData = Dataset[:,:]
for i in Features:
var = NewData[:,i]
avgvar = round(np.mean(var),3)
stdvar = round(np.std(var),3)
NewData = NewData[:,:][((NewData[:,i] < avgvar + (stdvar*numStdDevs)) & (NewData[:,i] > avgvar - (stdvar*numStdDevs)))]
return NewData
def estimateVars (dataset):
[m,n] = dataset.shape
mu = sum(dataset) /m
sigma = sum(np.power(dataset-mu,2))/m
return mu, sigma
def probabilitize(data,mu,sigma):
k = len(mu)
if sigma.ndim == 1:
sigma = np.reshape(sigma,(-1,sigma.shape[0]))
if sigma.shape[1] == 1 or sigma.shape[0] == 1:
sigma = linalg.diagsvd(sigma.flatten(), len(sigma.flatten()), len(sigma.flatten()))
X = data - mu.reshape(mu.size, order='F').T
p = np.dot(np.power(2 * np.pi, - k / 2.0), np.power(np.linalg.det(sigma), -0.5) ) * \
np.exp(-0.5 * np.sum(np.dot(X, np.linalg.pinv(sigma)) * X, axis=1))
return p
def getThreshold(yval, pval):
stepsize = (max(pval) - min(pval)) / 1000
i = min(pval)
F1 = 0
bestF1 = 0
bestEps = 0
truepos = 0
falsepos = 0
falseneg = 0
while i < max(pval):
pred = pval < i;
tp = sum((pred - yval == 0) & (yval ==1))
fp = sum((pred-yval == 1) & (pred==1))
fn = sum((yval - pred ==1) & (pred == 0))
prec = tp /(tp + fp);
rec = tp / (tp + fn);
F1 = (2 * prec * rec) / (prec + rec)
if F1 > bestF1:
bestF1 = F1
bestEps = i
truepos = tp
falsepos = fp
falseneg = fn
i += stepsize
return bestEps, bestF1, truepos, falsepos, falseneg
def trainData(TrainData, ValidationData, yval, featureCols, standardDevs):
X = filterOutliers(featureCols, TrainData,standardDevs)
Xval = ValidationData[:,featureCols]
Xtrain = X[:,featureCols]
[mu, sigma] = estimateVars(Xtrain)
pval = probabilitize(Xval, mu, sigma)
[eps, bestF1, tp, fp, fn] = getThreshold(yval,pval)
print("best epsilon = " + str(eps))
print("best F1 = " + str(bestF1))
print("True Positives = " + str(tp))
print("False Positives = " + str(fp))
print("False Negatives = " + str(fn))
return eps, bestF1, tp, fp, fn, mu, sigma
def exportData(dataset, filename):
path = '/yourpathhere/'
np.savetxt(path + filename, dataset, delimiter = ',', fmt='%25.8f')
def predictNewData (freshDataset, featureCols, eps, mu, sigma):
features = freshDataset[:,featureCols]
p = probabilitize(features,mu,sigma)
c = freshDataset.shape[1]
predictions = p < eps
predictions.shape = [predictions.size,1]
predictedData = np.append(freshDataset,predictions,1)
predictedData = predictedData[:,:][(predictedData[:,c]==1)]
return p, predictedData
def testThreshold(testData, ytest, eps, mu, sigma, featureCols):
Xval = testData[:,featureCols]
[predScore, predData] = predictNewData(testData, featureCols, eps,mu,sigma)
prediction = predScore < eps
tp = sum((prediction - ytest == 0) & (ytest ==1))
fp = sum((prediction-ytest == 1) & (prediction==1))
fn = sum((ytest - prediction ==1) & (prediction == 0))
prec = tp /(tp + fp);
rec = tp / (tp + fn);
F1 = (2 * prec * rec) / (prec + rec)
print("Testing epsilon = " + str(eps))
print("F1 = " + str(F1))
print("True Positives = " + str(tp))
print("False Positives = " + str(fp))
print("False Negatives = " + str(fn))
print("Precision = " + str(prec))
print("Recall = " + str(rec))
return F1, tp, fp, fn, prec, rec
##NEW VARIABLES
"""
loadedData = np.loadtxt("/yourpathhere/Dec10_WW.txt"
, skiprows = 1, delimiter = ',');
validationData = np.loadtxt("/yourpathhere/Jan2020_validationSet.txt"
, skiprows = 1, delimiter = ',');
testData = np.loadtxt("/yourpathhere/Jan2020_testSet.txt"
, skiprows = 1, delimiter = ',');
yval = validationData[:,6]
ytest = testData[:,6]
featureCols = [1,2,3,4,5]
##Get Params
[eps, bestF1, tp, fp, fn, mu, sigma]= trainData(loadedData, validationData, yval, featureCols, 55)
testThreshold(testData, ytest, eps, mu, sigma, featureCols)
[p, predData] = predictNewData(loadedData, featureCols, eps,mu,sigma)
exportData(predData, 'processedFile_Dec30.txt')
"""
|
995,423 | ab6b3ed2a5457fd4abb37df1771b5ade771c04e9 |
x = open("D:\PythonPracticeAgain\FileHandling\demofile.txt","r")
print(x.read(8)) |
995,424 | 86d1df702af501a47dba9a6837a2293668defbad | #!/usr/bin/python
# -*- coding: utf-8 -*-
import socket
import sys
import time
time.sleep(3) #margen de busqueda de nodos
# Creando un socket TCP/IP
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Conecta el socket en el puerto cuando el servidor este escuchando
server_address = ('127.0.0.1', 5000)
print ('conectando a ' + str(server_address[0]) + ' puerto '+ str(server_address[1]))
sock.connect(server_address)
log = open("registro_cliente.txt","a")
try:
# Enviando datos
message = 'Hola soy ' + server_address[0]
messageByte = message.encode('utf-8')
sock.sendall(messageByte)
databyte = sock.recv(1024)
data = databyte.decode()
print ("Respuesta desde el servidor: " + data)
log.write(data + "\n")
databyte2 = sock.recv(1024)
data2 = databyte2.decode()
print ("servidor dice: " + data2)
log.write(data2 + "\n")
finally:
print('cerrando socket')
log.close()
|
995,425 | ec1bc87342351ddc351bc240263f32e8ea6602de | from setuptools import setup
setup(name='craves_control',
version='0.0.1',
install_requires=['gym', 'argparse', 'pyusb'] # And any other dependencies foo needs
) |
995,426 | 141806630c4251b17f4b16463c7504fcf536b90e | import random
import time
import sys
import csv
# Creates and return list of random elements
root_tree = None
root_list = None
def rand_table_creator(table_size):
random_table = []
counter = 0
while counter < table_size:
k = random.randrange(0,(table_size*10)) # it choose value in range 0 to 4 * table size that is not already in table
if k not in random_table:
random_table.append(k)
counter+=1
return random_table
def timer(f, A):
tic = time.perf_counter()
f(A)
toc = time.perf_counter()
return round(toc - tic, 5)
class Node:
def __init__(self, data):
self.left = None
self.right = None
self.data = data
# Insert method to create nodes
def insert(self, data):
if self.data:
if data < self.data:
if self.left is None:
self.left = Node(data)
else:
self.left.insert(data)
elif data > self.data:
if self.right is None:
self.right = Node(data)
else:
self.right.insert(data)
else:
self.data = data
# findval method to compare the value with nodes
def findval(self, lkpval):
if lkpval < self.data:
if self.left is None:
return str(lkpval)
return self.left.findval(lkpval)
elif lkpval > self.data:
if self.right is None:
return str(lkpval)
return self.right.findval(lkpval)
else:
return self.data
# Print the tree
def PrintTree(self):
if self.left:
self.left.PrintTree()
print( self.data),
if self.right:
self.right.PrintTree()
def sort_tree(arr1):
root = None
t = arr1.pop(0)
root = Node(t)
for v in arr1:
root.insert(v)
global root_tree
root_tree = root
def upgradeheight(root):
if root == None:
return 0
else:
return 1+ max(upgradeheight(root.left), upgradeheight(root.right))
class Node_linked_list:
def __init__(self, data=None, next=None):
self.data = data
self.next = next
def insert1(self, value):
if self == None or self.data < value:
return Node_linked_list(value,self)
else:
current = self
while current.next != None and current.next.data > value:
current = current.next
new1 = Node_linked_list(value,current.next)
current.next = new1
return self
def search_linked_list(arr1):
for value in arr1:
global root_list
root = root_list
while root.data != value:
root = root.next
def sort_linked_list(array1):
root = None
root = Node_linked_list(array1.pop(0), root)
for value in array1:
root = root.insert1(value)
global root_list
root_list = root
return root
def search_tree(arr1):
for n in arr1:
root_tree.findval(n)
def delate_list(arr1):
global root_list
root = root_list
while root != None:
curr = root
del root
root = curr.next
del root
def delate_tree(root):
if root != None:
delate_tree(root.left)
delate_tree(root.right)
del root
def delate_tree_first_call(arr):
global root_tree
delate_tree(root_tree)
def data_input(begining, step, number_of_steps, creation_time_dict, search_time_dict, delate_time_dict):
for x in range(begining,(number_of_steps*step) + begining+1, step):
arr1 = rand_table_creator(x)
arr2 = arr1
creation_time_dict["Number_of_elements"].append(x)
creation_time_dict["Linked_list"].append(timer(sort_linked_list, arr1))
creation_time_dict["Binary_search_tree"].append(timer(sort_tree, arr2))
search_time_dict["Number_of_elements"].append(x)
search_time_dict["Linked_list"].append(timer(search_linked_list, arr1))
search_time_dict["Binary_search_tree"].append(timer(search_tree, arr2))
delate_time_dict["Number_of_elements"].append(x)
delate_time_dict["Linked_list"].append(timer(delate_list, arr1))
delate_time_dict["Binary_search_tree"].append(timer(delate_tree_first_call, arr2))
def dictcreator():
data_set_algorithms = {}
data_set_algorithms["Number_of_elements"] = []
data_set_algorithms["Linked_list"] = []
data_set_algorithms["Binary_search_tree"] = []
return data_set_algorithms
if __name__ == '__main__':
dict1 = dictcreator()
dict2 = dictcreator()
dict3 = dictcreator()
data_input(1000,1000,15,dict1,dict2, dict3)
list = [dict1, dict2, dict3]
SortingMethods = ["Creation_time", "Search_time", "Delate_time"]
for i in range(0, 3):
My_Dict = list[i]
zd = zip(*My_Dict.values())
with open(SortingMethods[i] + ".csv", 'w') as file:
writer = csv.writer(file, delimiter=',')
writer.writerow(My_Dict.keys())
writer.writerows(zd)
|
995,427 | 903ac24d4520776456e4a0aa4d88988cbb2a6cb0 | from environs import Env
from flask import Flask, jsonify, request
from flask_cors import CORS
from .campaign_queries import (create_campaign_confs, create_campaign_for_user,
get_campaign_configs, get_campaigns_for_user)
app = Flask(__name__)
CORS(app)
env = Env()
db_conf = {
"db": env("CHATBASE_DATABASE"),
"user": env("CHATBASE_USER"),
"host": env("CHATBASE_HOST"),
"port": env("CHATBASE_PORT"),
"password": env("CHATBASE_PASSWORD", None),
}
@app.route("/campaigns", methods=["GET"])
def get_campaigns():
email = request.args.get("email")
if email:
res = get_campaigns_for_user(email, db_conf)
return jsonify(res), 200
else:
# get all active campaigns???
pass
@app.route("/campaigns", methods=["POST"])
def create_campaign():
email = request.args.get("email")
name = request.args.get("name")
res = create_campaign_for_user(email, name, db_conf)
return res, 201
@app.route("/campaigns/:campaignid/confs/:conf_type", methods=["POST"])
def create_conf(campaignid, conf_type):
dat = request.json
create_campaign_confs(campaignid, conf_type, dat, db_conf)
return "OK", 200
@app.route("/campaigns/:campaignid/confs", methods=["GET"])
def get_confs(campaignid):
res = get_campaign_configs(campaignid, db_conf)
return jsonify(res), 200
def create_image():
fi = request.files.get("file")
if fi and allowed_file(fi.filename):
b = fi.read()
s = base64.b64encode(b).decode()
# make image in facebook api with those bytes
# ...
# store in database and do this elsewhere???
# state.account.create_ad_image(params = {AdImage.Field.bytes: s, AdImage.Field.name: 'vlab-mnm-test'})
else:
return "poop", 400
|
995,428 | a1b0a3bff7d902f4e094e91dd8a0cc90c97fa3f4 | # Generated by Django 2.2 on 2019-05-08 10:53
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('session', '0007_auto_20190505_0342'),
]
operations = [
migrations.CreateModel(
name='Board',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('x', models.PositiveSmallIntegerField(help_text='보드 위 x 좌표')),
('y', models.PositiveSmallIntegerField(help_text='보드 위 y 좌표')),
],
),
]
|
995,429 | afdfaf841ed65d10dd67ad178ef9bf826003c745 | # 코드 5-2 값을 비교하는 코드
print(1 < 2)
print(2 < 1)
|
995,430 | 5fd72e1e221c32820df0b3de567cd1ac5a2c7576 | from flask import Flask, render_template ,request,url_for,escape,session,redirect,abort
import sqlite3 as sql
# import admin
from flask_bcrypt import Bcrypt
app = Flask(__name__)
app.secret_key = 'any random string'
bcrypt = Bcrypt(app)
def Convert(tup, di):
di = dict(tup)
return di
# Show PROFESSOR list
def show_profl():
@app.route('/show_profs')
def show_profs():
return render_template('show_profs.html', lisp=session['lisp'],exist=session['exist'])
# Display feedback FORM
def feedform():
@app.route('/stform')
def stform():
print(session['branch'],session['semester'])
return render_template('stform.html', branch=session['branch'],semester=session['semester'])
# FEEDBACK MODULE......
def feedmodule():
@app.route('/feedback', methods=['POST', 'GET'])
def feedback():
if request.method == 'POST':
lecturer = request.form['lecturer']
st_rollno = session['roll_no']
year = session['year']
semester = session['semester']
branch = session['branch']
subject = request.form['subject']
preparation = request.form['preparedness']
information = request.form['informative']
explanation = request.form['explaining']
pace = request.form['pace']
leadership = request.form['leading']
receptive = request.form['receptive']
interest = request.form['interest']
discussion = request.form['discussion']
learning = request.form['learn']
rapport = request.form['rapport']
available = request.form['available']
current = [lecturer,st_rollno,year,semester,branch,subject]
con = sql.connect("database.db")
cur = con.cursor()
cur.execute("select lecturer,st_rollno,year,semester,branch,subject from feedback where st_rollno=?",[session['roll_no']])
alrdyexist=cur.fetchall()
for i in range(0,len(alrdyexist)):
if list(alrdyexist[i]) == current:
msgx="This feedback is already registered"
con.close()
return render_template('show_profs.html', lisp=session['lisp'], msgx=msgx, exist=session['exist'])
else:
cur.execute("INSERT INTO feedback (lecturer,st_rollno,year,semester,branch,subject,preparation,information,explanation,pace,leadership,receptive,interest,discussion,learning,rapport,available)VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)",(lecturer,st_rollno,year,semester,branch,subject,preparation,information,explanation,pace,leadership,receptive,interest,discussion,learning,rapport,available,))
con.commit()
cur.execute("select distinct lecturer,subject from feedback where st_rollno=?", [session['roll_no']])
global lis1,lis2,lis3
lis2 = cur.fetchall()
lis3 = [x for x in lis1 if x not in lis2]
dictionary = {}
session['lisp'] = Convert(lis3, dictionary)
session['exist'] = Convert(lis2, dictionary)
con.close()
return redirect(url_for('show_profs'))
#view RESPONSE
# def viewres():
# @app.route('/response', methods=['POST', 'GET'])
def response():
if request.method == 'POST':
restech = request.form['restech']
ressub = request.form['ressub']
print(restech)
print(ressub)
con = sql.connect("database.db")
cur = con.cursor()
cur.execute("select preparation,information,explanation,pace,leadership,receptive,interest,discussion,learning,rapport,available from feedback where st_rollno=? and lecturer=? and subject=?",(session['roll_no'],restech,ressub))
out=cur.fetchall()
res = [item for t in out for item in t]
print(res)
return render_template('response.html', res=res,branch=session['branch'],semester=session['semester'],restech=restech,ressub=ressub)
else:
print('this worked')
|
995,431 | a23b17a176629e582bfb344f9cefc0bfca36b160 | """===========================================
파일이름 : file_sort_day_folder.py
함수기능 : 실행시 같은 폴더 내에 있는 파일들의 목록을 텍스트 파일로 만든후 생성 날짜별로 폴더 생성후 정리
최초개발 : 2018-04-22
최종수정 : 2018-04-28
copyright ⓒ 2017 S.W.Yang All Rights Reserved
==============================================
2018-04-25
-> 파일이름 내부의 공백 문자 인식 수정
2018_04_25
-> 프로그램 실행시 file_list.txt.파일 폴더로 위치이동 오류 수
2018-04-28
-> dir명령어 수정으로 디렉토리 탐색 제외
-> 실행 속도가 빨라짐
==========================================="""
import sys, os
os.system("dir > file_list.txt")
all_lst = open("file_list.txt", "r")
lines = all_lst.readlines()
for i in range(len(lines)):
if(lines[i].count("오전") or lines[i].count("오후")):
if(lines[i][39:-1] == "file_sort_day_forder.py"):
continue
if(lines[i][39:-1] == "file_list.txt"):
continue
#if(not(lines[i].count("<DIR>"))):
os.system("mkdir %s" % lines[i][:10])
os.system("move %s %s" % (lines[i][39:-1].replace(" ", "?"), lines[i][:10]))
|
995,432 | cb12f43ecc884d1af31da4d23eb3a2caa8499246 | /Users/andrewbeatty/anaconda3/lib/python3.7/abc.py |
995,433 | e7a0e9cadbc7f65eecb5f926e068cdd72feea474 | from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from time import sleep
from selenium.webdriver.remote import mobile
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import Select
from selenium.webdriver.common.action_chains import ActionChains
import pickle
chromedriver_path = '/usr/bin/chromedriver'
brave_path = '/usr/bin/brave-browser'
option = webdriver.ChromeOptions()
option.add_argument("user-data-dir=selenium")
option.binary_location = brave_path
browser = webdriver.Chrome(executable_path=chromedriver_path,options=option)
browser.get('https://support.snapchat.com/en-US/i-need-help?start=5695496404336640')
cookies = pickle.load(open("cookies.pkl", "rb"))
for cookie in cookies:
browser.add_cookie(cookie)
fusername = 'nnn'
# wait = WebDriverWait(browser, 10)
WebDriverWait(browser, 10).until(EC.visibility_of_all_elements_located((By.CSS_SELECTOR, '#field-24281229')))
search_form = browser.find_element(By.TAG_NAME, "form")
username = search_form.find_element_by_xpath('//*[@id="field-24281229"]')
username.send_keys("nmm")
email = search_form.find_element_by_xpath('//*[@id="field-24335325"]')
email.send_keys("dummy@gmail.com")
mobile_num = search_form.find_element_by_xpath('//*[@id="field-24369716"]')
mobile_num.send_keys("8888888888")
device = search_form.find_element_by_xpath('//*[@id="field-24369726"]')
device.send_keys("Vivo Z1 Pro")
friend_username = search_form.find_element_by_xpath('//*[@id="field-24369736"]')
friend_username.send_keys(fusername)
today = search_form.find_element_by_xpath('//*[@id="field-24326423"]')
today.send_keys('Today')
streak = search_form.find_element_by_xpath('//*[@id="field-24641746"]')
streak.send_keys('200')
streak.send_keys(Keys.TAB,'N',Keys.TAB,"My Streak Disappeared.")
# pickle.dump(browser.get_cookies() , open("cookies.pkl","wb"))
submit = search_form.find_element_by_xpath('//*[@id="submit-button"]')
submit.click()
sleep(5)
browser.close()
|
995,434 | 1db971ec39708916b7e4e8585d01bdf17ea28e8b | import sys
sys.stdin = open('2108_input.txt')
'''
수를 처리하는 것은 통계학에서 상당히 중요한 일이다.
통계학에서 N개의 수를 대표하는 기본 통계값에는 다음과 같은 것들이 있다. 단, N은 홀수라고 가정하자.
산술평균 : N개의 수들의 합을 N으로 나눈 값
중앙값 : N개의 수들을 증가하는 순서로 나열했을 경우 그 중앙에 위치하는 값
최빈값 : N개의 수들 중 가장 많이 나타나는 값
범위 : N개의 수들 중 최댓값과 최솟값의 차이
N개의 수가 주어졌을 때, 네 가지 기본 통계값을 구하는 프로그램을 작성하시오.
첫째 줄에는 산술평균을 출력한다. 소수점 이하 첫째 자리에서 반올림한 값을 출력한다.
둘째 줄에는 중앙값을 출력한다.
셋째 줄에는 최빈값을 출력한다. 여러 개 있을 때에는 최빈값 중 두 번째로 작은 값을 출력한다.
넷째 줄에는 범위를 출력한다.
'''
n = int(input())
a = []
a_dic = {}
answer = []
for _ in range(n):
k = int(input())
a.append(k)
if str(k) in a_dic:
a_dic[str(k)] += 1
else:
a_dic[str(k)] = 1
a.sort()
a_dic = sorted(a_dic.items(), key=(lambda x: x[1]), reverse=True)
# print(a)
# print(a_dic)
answer.append(round(sum(a)/n))
answer.append(a[n//2])
answer_mode = [int(a_dic[0][0])]
if n != 1:
for i in range(1, len(a_dic)):
if a_dic[i][1] != a_dic[i-1][1]:
break
else:
answer_mode.append(int(a_dic[i][0]))
answer_mode.sort()
if len(answer_mode) != 1:
answer.append(answer_mode[1])
else:
answer.append(answer_mode[0])
else:
answer.append(answer_mode[0])
answer.append(max(a)-min(a))
for i in range(4):
print(answer[i]) |
995,435 | 474342fe66d842004d289e060450cba0f5d9e9b0 | '''
Proyecto GesCred
----------------
Descripcion: Gestor de Credenciales, aplicacion para la administracion
de ususarios y claves de accesos de tus cuentas personal.
El proyecto fue programado en Visual Studio 2015 Community.
Autor: Sergio Marquez (OneLog - onelog@protonmail.ch)
Fecha: 29/08/2016
Version: 1.0.0
'''
import tkinter
import colorsys
print('#' * 80, end = '\n')
print('Hola Mundo')
|
995,436 | 2217e60e3334f722ed8f27d149efc134a7874615 | from src.planners.planner import Planner
from src.models.task import Task
from typing import List
from math import gcd, floor , pow
from src.models.execution_matrix import ExecutionMatrix
from copy import deepcopy
import functools
class RateMonotonicPlanner(Planner):
def __init__(self, tasks: List[Task], processors: int = 1):
super().__init__(tasks, processors)
if not self.is_planeable():
print("RateMonotonic no cumple el factor de utilizacion del conjunto de tareas, igual se intentara planificar pero no entraran todas las tareas")
self.matrix = None
def is_planeable(self):
n = len(self.tasks)
max_threshold = n*(pow(2 , (1/n) ) - 1)
list_map_task = list(map(lambda t: t.compute_time/t.deadline , self.tasks))
utilization_factor = functools.reduce(lambda a,b: a+b , list_map_task)
return utilization_factor <= max_threshold
def sort_tasks(self, tasks: List[Task]):
return sorted(tasks, key=lambda x: x.deadline)
def get_plan(self) -> ExecutionMatrix:
self.matrix = ExecutionMatrix(self.processors, self.hyperperiod)
tasks_to_add = []
for x in range(self.hyperperiod):
for t in self.tasks:
if self.can_add_task(t, x) and not self.is_task_with_same_id_in_list(t, tasks_to_add):
tasks_to_add.append(t)
tasks_to_add = self.sort_tasks(tasks_to_add)
for p in range(self.processors):
processor = self.matrix.processors[p]
processor.add_time_unit()
if len(tasks_to_add) == 0:
continue
if processor.is_free():
processor.set_task(tasks_to_add.pop(0))
else:
current_task = processor.get_current_task()
if current_task.deadline > tasks_to_add[0].deadline:
t_copy = deepcopy(current_task)
t_copy.compute_time = t_copy.compute_time - (x - processor.get_task_last_start_time(current_task))
tasks_to_add.append(t_copy)
processor.set_task(tasks_to_add.pop(0))
tasks_to_add = self.sort_tasks(tasks_to_add)
return self.matrix
def can_add_task(self, task: Task, time: int) -> bool:
last_executed_deadline = floor(self.matrix.get_last_time_task_started(task) / task.deadline)
current_deadline = floor(time / task.deadline)
return last_executed_deadline < current_deadline and self.hyperperiod >= time + task.compute_time
"""
This method compares 'taks_id' property in tasks, not the reference
"""
def is_task_with_same_id_in_list(self, task: Task, tasks_list: List[Task]) -> bool:
for x in tasks_list:
if x.task_id == task.task_id:
return True
return False
|
995,437 | 262bab792d5383c0cf00124326e63bfb866d319f | from flask.ext.wtf import Form
from wtforms import TextField, BooleanField, TextAreaField, SelectField, IntegerField, validators
from wtforms.validators import Required, Length
from flask.ext.babel import gettext
from app.models import User
class LoginForm(Form):
openid = TextField('openid', validators = [Required()])
remember_me = BooleanField('remember_me', default = False)
class EditForm(Form):
nickname = TextField('nickname', validators = [Required()])
about_me = TextAreaField('about_me', validators = [Length(min = 0, max = 500)])
def __init__(self, original_nickname, *args, **kwargs):
Form.__init__(self, *args, **kwargs)
self.original_nickname = original_nickname
def validate(self):
if not Form.validate(self):
return False
if self.nickname.data == self.original_nickname:
return True
if self.nickname.data != User.make_valid_nickname(self.nickname.data):
self.nickname.errors.append(gettext('This nickname has invalid characters. Please use letters, numbers, dots and underscores only.'))
return False
user = User.query.filter_by(nickname = self.nickname.data).first()
if user != None:
self.nickname.errors.append(gettext('This nickname is already in use. Please choose another one.'))
return False
return True
class PostForm(Form):
subject = TextField('subject', validators = [Required(), Length(min = 1, max = 140)])
post = TextAreaField('post', validators = [Required(), Length(min = 1, max = 1000)])
public = BooleanField('public', default = True)
class AnswerForm(Form):
answer = TextAreaField('answer', validators = [Required(), Length(min = 1, max = 1000)])
class EmailGroupForm(Form):
recipients = TextAreaField('recipients', validators = [Required()])
class CommentForm(Form):
comment = TextAreaField('comment', validators = [Length(min = 1, max = 1000)])
class GroupPost(Form):
group_access = BooleanField('group_access', default = False)
class SearchForm(Form):
search = TextField('search', validators = [Required(), Length(min = 1, max = 80)])
search_type = SelectField('search_type', choices=[('User','Nickname or Email'), ('Group', 'Group Name')]) #('Post', 'Prayer Request'), Restrict to only post that user has permission to view.
class ChurchForm(Form):
church_name = TextField('church_name', validators = [Required(), Length(min = 1, max = 80)])
about_church = TextAreaField('about_church', validators = [Length(max = 1000)])
public = BooleanField('public', default = True)
class GroupForm(Form):
group_name = TextField('group_name', validators = [Required(), Length(min = 1, max = 80)])
about_group = TextAreaField('about_group', validators = [Length(max = 500)])
public = BooleanField('public', default = True)
class AddressForm(Form):
datetime = TextField('datetime', validators = [Required(), Length(min = 1, max = 140)])
address = TextField('address', validators = [Required(), Length(min = 1, max = 140)])
address2 = TextField('address2', validators = [Length(min = 0, max = 25)])
city = TextField('city', validators = [Required(), Length(min = 1, max = 80)])
state = TextField('state', validators = [Required(), Length(min = 1, max = 80)])
zipcode = IntegerField('zipcode')#, validators = [validators.Regexp("^\d{5}(?:[-\s]\d{4})?$", message = "Must be a valid US zipcode")])
directions = TextAreaField('directions', validators = [Length(min = 0, max = 500)])
class BibleVerseForm(Form):
verse = TextField('verse', validators = [Required()])
spritz_verses = TextAreaField('spritz_verses', validators = [Required()])
|
995,438 | c6d0f54656f585420c687025a0d549d3c6b2facf | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('uberjobs', '0003_auto_20150329_1936'),
]
operations = [
migrations.AddField(
model_name='econcategory',
name='category_id',
field=models.IntegerField(default=1),
preserve_default=True,
),
]
|
995,439 | 3121c9d19fcb839957f96f5effeb09b5167d52d6 | n=int(input(" "))
if(n<=10000000 and n>0):
print(n+1)
else:
print("invalid")
|
995,440 | f65c9df90ccefaf4e1d749f591d1f2a3cd2c353f | from .node_clustering import NodeClustering
from .edge_clustering import EdgeClustering
from .fuzzy_node_clustering import FuzzyNodeClustering
|
995,441 | 206941fc791f1e270bad65577011e6885ecb4380 | ## ## ## param.py v.0.1
## ## ## This program returns common parameters
## ## ## Created: 08/05/2012 - KDP
## ## ## Last Edited: 08/05/2012 - KDP
def lj(string):
"""
Returns common parameters for lj argon.
ntpy.param.lj(string)
Parameters
----------
string : str
A string that corresponds to a lennard jones argon parameter.
"""
ljparams = dict({
'lat0' : 5.269, # Lattice constant for argon at 0 K in angstroms
'lat20' : 5.315, # Lattice constant for argon at 20 K in angstroms
'lat35' : 5.355, # Lattice constant for argon at 35 K in angstroms
'lat50' : 5.401, # Lattice constant for argon at 50 K in angstroms
'lat65' : 5.455, # Lattice constant for argon at 65 K in angstroms
'lat80' : 5.527, # Lattice constant for argon at 80 K in angstroms
'epsilon' : 1.67e-21, # Epsilon constant for argon in joules
'sigma' : 3.40e-10, # Sigma constant for argon in meters
'mass' : 6.63e-26, # Mass constant for argon in kilograms
'tau' : 2.14e-12 # Tau constant for argon in seconds
})
try:
return ljparams[string]
except KeyError, e:
print "KeyError: %s is not a valid key for ntpy.param.lj()." % e
raise
##### END LJ
def const(string):
"""
Returns common constant parameters.
ntpy.param.const(string)
Parameters
----------
string : str
A string that corresponds to a constant parameter.
"""
constparams = dict({
'kb' : 1.3806e-23, # Boltzmann's constant
'hbar' : 1.054e-34, # Planck's constant
'topeta' : 1e15, # To peta-
'totera' : 1e12, # To tera-
'togiga' : 1e9, # To giga-
'tomega' : 1e6, # To mega-
'tokilo' : 1e3, # To kilo-
'tocenti' : 1e-2, # To centi-
'tomilli' : 1e-3, # To milli-
'tomicro' : 1e-6, # To micro-
'tonano' : 1e-9, # To nano-
'topico' : 1e-12, # To pico-
'tofemto' : 1e-15, # To femto-
})
try:
return constparams[string]
except KeyError, e:
print "KeyError: %s is not a valid key for ntpy.param.const()." % e
raise
##### END LJ
|
995,442 | cc2c7cd4e737a6cc4fd8073f967ffe6519b5d488 | #!/usr/bin/python
import networkx as nx
import parser
import optparse
import os
import
def centrality(edgeList, ctype):
"""
"""
print "centrality start"
file = open(edgeList, "r")
graph = nx.read_edgelist(file, comments="#", create_using=nx.DiGraph(), nodetype=int)
file.close()
N = nx.number_of_nodes(graph)
if ctype == "out_degree":
centrality = nx.out_degree_centrality(graph)
elif ctype == "betweenness":
centrality=nx.betweenness_centrality(graph, k=int(N/100))
elif ctype == "height":
return 1
else:
centrality = nx.closeness_centrality(graph)
return centrality
def colorPercentile(modelFile, centrality):
"""
"""
print "colorPercentile start"
# Get ids of nodes in model and sort by decreasing centrality
modelFile = open(modelFile, "r")
model=[(0,None)]
for line in modelFile:
line=line.split()
if line[1]!='None':
model+=[(int(line[0]), int(line[1]))]
a=[(centrality[x[1]], x[1]) for x in model if x[1] != None]
s=set(a)
a=sorted(list(s), reverse=True)
print "sorting done"
# Assign colors to nodes
length=len(a)
percentLen = int(length*0.01)
colors = {}
# Top 1%
for i in range(percentLen):
colors[a[i][1]]="darkred"
# 1-5
for i in range(percentLen, percentLen*5):
colors[a[i][1]]="red"
# 5-10
for i in range(percentLen*5, percentLen*10):
colors[a[i][1]]="mediumred"
# 10-15
for i in range(percentLen*10, percentLen*15):
colors[a[i][1]]="lightred"
# 15-25
for i in range(percentLen*15, percentLen*25):
colors[a[i][1]]="pink"
for i in range(percentLen*25,length):
colors[a[i][1]]="white"
print "colorPercentile done"
return colors, model
def writeColors(title, model, contentFile, colors, ctype):
print "writeColors start"
# Write style sheet
if not os.path.isdir('centrality'):
os.mkdir('centrality')
colorFile = open("centrality/"+("test_"+ctype+"_"+title).replace(" ", "_")+".html", "w")
colorFile.write("<!DOCTYPE html>\n<html>\n<head>\n<style/>\n")
colorFile.write(".white {\n\tbackground-color: white;\n\tcolor: black;\n}\n")
colorFile.write(".pink {\n\tbackground-color: #ffcccc;\n\tcolor: black;\n}\n")
colorFile.write(".lightred {\n\tbackground-color: #ff9999;\n\tcolor: black;\n}\n")
colorFile.write(".mediumred {\n\tbackground-color: #ff4d4d;\n\tcolor: black;\n}\n")
colorFile.write(".red {\n\tbackground-color: #cc0000;\n\tcolor: black;\n}\n")
colorFile.write(".darkred {\n\tbackground-color: #990000;\n\tcolor: blacj=k;}\n")
colorFile.write("</style>\n</head>\n")
# Write content
colorFile.write("<body>\n")
contentFile=open(contentFile,"r")
content=[]
for line in contentFile:
content+=[line.split()]
contentFile.close()
pos=0
dif = model[pos+1][0] - model[pos][0]
color="white"
for line in content:
current = "<p><span class="+color+">"
for i in range(len(line)):
if dif == 0:
while dif==0:
pos+=1
color=colors[model[pos][1]]
dif = model[pos+1][0] - model[pos][0]
current+="</span><span class="+color+">"
current+=line[i]+ " "
dif-=1
current+="</span></p>\n"
colorFile.write(current)
colorFile.write("</body>\n</html>")
colorFile.close()
print "writeColors done"
def wiki2centrality(title, remove, ctype):
"""
"""
parser.wiki2snap(title, remove)
if remove:
file = title.replace(" ", "_") + "_rem.txt"
else:
file = title.replace(" ", "_") + ".txt"
centralityDict = centrality("edgelists/" + file, ctype)
colors, model = colorPercentile("models/"+file, centralityDict)
writeColors(title, model, "content/"+file, colors, ctype)
def parse_args():
"""parse_args parses sys.argv for wiki2centrality."""
# Help Menu
parser = optparse.OptionParser(usage='%prog [options] title')
parser.add_option('-r', '--remove',
action='store_false', dest='remove', default=True,
help='remove mass deletions')
parser.add_option('-c', '--centrality',
type='str', dest='ctype', default='closeness',
help='type of centrality: closeness, out_degree, betweenness',
metavar='CTYPE')
(opts, args) = parser.parse_args()
# Parser Errors
if len(args) != 1:
parser.error('incorrect number of arguments')
wiki2centrality(args[0], remove=opts.remove, ctype=opts.ctype)
if __name__ == '__main__':
parse_args()
|
995,443 | 53aa2eb803e10553d3145d106d2c3fc71a888230 | def type_finder(l):
return [str(item) for item in l if (type(item) == int or type(item) == float)]
# new_list = []
# for item in l:
# if type(item) == int:
# new_list.append(str(item))
# return new_list
list_items = [1,2,3,4,(1,2,3,4,),{1:3,3:5,},'rohit','mohit',1.1,2.5]
print(type_finder(list_items))
|
995,444 | 643a56b48b2379f8ff1f69754169afdf93bb6ea6 | # -*- coding:UTF-8 -*-
"""
命令行入口文件,非IDE执行
@author: hikaru
email: hikaru870806@hotmail.com
如有问题或建议请联系
"""
import os
import sys
root_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "..\\.."))
if root_path not in sys.path:
sys.path.append(root_path)
os.chdir(root_path)
import steamCommon
import badges
account_id = steamCommon.get_account_id_from_file()
badges.main(account_id)
|
995,445 | 7414dd5d2edfa42e0b76ca902cd94f133d911ec6 | #
# @lc app=leetcode.cn id=202 lang=python3
#
# [202] 快乐数
#
# @lc code=start
class Solution:
def isHappy(self, n: int) -> bool:
cache = set()
while n!=1:
n = sum([ int(i) ** 2 for i in str(n)])
if n in cache:
return False
else:
cache.add(n)
else:
return True
# @lc code=end
|
995,446 | 6ad06ca98e337a65e980c2baf81414de802bd0e7 | X = input("自分の好みのアルファベットの順番を記載してください:")
pos = {}
for i in range(26):
pos[X[i]] = i + 1
#print(pos)
N = input("国民数を入力してください:")
S = []
for n in range(int(N)):
name = input("国民の名前を入力してください:")
S.append(name)
for j in range(int(N)):
for k in range((int(N)-1), j, -1):
word_count1 = len(S[k])
word_count2 = len(S[k - 1])
if word_count2 <= word_count1:
for l in range(word_count2):
if int(pos.get(list(S[k - 1])[l])) == int(pos.get(list(S[k])[l])):
continue
elif int(pos.get(list(S[k - 1])[l])) < int(pos.get(list(S[k])[l])):
S[k], S[k - 1] = S[k], S[k -1]
#print(S)
break
elif int(pos.get(list(S[k - 1])[l])) > int(pos.get(list(S[k])[l])):
S[k - 1], S[k] = S[k], S[k -1]
#print(S)
break
else:
pass
else:
#word_count2 > word_count1
for m in range(word_count1):
if int(pos.get(list(S[k - 1])[m])) == int(pos.get(list(S[k])[m])):
continue
elif int(pos.get(list(S[k - 1])[m])) < int(pos.get(list(S[k])[m])):
S[k], S[k - 1] = S[k], S[k -1]
#print(S)
break
elif int(pos.get(list(S[k - 1])[m])) > int(pos.get(list(S[k])[m])):
S[k - 1], S[k] = S[k], S[k -1]
#print(S)
break
else:
S[k - 1], S[k] = S[k], S[k -1]
#print(S)
#print(S)
for x in range(int(N)):
print(S[x])
|
995,447 | 68514542c03b5276511430f06d77b5606383311e | """
This class has functions for dealing with Fibonacci numbers, and Fibonacci
sequences. For an explanation of Fibonacci numbers and Fibonacci sequences,
see https://en.wikipedia.org/wiki/Fibonacci_number
"""
from typing import Generator
class Fibonacci:
# This is a list of the first 301 Fibonacci numbers from
# "The first 300 Fibonacci numbers, completely factorised"
# at http://www.maths.surrey.ac.uk/hosted-sites/R.Knott/Fibonacci/fibtable.html
# [The referenced Web page lists 301 Fibonacci numbers, those for n=0 through n=300.]
EXPECTED_FIBONACCI_SEQUENCE = [
0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610, 987, 1597,
2584, 4181, 6765, 10946, 17711, 28657, 46368, 75025, 121393, 196418,
317811, 514229, 832040, 1346269, 2178309, 3524578, 5702887, 9227465,
14930352, 24157817, 39088169, 63245986, 102334155, 165580141,
267914296, 433494437, 701408733, 1134903170, 1836311903, 2971215073,
4807526976, 7778742049, 12586269025, 20365011074, 32951280099,
53316291173, 86267571272, 139583862445, 225851433717, 365435296162,
591286729879, 956722026041, 1548008755920, 2504730781961,
4052739537881, 6557470319842, 10610209857723, 17167680177565,
27777890035288, 44945570212853, 72723460248141, 117669030460994,
190392490709135, 308061521170129, 498454011879264, 806515533049393,
1304969544928657, 2111485077978050, 3416454622906707, 5527939700884757,
8944394323791464, 14472334024676221, 23416728348467685,
37889062373143906, 61305790721611591, 99194853094755497,
160500643816367088, 259695496911122585, 420196140727489673,
679891637638612258, 1100087778366101931, 1779979416004714189,
2880067194370816120, 4660046610375530309, 7540113804746346429,
12200160415121876738, 19740274219868223167, 31940434634990099905,
51680708854858323072, 83621143489848422977, 135301852344706746049,
218922995834555169026, 354224848179261915075, 573147844013817084101,
927372692193078999176, 1500520536206896083277, 2427893228399975082453,
3928413764606871165730, 6356306993006846248183,
10284720757613717413913, 16641027750620563662096,
26925748508234281076009, 43566776258854844738105,
70492524767089125814114, 114059301025943970552219,
184551825793033096366333, 298611126818977066918552,
483162952612010163284885, 781774079430987230203437,
1264937032042997393488322, 2046711111473984623691759,
3311648143516982017180081, 5358359254990966640871840,
8670007398507948658051921, 14028366653498915298923761,
22698374052006863956975682, 36726740705505779255899443,
59425114757512643212875125, 96151855463018422468774568,
155576970220531065681649693, 251728825683549488150424261,
407305795904080553832073954, 659034621587630041982498215,
1066340417491710595814572169, 1725375039079340637797070384,
2791715456571051233611642553, 4517090495650391871408712937,
7308805952221443105020355490, 11825896447871834976429068427,
19134702400093278081449423917, 30960598847965113057878492344,
50095301248058391139327916261, 81055900096023504197206408605,
131151201344081895336534324866, 212207101440105399533740733471,
343358302784187294870275058337, 555565404224292694404015791808,
898923707008479989274290850145, 1454489111232772683678306641953,
2353412818241252672952597492098, 3807901929474025356630904134051,
6161314747715278029583501626149, 9969216677189303386214405760200,
16130531424904581415797907386349, 26099748102093884802012313146549,
42230279526998466217810220532898, 68330027629092351019822533679447,
110560307156090817237632754212345, 178890334785183168257455287891792,
289450641941273985495088042104137, 468340976726457153752543329995929,
757791618667731139247631372100066, 1226132595394188293000174702095995,
1983924214061919432247806074196061, 3210056809456107725247980776292056,
5193981023518027157495786850488117, 8404037832974134882743767626780173,
13598018856492162040239554477268290,
22002056689466296922983322104048463,
35600075545958458963222876581316753,
57602132235424755886206198685365216,
93202207781383214849429075266681969,
150804340016807970735635273952047185,
244006547798191185585064349218729154,
394810887814999156320699623170776339,
638817435613190341905763972389505493,
1033628323428189498226463595560281832,
1672445759041379840132227567949787325,
2706074082469569338358691163510069157,
4378519841510949178490918731459856482,
7084593923980518516849609894969925639,
11463113765491467695340528626429782121,
18547707689471986212190138521399707760,
30010821454963453907530667147829489881,
48558529144435440119720805669229197641,
78569350599398894027251472817058687522,
127127879743834334146972278486287885163,
205697230343233228174223751303346572685,
332825110087067562321196029789634457848,
538522340430300790495419781092981030533,
871347450517368352816615810882615488381,
1409869790947669143312035591975596518914,
2281217241465037496128651402858212007295,
3691087032412706639440686994833808526209,
5972304273877744135569338397692020533504,
9663391306290450775010025392525829059713,
15635695580168194910579363790217849593217,
25299086886458645685589389182743678652930,
40934782466626840596168752972961528246147,
66233869353085486281758142155705206899077,
107168651819712326877926895128666735145224,
173402521172797813159685037284371942044301,
280571172992510140037611932413038677189525,
453973694165307953197296969697410619233826,
734544867157818093234908902110449296423351,
1188518561323126046432205871807859915657177,
1923063428480944139667114773918309212080528,
3111581989804070186099320645726169127737705,
5034645418285014325766435419644478339818233,
8146227408089084511865756065370647467555938,
13180872826374098837632191485015125807374171,
21327100234463183349497947550385773274930109,
34507973060837282187130139035400899082304280,
55835073295300465536628086585786672357234389,
90343046356137747723758225621187571439538669,
146178119651438213260386312206974243796773058,
236521166007575960984144537828161815236311727,
382699285659014174244530850035136059033084785,
619220451666590135228675387863297874269396512,
1001919737325604309473206237898433933302481297,
1621140188992194444701881625761731807571877809,
2623059926317798754175087863660165740874359106,
4244200115309993198876969489421897548446236915,
6867260041627791953052057353082063289320596021,
11111460156937785151929026842503960837766832936,
17978720198565577104981084195586024127087428957,
29090180355503362256910111038089984964854261893,
47068900554068939361891195233676009091941690850,
76159080909572301618801306271765994056795952743,
123227981463641240980692501505442003148737643593,
199387062373213542599493807777207997205533596336,
322615043836854783580186309282650000354271239929,
522002106210068326179680117059857997559804836265,
844617150046923109759866426342507997914076076194,
1366619256256991435939546543402365995473880912459,
2211236406303914545699412969744873993387956988653,
3577855662560905981638959513147239988861837901112,
5789092068864820527338372482892113982249794889765,
9366947731425726508977331996039353971111632790877,
15156039800290547036315704478931467953361427680642,
24522987531716273545293036474970821924473060471519,
39679027332006820581608740953902289877834488152161,
64202014863723094126901777428873111802307548623680,
103881042195729914708510518382775401680142036775841,
168083057059453008835412295811648513482449585399521,
271964099255182923543922814194423915162591622175362,
440047156314635932379335110006072428645041207574883,
712011255569818855923257924200496343807632829750245,
1152058411884454788302593034206568772452674037325128,
1864069667454273644225850958407065116260306867075373,
3016128079338728432528443992613633888712980904400501,
4880197746793002076754294951020699004973287771475874,
7896325826131730509282738943634332893686268675876375,
12776523572924732586037033894655031898659556447352249,
20672849399056463095319772838289364792345825123228624,
33449372971981195681356806732944396691005381570580873,
54122222371037658776676579571233761483351206693809497,
87571595343018854458033386304178158174356588264390370,
141693817714056513234709965875411919657707794958199867,
229265413057075367692743352179590077832064383222590237,
370959230771131880927453318055001997489772178180790104,
600224643828207248620196670234592075321836561403380341,
971183874599339129547649988289594072811608739584170445,
1571408518427546378167846658524186148133445300987550786,
2542592393026885507715496646813780220945054040571721231,
4114000911454431885883343305337966369078499341559272017,
6656593304481317393598839952151746590023553382130993248,
10770594215935749279482183257489712959102052723690265265,
17427187520417066673081023209641459549125606105821258513,
28197781736352815952563206467131172508227658829511523778,
45624969256769882625644229676772632057353264935332782291,
73822750993122698578207436143903804565580923764844306069,
119447720249892581203851665820676436622934188700177088360,
193270471243015279782059101964580241188515112465021394429,
312718191492907860985910767785256677811449301165198482789,
505988662735923140767969869749836918999964413630219877218,
818706854228831001753880637535093596811413714795418360007,
1324695516964754142521850507284930515811378128425638237225,
2143402371193585144275731144820024112622791843221056597232,
3468097888158339286797581652104954628434169971646694834457,
5611500259351924431073312796924978741056961814867751431689,
9079598147510263717870894449029933369491131786514446266146,
14691098406862188148944207245954912110548093601382197697835,
23770696554372451866815101694984845480039225387896643963981,
38461794961234640015759308940939757590587318989278841661816,
62232491515607091882574410635924603070626544377175485625797,
100694286476841731898333719576864360661213863366454327287613,
162926777992448823780908130212788963731840407743629812913410,
263621064469290555679241849789653324393054271110084140201023,
426547842461739379460149980002442288124894678853713953114433,
690168906931029935139391829792095612517948949963798093315456,
1116716749392769314599541809794537900642843628817512046429889,
1806885656323799249738933639586633513160792578781310139745345,
2923602405716568564338475449381171413803636207598822186175234,
4730488062040367814077409088967804926964428786380132325920579,
7654090467756936378415884538348976340768064993978954512095813,
12384578529797304192493293627316781267732493780359086838016392,
20038668997554240570909178165665757608500558774338041350112205,
32423247527351544763402471792982538876233052554697128188128597,
52461916524905785334311649958648296484733611329035169538240802,
84885164052257330097714121751630835360966663883732297726369399,
137347080577163115432025771710279131845700275212767467264610201,
222232244629420445529739893461909967206666939096499764990979600
]
# Initializer
def __init__(self):
self.max_sequence_length = len(self.EXPECTED_FIBONACCI_SEQUENCE)
# Cache of known Fibonacci numbers
known_cache = {
0: EXPECTED_FIBONACCI_SEQUENCE[0],
1: EXPECTED_FIBONACCI_SEQUENCE[1]
}
def get_expected_fibonacci_sequence(self, sequence_length) -> list:
if not (isinstance(sequence_length, int) and
(1 <= sequence_length <= self.max_sequence_length)):
raise ValueError(
f"sequence_length must be an integer from 1 to {self.max_sequence_length} (inclusive)"
)
return self.EXPECTED_FIBONACCI_SEQUENCE[:sequence_length]
def get_fibonacci_number(self, n) -> int:
"""Recursively generate the nth Fibonacci number"""
if not isinstance(n, int) or n < 0:
raise ValueError("n must be a non-negative integer")
if n in self.known_cache:
return self.known_cache[n]
# Without caching known Fibonacci numbers like this, this function
# will generate a "maximum recursion depth exceeded" error
# (when for sufficiently large Fibonacci numbers).
# That's because Python doesn't do tail recursion elimination.
self.known_cache[n] = self.get_fibonacci_number(
n - 1) + self.get_fibonacci_number(n - 2)
return self.known_cache[n]
def generate_fibonacci_sequence(
self, sequence_length: int) -> Generator[int, None, None]:
if not isinstance(sequence_length, int) or sequence_length < 1:
raise ValueError("sequence_length must be a positive integer")
return (self.get_fibonacci_number(n) for n in range(sequence_length))
if __name__ == '__main__':
# Test a Fibonacci sequence of the maximum length
# (the length of Fibonacci().EXPECTED_FIBONACCI_SEQUENCE)
fibonacci = Fibonacci()
sequence_length = fibonacci.max_sequence_length
expected_sequence = fibonacci.get_expected_fibonacci_sequence(
fibonacci.max_sequence_length)
assert list(Fibonacci().generate_fibonacci_sequence(sequence_length)
) == expected_sequence, "Error in generated Fibonacci sequence"
|
995,448 | 1d63e8fe376462c3acd128b681d20c0800e6cd1a | # Multiples of 13
"""
Write a program that reads two integer numbers X and Y
and calculate the sum of all number not divisible by 13 between them, including both.
Input
The input file contains 2 integer numbers X and Y without any order.
Output
Print the sum of all numbers between X and Y not divisible by 13,
including them if it is the case.
"""
def main():
x = int(input())
y = int(input())
if x > y:
total = x
x = y
y = total
total = 0
for i in range(x, y+1, 1):
if (i % 13) != 0:
total += i
print(total)
if __name__ == "__main__":
main()
|
995,449 | 62ec0e882fd4e7a3341f9f5a8baa17360cca37f3 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('attachments', '0004_name_sanitization'),
('anonymization', '0001_attachmentnormalization'),
]
operations = [
migrations.CreateModel(
name='AttachmentRecognition',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('successful', models.BooleanField(default=False, help_text='True if recognition has succeeded, False otherwise.')),
('file', models.FileField(help_text='Empty filename if recognition failed.', max_length=255, upload_to='attachment_recognitions', blank=True)),
('name', models.CharField(help_text='Attachment recognition file name, e.g. "document.odt". Extension automatically adjusted when creating a new object. Empty, if file.name is empty.', max_length=255, blank=True)),
('content_type', models.CharField(help_text='Attachment recognition content type, e.g. "application/vnd.oasis.opendocument.text". The value may be specified even if recognition failed.', max_length=255, null=True)),
('created', models.DateTimeField(help_text='Date and time the attachment was recognized. Leave blank for current time.', blank=True)),
('size', models.IntegerField(help_text='Attachment recognition file size in bytes. NULL if file is NULL. Automatically computed when creating a new object.', null=True, blank=True)),
('debug', models.TextField(help_text='Debug message from recognition.', blank=True)),
('attachment', models.ForeignKey(to='attachments.Attachment')),
],
options={
},
bases=(models.Model,),
),
]
|
995,450 | 27d4805e1fcb7dad748d94da8afc8571afd87ab8 |
# Osmel Savon
# Assignment 1
# 9/9/16
def nameYr():
first = input("What is your first name? ")
last = input("What is your last name? ")
year = input("In which year were you born? ")
print (first[0:1] + "." + last[0:1] + ". " + year[2:4])
def calcPerc():
score = input ("Enter your test score:")
maxpts = input ("Enter max points possible:")
product = 100 * float(score)/float(maxpts)
print("{0:.2f}".format(product) + "%")
def addLElements():
l1 = [0,1,2,3,4,5]
l2 = [100,101,102,103,104]
position = int(input ("Enter position:"))
total = l1[position] + l2[position]
print ("The total at ", position, " is", total)
|
995,451 | 229285073e7d8d6f8a0f263ff2a894cba031f938 | [x1, y1, y2, x2] = map(int, input().split(" "))
if x1 + x2 > y1 + y2:
print("X")
elif x1 + x2 < y1 + y2:
print("Y")
elif x2 > y1:
print("X")
elif x2 < y1:
print("Y")
else:
print("P")
|
995,452 | 1ccb29ce04d6045a7d6cc1a6ac18833ef45c4095 | #!/usr/bin/env python
# coding: utf-8
import sys
import Image
import ImageFont
import ImageDraw
import requests
import urllib
import os
from StringIO import StringIO
info = open(sys.argv[1],'r').read().split('\n')
uid = info[0]
token = info[1]
target = info[2]
users = [i.split(' ') for i in info[3:]]
while users[-1] == ['']:
users = users[:-1]
avatars = []
res = requests.post('https://api.renren.com/restserver.do',
data={
'method': 'users.getInfo',
'v': '1.0',
'access_token': token,
'format': 'json',
'uids': ','.join(map(lambda t: t[0], users)),
'fields': 'uid,name,mainurl'
}
).json()
for i in users:
for j in res:
if i[0] == j['uid']:
i[2] = j['mainurl']
for i in users:
timg = Image.open(StringIO(requests.get(i[2]).content))
avatars.append(timg.resize((60, 60), Image.ANTIALIAS))
img = Image.new('RGB', (500, 160))
for x in xrange(500):
for y in xrange(160):
img.putpixel((x, y), (255, 255, 255))
j = 0
for i in avatars:
if j == 7:
break
for x in xrange(60):
for y in xrange(60):
v = i.getpixel((x, y))
img.putpixel((10 + 70 * j + x, y + 80), v)
j += 1
font = ImageFont.truetype("wqy-microhei.ttc", 24)
draw = ImageDraw.Draw(img)
draw.text((116, 28), unicode(target, 'UTF-8'), (0, 0, 0), font=font)
zuijinlaifang = Image.open(open('word-zuijinlaifang.png'))
width, height = zuijinlaifang.size
for i in range(5, 5 + width):
for j in range(27, 27 + height):
img.putpixel((i, j), zuijinlaifang.getpixel((i - 5, j - 27)))
img.save(uid + ".png")
aid = None
try:
albs = requests.post('https://api.renren.com/restserver.do',
data={
'method': 'photos.getAlbums',
'v': '1.0',
'access_token': token,
'format': 'json',
'uid': uid,
'count': '1000'
}
).json()
for i in albs:
if i['name'] == u"来访截图":
aid = i['aid']
if not aid:
for i in range(3):
albs = requests.post('https://api.renren.com/v2/album/put',
data={
'access_token': token,
'name': u'来访截图'
}
).json()
if u'response' in albs and u'id' in albs[u'response']:
aid = albs[u'response'][u'id']
if aid:
break
except:
pass
data = {
"v": "1.0",
"access_token": token,
'format': 'json',
'method': 'photos.upload',
'caption': ''.join(["@" + urllib.unquote_plus(p[1]) + "(" + p[0] + ") " for p in users[0:7]]),
}
if aid:
data['aid'] = aid
pic = requests.post("https://api.renren.com/restserver.do", data=data,
files = {
'upload': ('upload.png', open(uid + '.png').read())
}
).json()
j = 0
for user in users[0:7]:
requests.post("https://api.renren.com/restserver.do", data={
"v": "1.0",
"access_token": token,
"format": 'json',
'method': 'photos.tag',
'photo_id': pic['pid'],
'owner_id': uid,
'photo_width': 500,
'photo_height': 160,
'frame_width': 60,
'frame_height': 60,
'tagged_user_id': user[0],
'top': '80',
'left': 10 + j * 70
}).json()
j += 1
os.unlink(uid + ".png")
os.unlink(uid + ".txt")
|
995,453 | a3267e82c31b16b4e9154cbcd3fda3b946c18698 | import tensorflow as tf
def lrelu(x, leak=0.2, name="lrelu", alt_relu_impl=False):
with tf.variable_scope(name):
if alt_relu_impl:
f1 = 0.5 * (1 + leak)
f2 = 0.5 * (1 - leak)
return f1 * x + f2 * abs(x)
else:
return tf.maximum(x, leak * x)
def instance_norm(x):
with tf.variable_scope("instance_norm"):
epsilon = 1e-5
mean, var = tf.nn.moments(x, [1, 2], keep_dims=True)
scale = tf.get_variable('scale', [x.get_shape()[-1]],
initializer=tf.truncated_normal_initializer(
mean=1.0, stddev=0.02
))
offset = tf.get_variable(
'offset', [x.get_shape()[-1]],
initializer=tf.constant_initializer(0.0)
)
out = scale * tf.div(x - mean, tf.sqrt(var + epsilon)) + offset
return out
def instance_norm_bis(x,mask):
with tf.variable_scope("instance_norm"):
epsilon = 1e-5
for i in range(x.shape[-1]):
slice = tf.gather(x, i, axis=3)
slice_mask = tf.gather(mask, i, axis=3)
tmp = tf.boolean_mask(slice,slice_mask)
mean, var = tf.nn.moments_bis(x, [1, 2], keep_dims=False)
mean, var = tf.nn.moments_bis(x, [1, 2], keep_dims=True)
scale = tf.get_variable('scale', [x.get_shape()[-1]],
initializer=tf.truncated_normal_initializer(
mean=1.0, stddev=0.02
))
offset = tf.get_variable(
'offset', [x.get_shape()[-1]],
initializer=tf.constant_initializer(0.0)
)
out = scale * tf.div(x - mean, tf.sqrt(var + epsilon)) + offset
return out
def general_conv2d_(inputconv, o_d=64, f_h=7, f_w=7, s_h=1, s_w=1, stddev=0.02,
padding="VALID", name="conv2d", do_norm=True, do_relu=True,
relufactor=0):
with tf.variable_scope(name):
conv = tf.contrib.layers.conv2d(
inputconv, o_d, f_w, s_w, padding,
activation_fn=None,
weights_initializer=tf.truncated_normal_initializer(
stddev=stddev
),
biases_initializer=tf.constant_initializer(0.0)
)
if do_norm:
conv = instance_norm(conv)
if do_relu:
if(relufactor == 0):
conv = tf.nn.relu(conv, "relu")
else:
conv = lrelu(conv, relufactor, "lrelu")
return conv
def general_conv2d(inputconv, do_norm, o_d=64, f_h=7, f_w=7, s_h=1, s_w=1, stddev=0.02,
padding="VALID", name="conv2d", do_relu=True,
relufactor=0):
with tf.variable_scope(name):
conv = tf.contrib.layers.conv2d(
inputconv, o_d, f_w, s_w, padding,
activation_fn=None,
weights_initializer=tf.truncated_normal_initializer(
stddev=stddev
),
biases_initializer=tf.constant_initializer(0.0)
)
conv = tf.cond(do_norm, lambda: instance_norm(conv), lambda: conv)
if do_relu:
if(relufactor == 0):
conv = tf.nn.relu(conv, "relu")
else:
conv = lrelu(conv, relufactor, "lrelu")
return conv
def general_deconv2d(inputconv, outshape, o_d=64, f_h=7, f_w=7, s_h=1, s_w=1,
stddev=0.02, padding="VALID", name="deconv2d",
do_norm=True, do_relu=True, relufactor=0):
with tf.variable_scope(name):
conv = tf.contrib.layers.conv2d_transpose(
inputconv, o_d, [f_h, f_w],
[s_h, s_w], padding,
activation_fn=None,
weights_initializer=tf.truncated_normal_initializer(stddev=stddev),
biases_initializer=tf.constant_initializer(0.0)
)
if do_norm:
conv = instance_norm(conv)
if do_relu:
if(relufactor == 0):
conv = tf.nn.relu(conv, "relu")
else:
conv = lrelu(conv, relufactor, "lrelu")
return conv
def upsamplingDeconv(inputconv, size, is_scale, method,align_corners, name):
if len(inputconv.get_shape()) == 3:
if is_scale:
size_h = size[0] * int(inputconv.get_shape()[0])
size_w = size[1] * int(inputconv.get_shape()[1])
size = [int(size_h), int(size_w)]
elif len(inputconv.get_shape()) == 4:
if is_scale:
size_h = size[0] * int(inputconv.get_shape()[1])
size_w = size[1] * int(inputconv.get_shape()[2])
size = [int(size_h), int(size_w)]
else:
raise Exception("Donot support shape %s" % inputconv.get_shape())
print(" [TL] UpSampling2dLayer %s: is_scale:%s size:%s method:%d align_corners:%s" %
(name, is_scale, size, method, align_corners))
with tf.variable_scope(name) as vs:
try:
out = tf.image.resize_images(inputconv, size=size, method=method, align_corners=align_corners)
except: # for TF 0.10
out = tf.image.resize_images(inputconv, new_height=size[0], new_width=size[1], method=method,
align_corners=align_corners)
return out
def general_fc_layers(inpfc, outshape, name):
with tf.variable_scope(name):
fcw = tf.Variable(tf.truncated_normal(outshape,
dtype=tf.float32,
stddev=1e-1), name='weights')
fcb = tf.Variable(tf.constant(1.0, shape=[outshape[-1]], dtype=tf.float32),
trainable=True, name='biases')
fcl = tf.nn.bias_add(tf.matmul(inpfc, fcw), fcb)
fc_out = tf.nn.relu(fcl)
return fc_out
|
995,454 | 5394495dd41b59f0b5749f87e387c31ab88b8f76 | # Generated by Django 3.1.8 on 2021-09-29 17:58
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('flags', '0053_downloadablepicturefilepreview_is_show_on_detail'),
]
operations = [
migrations.AddField(
model_name='downloadablepicturefilepreview',
name='is_wikimedia',
field=models.BooleanField(default=False, verbose_name='File from flagcdn'),
),
]
|
995,455 | 042789e4f4a884fba0795b5e5fbcc656b76c5375 | '''
Problem statement:
Write a function that takes an unsigned integer and returns the number of '1' bits it has (also known as the Hamming weight).
For example, the 32-bit integer '11' has binary representation 00000000000000000000000000001011, so the function should return 3.
Test cases passed: 600/600
Runtime: 56 ms
'''
class Solution:
# @param n, an integer
# @return an integer
def hammingWeight(self, n):
mask = 1 #bit mask
numOnes = 0 #count of the number of '1' bits
while n > 0: #loop through all the bits in n
#if bitwise AND between n and mask produces a result of 1, the least significant bit of n was a 1. Increment the count.
if n & mask == 1:
numOnes += 1
n >>= 1 #shift n to check the next bit
return numOnes |
995,456 | 33f7bf473d37ea8990a733e6b03d08f251e03e71 | from .pages.base_page import BasePage
from .pages.product_page import ProductPage
from .pages.login_page import LoginPage
import pytest
import time
@pytest.mark.skip(reason="no need currently to test this")
@pytest.mark.parametrize('link', ["http://selenium1py.pythonanywhere.com/catalogue/coders-at-work_207/?promo=offer0",
"http://selenium1py.pythonanywhere.com/catalogue/coders-at-work_207/?promo=offer1",
"http://selenium1py.pythonanywhere.com/catalogue/coders-at-work_207/?promo=offer2",
"http://selenium1py.pythonanywhere.com/catalogue/coders-at-work_207/?promo=offer3",
"http://selenium1py.pythonanywhere.com/catalogue/coders-at-work_207/?promo=offer4",
"http://selenium1py.pythonanywhere.com/catalogue/coders-at-work_207/?promo=offer5",
"http://selenium1py.pythonanywhere.com/catalogue/coders-at-work_207/?promo=offer6",
pytest.param("http://selenium1py.pythonanywhere.com/catalogue/coders-at-work_207/?promo=offer7", marks=pytest.mark.xfail),
"http://selenium1py.pythonanywhere.com/catalogue/coders-at-work_207/?promo=offer8",
"http://selenium1py.pythonanywhere.com/catalogue/coders-at-work_207/?promo=offer9"])
@pytest.mark.skip(reason="no need currently to test this")
def test_guest_can_add_product_to_basket(browser, link):
page = ProductPage(browser, link) # инициализируем Page Object, передаем в конструктор экземпляр драйвера и url адрес
page.open()
page.add_to_busket()
page.solve_quiz_and_get_code()
page.should_be_present_added_to_busket_message()
page.should_be_correct_product_in_busket_message()
page.should_be_present_busket_price_message()
page.should_be_correct_price_in_busket_message()
@pytest.mark.skip(reason="no need currently to test this")
@pytest.mark.xfail(reason="implemented opposite")
def test_guest_cant_see_success_message_after_adding_product_to_basket(browser):
link = "http://selenium1py.pythonanywhere.com/catalogue/coders-at-work_207"
page = ProductPage(browser, link) # инициализируем Page Object, передаем в конструктор экземпляр драйвера и url адрес
page.open()
page.add_to_busket()
page.should_not_be_added_to_busket_message()
@pytest.mark.skip(reason="no need currently to test this")
def test_guest_cant_see_success_message(browser):
link = "http://selenium1py.pythonanywhere.com/catalogue/coders-at-work_207"
page = ProductPage(browser, link) # инициализируем Page Object, передаем в конструктор экземпляр драйвера и url адрес
page.open()
page.should_not_be_added_to_busket_message()
@pytest.mark.skip(reason="no need currently to test this")
#@pytest.mark.xfail(reason="not implemented yet")
def test_message_disappeared_after_adding_product_to_basket(browser):
link = "http://selenium1py.pythonanywhere.com/catalogue/coders-at-work_207"
page = ProductPage(browser, link) # инициализируем Page Object, передаем в конструктор экземпляр драйвера и url адрес
page.open()
page.add_to_busket()
page.should_disappear_after_adding_product_to_basket()
def test_guest_should_see_login_link_on_product_page(browser):
link = "http://selenium1py.pythonanywhere.com/en-gb/catalogue/the-city-and-the-stars_95/"
page = ProductPage(browser, link)
page.open()
page.should_be_login_link()
def test_guest_can_go_to_login_page_from_product_page(browser):
link = "http://selenium1py.pythonanywhere.com/en-gb/catalogue/the-city-and-the-stars_95/"
page = ProductPage(browser, link)
page.open()
page.go_to_login_page()
login_page = LoginPage(browser, browser.current_url)
login_page.should_be_login_page()
|
995,457 | ba677c046392c09f2b92b9d916157696a13814a4 | ID = '101'
TITLE = 'Symmetric Tree'
DIFFICULTY = 'Easy'
URL = 'https://oj.leetcode.com/problems/symmetric-tree/'
BOOK = False
PROBLEM = r"""Given a binary tree, check whether it is a mirror of itself (ie, symmetric
around its center).
For example, this binary tree is symmetric:
1
/ \
2 2
/ \ / \
3 4 4 3
But the following is not:
1
/ \
2 2
\ \
3 3
**Note:**
Bonus points if you could solve it both recursively and iteratively.
confused what `"{1,#,2,3}"` means? > read more on how binary tree is
serialized on OJ.
**OJ's Binary Tree Serialization:**
The serialization of a binary tree follows a level order traversal, where '#'
signifies a path terminator where no node exists below.
Here's an example:
1
/ \
2 3
/
4
\
5
The above binary tree is serialized as `"{1,2,3,#,#,4,#,#,5}"`.
"""
|
995,458 | d925480e3ca9c36a57efb8e7dfcef65eded7a567 | # in this file, python style
import numpy as np
from fast_rcnn.config import cfg
def attention_refine_layer(feat, att_map):
# this function realizes channel-wise Hadamard matrix product operation.
# input shape(1,H,W,C). attention_map shape(H,W)
input_shape = feat.shape
att_shape = att_map.shape
print('=====input_shape={}\n att_shape={}\n'.format(input_shape, att_shape))
if not input_shape.size == 4:
raise RuntimeError('input_shape of feature maps is not 4-dim')
assert att_shape[1]==input_shape[1]
attention_map = att_map[att_shape[0],:,:,0]
# output = np.zeros((input_shape[0], input_shape[1], input_shape[2], input_shape[3]),dtype=np.float32)
# for j in range(input_shape[0]):
# for i in range(input_shape[3]):
# channel = feat[j,:,:,i]
# channel = np.array(channel)
# channel = np.reshape(channel,(input_shape[1],input_shape[2]))
# attention_map = np.array(attention_map)
# attention_map = np.reshape(attention_map, (att_shape[1],att_shape[2]))
# hadmd_product = channel * attention_map
# output[j,:,:,i] = hadmd_product
# print(i)
# output = np.array(output)
# output = output.astype(np.float32,copy=False)
# print('attention map shape={}'.format(output.shape))
# return output
output = np.array([[]])
r = 0
for j in range(1):
for i in range(512):
channel = feat[j,:,:,i]
channel = np.array(channel)
# channel = np.reshape(channel,(input_shape[1],input_shape[2]))
attention_map = np.array(attention_map)
# attention_map = np.reshape(attention_map,(att_shape[1],att_shape[2]))
hadmd_product = channel*attention_map
if r==0:
output = hadmd_product
else:
output = np.vstack((output,hadmd_product))
print(i)
output = np.reshape(output, (1,-1,-1,512))
output = np.array(output)
output = output.astype(np.float32, copy=False)
return output |
995,459 | d90a656e7099d7bce91732f6628d244c510f95ff | # -*- coding: utf-8 -*-
# Copyright(C) 2014 Bezleputh
#
# This file is part of a woob module.
#
# This woob module is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This woob module is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this woob module. If not, see <http://www.gnu.org/licenses/>.
from woob.tools.backend import Module
from woob.capabilities.housing import CapHousing, Housing, HousingPhoto
from woob import __version__ as WOOB_VERSION
from .browser import ExplorimmoBrowser
__all__ = ['ExplorimmoModule']
class ExplorimmoModule(Module, CapHousing):
NAME = 'explorimmo'
DESCRIPTION = u'explorimmo website'
MAINTAINER = u'Bezleputh'
EMAIL = 'carton_ben@yahoo.fr'
LICENSE = 'AGPLv3+'
VERSION = WOOB_VERSION
BROWSER = ExplorimmoBrowser
def get_housing(self, housing):
if isinstance(housing, Housing):
id = housing.id
else:
id = housing
housing = None
housing = self.browser.get_housing(id, housing)
return housing
def search_city(self, pattern):
return self.browser.get_cities(pattern)
def search_housings(self, query):
cities = ['%s' % c.id for c in query.cities if c.backend == self.name]
if len(cities) == 0:
return list()
return self.browser.search_housings(query.type, cities, query.nb_rooms,
query.area_min, query.area_max,
query.cost_min, query.cost_max,
query.house_types,
query.advert_types)
def fill_housing(self, housing, fields):
if 'phone' in fields:
housing.phone = self.browser.get_phone(housing.id)
fields.remove('phone')
if len(fields) > 0:
self.browser.get_housing(housing.id, housing)
return housing
def fill_photo(self, photo, fields):
if 'data' in fields and photo.url and not photo.data:
photo.data = self.browser.open(photo.url).content
return photo
OBJECTS = {Housing: fill_housing,
HousingPhoto: fill_photo,
}
|
995,460 | c29cd9cf111668b81eabb32ec5fb3562309400e3 | class AttnDecoderRNN(nn.Module):
def __init__(self, attn_model, embedding, hidden_size, output_size, n_layers=1, dropout=0.1):
super(AttnDecoderRNN, self).__init__()
# Keep for reference
self.attn_model = attn_model
self.hidden_size = hidden_size
self.output_size = output_size
self.n_layers = n_layers
self.dropout = dropout
# Define layers
self.embedding = embedding
self.embedding_dropout = nn.Dropout(dropout)
self.gru = nn.GRU(hidden_size, hidden_size, n_layers, dropout=(0 if n_layers == 1 else dropout))
self.concat = nn.Linear(hidden_size * 2, hidden_size)
self.out = nn.Linear(hidden_size, output_size)
self.attn = Attn(attn_model, hidden_size)
def forward(self, input_step, last_hidden, encoder_outputs):
# Note: we run this one step (word) at a time
# Get embedding of current input word
embedded = self.embedding(input_step)
embedded = self.embedding_dropout(embedded)
# Forward through unidirectional GRU
rnn_output, hidden = self.gru(embedded, last_hidden)
# Calculate attention weights from the current GRU output
attn_weights = self.attn(rnn_output, encoder_outputs)
# Multiply attention weights to encoder outputs to get new "weighted sum" context vector
context = attn_weights.bmm(encoder_outputs.transpose(0, 1))
# Concatenate weighted context vector and GRU output using Luong eq. 5
rnn_output = rnn_output.squeeze(0)
context = context.squeeze(1)
concat_input = torch.cat((rnn_output, context), 1)
concat_output = torch.tanh(self.concat(concat_input))
# Predict next word using Luong eq. 6
output = self.out(concat_output)
output = F.softmax(output, dim=1)
# Return output and final hidden state
return output, hidden |
995,461 | 7bd089b281e56dd73d53bb4f0e195bdadc59a887 | #-- Criar um programa para o cadastro de cliente
# Para cadastro de clientes deve pedir os seguintes dados:
#Código do cliente, CPF, Nome completo,
# data de nascimento, Estado, Cidade, CEP, Bairro, Rua, numero da casa, complemento.
# Uma funcionalidade do range colocar uma variável, mas ela tem que ser do tipo int ao invés de digitar os numeros.
def cadastro_cliente(numero_funcao):
dados_cliente = ['Codigo do cliente','CPF', 'Nome completo',
'data de nascimento', 'Estado', 'Cidade', 'CEP',
'Bairro', 'Rua', 'numero da casa', 'complemento']
lista = []
for j in range (numero_funcao):
dicionario = {}
for i in dados_cliente:
dicionario[i] = (input (f' {i}: '))
lista.append (dicionario)
return lista
#print (dicionario)
print (lista)
numero = int(input('Digite o número de cadastros:'))
lista_cadastro = cadastro_cliente(numero)
# Criar uma função para salvar em arquivo:
for cliente in lista_cadastro:
cliente_chaves = list(cliente.keys()) #usando o KEYS você isola as chaves do seu dicionário
for chaves in cliente_chave:
arquivo.write(f'')
arquivo.close() |
995,462 | c5054f4dfd72131356f71d7e6fa26e0118fcfe4a | # # Please refer to the commented section below for a short Scapy recap!
#
# # In Scapy, we will use the sniff() function to capture network packets.
# # To see a list of what functions Scapy has available, open Scapy and run the lsc() function.
# # Run the ls() function to see ALL the supported protocols.
# # Run the ls(protocol) function to see the fields and default values for any protocol. E.g. ls(BOOTP)
# # See packet layers and contents with the .show() method.
# # Dig into a specific packet layer using a list index: pkts[3][2].summary()
# # ...the first index chooses the packet out of the pkts list, the second index chooses the layer for that specific packet.
# # Using the .command() method will return a string for the command necessary to recreate that sniffed packet.
#
# # To see the list of optional arguments for the sniff() function:
# # print(sniff.__doc__)
# '''
# Sniff packets and return a list of packets.
#
# Arguments:
#
# count: number of packets to capture. 0 means infinity.
#
# store: whether to store sniffed packets or discard them
#
# prn: function to apply to each packet. If something is returned, it
# is displayed.
#
# Ex: prn = lambda x: x.summary()
#
# filter: BPF filter to apply.
#
# lfilter: Python function applied to each packet to determine if
# further action may be done.
#
# Ex: lfilter = lambda x: x.haslayer(Padding)
#
# offline: PCAP file (or list of PCAP files) to read packets from,
# instead of sniffing them
#
# timeout: stop sniffing after a given time (default: None).
#
# L2socket: use the provided L2socket (default: use conf.L2listen).
#
# opened_socket: provide an object (or a list of objects) ready to use
# .recv() on.
#
# stop_filter: Python function applied to each packet to determine if
# we have to stop the capture after this packet.
#
# Ex: stop_filter = lambda x: x.haslayer(TCP)
#
# iface: interface or list of interfaces (default: None for sniffing
# on all interfaces).
#
# The iface, offline and opened_socket parameters can be either an
# element, a list of elements, or a dict object mapping an element to a
# label (see examples below).
#
# Examples:
#
# >>> sniff(filter="arp")
#
# >>> sniff(lfilter=lambda pkt: ARP in pkt)
#
# >>> sniff(iface="eth0", prn=Packet.summary)
#
# >>> sniff(iface=["eth0", "mon0"],
# ... prn=lambda pkt: "@: @" % (pkt.sniffed_on,
# ... pkt.summary()))
#
# >>> sniff(iface={"eth0": "Ethernet", "mon0": "Wifi"},
# ... prn=lambda pkt: "@: @" % (pkt.sniffed_on,
# ... pkt.summary()))
# '''
#
# # Importing the necessary modules
# ______ l..
# ____ d_t_ ______ d_t_
# ______ su..
# ______ ___
#
# # This will suppress all messages that have a lower level of seriousness than error messages, while running or loading Scapy
# ?.gL.. "scapy.runtime").sL..(?.E..)
# ?.gL.. "scapy.interactive").sL..(?.E..)
# ?.gL.. "scapy.loading").sL..(?.E..)
#
# ___
# ____ scapy.all ______ _
#
# ______ I..
# print("Scapy package for Python is not installed on your system.")
# ___.e..
#
# # Printing a message to the user; always use "sudo scapy" in Linux!
# print("\n! Make sure to run this program as ROOT !\n")
#
# # Asking the user for some parameters: interface on which to sniff, the number of packets to sniff, the time interval to sniff, the protocol
#
# # Asking the user for input - the interface on which to run the sniffer
# net_iface _ in__("* Enter the interface on which to run the sniffer (e.g. 'enp0s8'): ")
#
# # Setting network interface in promiscuous mode
# '''
# Wikipedia: In computer networking, promiscuous mode or "promisc mode"[1] is a mode for a wired network interface controller (NIC) or wireless network interface controller (WNIC) that causes the controller to pass all traffic it receives to the central processing unit (CPU) rather than passing only the frames that the controller is intended to receive.
# This mode is normally used for packet sniffing that takes place on a router or on a computer connected to a hub.
# '''
# ___
# ?.ca.. "ifconfig" ? "promisc"|, s_o.._N.. s_e.._N.. sh.._F..
#
# ______
# print("\nFailed to configure interface as promiscuous.\n")
#
# ____
# # Executed if the try clause does not raise an exception
# print("\nInterface @ was set to PROMISC mode.\n" ?
#
# # Asking the user for the number of packets to sniff (the "count" parameter)
# pkt_to_sniff _ in__("* Enter the number of packets to capture (0 is infinity): ")
#
# # Considering the case when the user enters 0 (infinity)
# __ in. ? !_ 0
# print("\nThe program will capture d packets.\n" in. ?
#
# ____ in. ? __ 0
# print("\nThe program will capture packets until the timeout expires.\n")
#
# # Asking the user for the time interval to sniff (the "timeout" parameter)
# time_to_sniff _ in__("* Enter the number of seconds to run the capture: ")
#
# # Handling the value entered by the user
# __ in. ? !_ 0
# print("\nThe program will capture packets for %d seconds.\n" in. ?
#
# # Asking the user for any protocol filter he might want to apply to the sniffing process
# # For this example I chose three protocols: ARP, BOOTP, ICMP
# # You can customize this to add your own desired protocols
# proto_sniff _ in__("* Enter the protocol to filter by (arp|bootp|icmp|0 is all): ")
#
# # Considering the case when the user enters 0 (meaning all protocols)
# __ ? __ "arp" o. ? __ "bootp" o. ? __ "icmp"
# print("\nThe program will capture only @ packets.\n" ?.u..
#
# ____ ? __ "0"
# print("\nThe program will capture all protocols.\n")
#
# # Asking the user to enter the name and path of the log file to be created
# file_name _ in__("* Please give a name to the log file: ")
#
# # Creating the text file (if it doesn't exist) for packet logging and/or opening it for appending
# sniffer_log _ o.. ? _
#
#
# # This is the function that will be called for each captured packet
# # The function will extract parameters from the packet and then log each packet to the log file
# ___ packet_log packet
# # Getting the current timestamp
# now _ d_t_.n..
#
# # Writing the packet information to the log file, also considering the protocol or 0 for all protocols
# __ proto_sniff __ "0"
# # Writing the data to the log file
# print("Time: " + st. ? + " Protocol: ALL" + " SMAC: " + pa.. 0 .sr. + " DMAC: " + pa.. 0 .ds.
# file_sniffer_log
#
# ____ ? __ "arp" o. ? __ "bootp" o. ? __ "icmp"
# # Writing the data to the log file
# print(
# "Time: " + st. ? + " Protocol: " + ?.u.. + " SMAC: " + pa.. 0 .sr. + " DMAC: " + pa..
# 0].ds. f_s_l..
#
#
# # Printing an informational message to the screen
# print("\n* Starting the capture...")
#
# # Running the sniffing process (with or without a filter)
# __ proto_sniff __ "0"
# sn.. i_n_i.. c_i. p_t_s.. t_i.. t_t_s.. p_p_l..
#
# ____ ? __ "arp" o. ? __ "bootp" o. ? __ "icmp"
# s.. i_n_i.. f_p_s.. c_i.. p_t_s.. t_i. t_t_s.. p_p_l..
#
# ____
# print("\nCould not identify the protocol.\n")
# ___.e..
#
# # Printing the closing message
# print("\n* Please check the @ file to see the captured packets.\n" f_n..
#
# # Closing the log file
# ?.c..
#
# # End of the program.
# # Feel free to modify it, test it, add new protocols to sniff and improve de code whenever you feel the need to. |
995,463 | df17036c533f72a0bc9ae7976ff24eebd9bd4c68 | import cv2 as cv
import math
import pathlib
import os
def face_scraper():
"""
Crops all raw images in ./rawimages/test or ./rawimages/train into usable face images for classification.
"""
base_directory = pathlib.Path(__file__).parent.absolute()
test_or_train, is_target_face = ask_for_directory()
folders = ['test', 'train']
test_or_train = folders[test_or_train]
source_directory = os.path.join(base_directory, 'rawimages', test_or_train, str(is_target_face))
target_directory = os.path.join(base_directory, 'datasets', test_or_train, str(is_target_face))
print('The source folder is ' + source_directory)
print('The target folder is ' + target_directory)
print('Files before saving images:')
print(os.listdir(target_directory))
crop_and_save_images(source_directory, target_directory)
print('Files after saving images:')
print(os.listdir(target_directory))
def crop_face_image(imageDirectory, haar_cascade, size=160):
"""
Returns a square image of detected face cropped out of the given image, returns None if no face is detected
:Param imageDirectory: str
:param haar_cascade: str
:param size: int
:return: list
"""
img = cv.imread(imageDirectory)
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
faces_rect = haar_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=6)
xtemp, ytemp, wtemp, htemp = 0, 0, 0, 0
face_detected = False
for(x,y,w,h) in faces_rect:
cv.rectangle(img, (x,y), (x+w, y+h), (0,255,0), thickness=2)
if w < 500 or h <500:
continue
xtemp, ytemp, wtemp, htemp = x,y,w,h
face_detected = True
if not face_detected:
return None
x,y,w,h = xtemp, ytemp, wtemp, htemp
crop = img[y:y + h, x:x + w]
crop = cv.resize(crop, (size, size))
return crop
def ask_for_directory():
"""
Asks user to enter integer representing whether they are choosing to process their own images or images of others. Returns 0 for others,
1 for self.
:return: tuple(int, int)
"""
while True:
try:
train_or_test = int(input('Are these images for training or testing? (0 = testing, 1 = training): '))
target_or_random = int(input('Are these images of the person you want to identify? (0 = no, 1 = yes): '))
if train_or_test in [0, 1] and target_or_random in [0, 1]:
break
except ValueError as e:
print(f'{e}, Please enter proper values!')
return (train_or_test, target_or_random)
def crop_and_save_images(source_directory, target_directory):
"""
Goes through every image in the source_directory and crops the detected face from the image.
These cropped images are then all saved to the target_directory
:param source_directory: str
:param target_directory: str
return: None
"""
haar_cascade = cv.CascadeClassifier('haar_face.xml')
os.chdir(target_directory)
file_names = os.listdir(source_directory)
number_of_images = len(file_names)
total_number_of_digits = math.floor(math.log10(number_of_images)) + 1
i = 1
for filename in file_names:
image = os.path.join(source_directory, filename)
print(image)
cropped_image = crop_face_image(image, haar_cascade)
try:
if cropped_image.any() is None:
print('No face detected, skipping image')
continue
except AttributeError:
print('No face detected, skipping image')
continue
current_number_of_digits = math.floor(math.log10(i)) + 1
number_of_zeros = total_number_of_digits - current_number_of_digits
print(f'Cropping: {filename}, saving to folder {target_directory}')
newFilename = 'IMG_' + '0'*number_of_zeros + f'{i}.jpg'
cv.imwrite(newFilename, cropped_image)
i += 1
def renameImages(directory):
"""
Renames all .jpg or .png in a given directory into standardized names (e.g. IMG_001.jpg). Not
necessary but helps with data cleaning by expediting the search for corresponding images of poorly cropped face images.
:param directory: str
"""
filenames = os.listdir(directory)
numberOfImages = len(filenames)
totalNumberOfDigits = math.floor(math.log10(numberOfImages)) + 1
i = 1
print('The current folder is ' + directory)
for filename in filenames:
currentNumberOfDigits = math.floor(math.log10(i)) + 1
numberOfZeros = totalNumberOfDigits - currentNumberOfDigits
print(f'renaming: {filename} to' + 'IMG_' + '0'* numberOfZeros + f'{i}.jpg')
oldFile = os.path.join(directory, filename)
newFile = os.path.join(directory, 'IMG_' + '0'* numberOfZeros + f'{i}.jpg')
os.rename(oldFile, newFile)
i += 1 |
995,464 | 4d8b8badc161ea61984e22ee69d30bddf617f18a | i=1
while i<=5:
j=1
while j<=5:
print('*',end=' ')
j=j+1
i=i+1
print() |
995,465 | d3cfad24eb66d2656e12ca9ff03875efbb4762f5 | from django.contrib import messages
from django.contrib.auth import login
from django.contrib.auth.decorators import login_required
from django.db import transaction
from django.db.models import Avg, Count
from django.forms import inlineformset_factory
from django.shortcuts import get_object_or_404, redirect, render
from django.urls import reverse, reverse_lazy
from django.utils.decorators import method_decorator
from django.views.generic import (CreateView, DeleteView, DetailView, ListView,
UpdateView)
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from ..decorators import kinder_required
from ..models import *
from ..forms import *
class KinderSignUpView(CreateView):
model = User
form_class = KinderSignUpForm
template_name = 'registration/signup_form.html'
def get_context_data(self, **kwargs):
kwargs['user_type'] = 'kinder'
return super().get_context_data(**kwargs)
def form_valid(self, form):
user = form.save()
login(self.request, user)
return redirect('kinders:viewpa')
@login_required
def viewki(request):
tutors = Tutor.objects.all()
posts = Post.objects.all()
paginator = Paginator(posts, 3)
return render(request, 'classroom/kinders/index.html', {'tutors':tutors,'posts': posts})
|
995,466 | 3613ce13a5ac740fb2b2ebfc6ed7cde84dea9cfa | from django.db.models.deletion import CASCADE
from django.db.models.fields import BooleanField
from django.core.validators import RegexValidator
from django.dispatch import receiver
from django.db.models.signals import post_save
from djongo import models
from django.contrib.auth.models import AbstractUser, BaseUserManager
from django.conf import settings
from django.utils.translation import gettext_lazy as _
from django.db.models import signals
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.utils.crypto import get_random_string
from .users import *
class Mark(models.Model):
mark = models.FloatField(default=0)
student = models.ForeignKey(User, on_delete=models.CASCADE)
class Discussion(models.Model):
question = models.CharField(max_length=1000)
answer = models.CharField(max_length=10000) # Maybe each answer seperated by a delimiter
class Lecture(models.Model):
video = models.URLField() # Link to video
notes = models.URLField() # Link to notes
class Assignment(models.Model):
# video = models.URLField() # Link to video
notes = models.URLField() # Link to notes
marks = models.ManyToManyField(Mark)
mark_visible = models.BooleanField(default=False)
class ModuleItem(models.Model):
class ModuleItemType(models.TextChoices):
assignment = "ASN", _("Assignment")
lecture = "LEC", _("Lecture")
name = models.CharField(max_length=100)
type = models.CharField(max_length=3, choices=ModuleItemType.choices)
assignment = models.OneToOneField(Assignment, null=True, on_delete=models.CASCADE)
lecture = models.OneToOneField(Lecture, null=True, on_delete=models.CASCADE)
date = models.DateField()
discussion = models.ManyToManyField(Discussion)
class Meta:
ordering = ['id']
class Module(models.Model):
name = models.CharField(max_length=100)
description = models.CharField(max_length=500)
items = models.ManyToManyField(ModuleItem)
# assignments = models.ManyToManyField(Assignment, through=ModuleItem, related_name='assignment')
# lectures = models.ManyToManyField(Lecture, through=ModuleItem, related_name='lecture')
class Meta:
ordering = ['id']
class Course(models.Model):
course_code = models.CharField(max_length=30, null=True, blank=True)
course_name = models.CharField(max_length=100, null=True, blank=True)
description = models.CharField(max_length=500, null=True, blank=True)
enrolled_students = models.ManyToManyField(User)
year = models.IntegerField(null=True, blank=True)
semester = models.CharField(max_length=100, null=True, blank=True) # Summer, winter, fall
modules = models.ManyToManyField(Module)
professor = models.ForeignKey(User, on_delete=models.PROTECT, related_name="professor")
password = models.CharField(max_length=100)
calendar_link = models.CharField(max_length=1000, null=True, blank=True)
def __str__(self):
return self.course_name |
995,467 | 32f14ca8e2e128499ae129e635fbcd4b4f73919e | from urllib3 import request
r = request.urlopen('http://httpbin.org')
text = r.read()
print(r.status, r.reason) |
995,468 | 9c6b96ae97a8d852349ed572e960f3ce93b15592 | from io import BytesIO
import pycurl
from collections import namedtuple
class Muti_curl():
def __init__(self, url):
self.curl = pycurl.Curl()
self.curl.setopt(pycurl.URL, url) # url
@classmethod
def deti(cls):
# cls.curl.setopt(pycurl.URL, url) # url
# self.curl.setopt(pycurl.WRITEDATA, self.target_file),
cls.curl.setopt(pycurl.FOLLOWLOCATION, 1)
cls.curl.setopt(pycurl.NOPROGRESS, 0)
# self.curl.setopt(pycurl.PROGRESSFUNCTION, self.progress)
cls.curl.setopt(pycurl.MAXREDIRS, 5)
cls.curl.setopt(pycurl.NOSIGNAL, 1)
return cls
"""
pycurl.Curl() #创建一个pycurl对象的方法
pycurl.Curl(pycurl.URL, http://www.google.com.hk) #设置要访问的URL
pycurl.Curl().setopt(pycurl.MAXREDIRS, 5) #设置最大重定向次数
pycurl.Curl().setopt(pycurl.CONNECTTIMEOUT, 60)
pycurl.Curl().setopt(pycurl.TIMEOUT, 300) #连接超时设置
c.setopt(pycurl.CONNECTTIMEOUT, 60) #设置链接超时
c.setopt(pycurl.ENCODING, 'gzip,deflate') #处理gzip内容
pycurl.Curl().setopt(pycurl.USERAGENT, "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322)") #模拟浏览器
pycurl.Curl().perform() #服务器端返回的信息
pycurl.Curl().getinfo(pycurl.HTTP_CODE) #查看HTTP的状态 类似urllib中status属性
"""
def new(self):
buffer = BytesIO()
c = pycurl.Curl()
# c.setopt(c.URL, 'http://pycurl.io/')
c.setopt(c.URL, 'http://baojia.com/')
c.setopt(c.WRITEDATA, buffer)
c.setopt(c.CAINFO, certifi.where())
def get_info(self):
"状态码 连接时间 接收到第一个字节的时间 总时间"
monitor = namedtuple("Monitor", ["http_code", "connect_time", "starttransfer_time", "total_time"])
http_code = self.curl.getinfo(pycurl.HTTP_CODE)
http_conn_time = self.curl.getinfo(pycurl.CONNECT_TIME)
http_pre_tran = self.curl.getinfo(pycurl.PRETRANSFER_TIME)
http_start_tran = self.curl.getinfo(pycurl.STARTTRANSFER_TIME)
http_total_time = self.curl.getinfo(pycurl.TOTAL_TIME)
http_size = self.curl.getinfo(pycurl.SIZE_DOWNLOAD)
return monitor(http_code, http_conn_time, http_start_tran, http_total_time)
url = "http://127.0.0.1:8082/"
url = "http://www.baojia.com/"
curl = Muti_curl(url)
s = curl.get_info()
print(s)
|
995,469 | 7447eb937600998b67df96b75598ed66f45f21c4 | newWordList = []
#Taking input string
inputstring = input("enter the words separated by comma")
words_list = inputstring.split(",")
#to strip spaces before and after each word
for word in words_list:
newWord = word.strip()
newWordList.append(newWord)
# To sort the words
newWordList.sort()
#to construct new string with sorted words
sortedString = ", ".join(newWordList)
#print final output
print("The sorted words string is")
print(sortedString)
|
995,470 | 1165d931eb8904690f16af27bee5a0c00605ae0e | # 13. Write a Python program to sort a list of tuples using Lambda.
srt = lambda x:sorted(x)
print(srt([(0, 2, 3, 111), (0, 1, 6), (7, 8, 9)]))
|
995,471 | 30aaac5d9e527383127b317dd8bd1176544ca8ab | #235 二叉搜索树的最近公共祖先
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
# class Solution(object):
# def lowestCommonAncestor(self, root, p, q):
# """ :type root: TreeNode
# :type p: TreeNode
# :type q: TreeNode
# :rtype: TreeNode
# """
# if not root or not p or not q:
# return None
# if p.val < root.val and q.val < root.val:
# return self.lowestCommonAncestor(root.left, p, q)
# elif p.val > root.val and q.val >root.val:
# return self.lowestCommonAncestor(root.right, p, q)
# else:
# return root
# class Solution():
# def lowestCommonAncestor(self, root, p, q):
# if root is None or p is None or q is None:
# return None
# if p.val < root.val and q.val < root.val:
# return self.lowestCommonAncestor(root.left, p, q)
# elif p.val > root.val and q.val > root.val:
# return self.lowestCommonAncestor(root.right, p, q)
# else:
# return root
#20200825-第2次默写:if语句里面少了return,感觉还是没搞太懂内部运行,建议debug看看;
# class Solution:
# def lowestCommonAncestor(self,root,p,q):
# if root is None or p is None or q is None:
# return None
# if p.val < root.val and q.val < root.val:
# return self.lowestCommonAncestor(root.left, p, q)
# elif p.val > root.val and q.val > root.val:
# return self.lowestCommonAncestor(root.right, p, q)
# else:
# return root
# #20200825-第3次默写:多加了root!=None
# class Solution:
# def lowestCommonAncestor(self,root: TreeNode, p:TreeNode, q:TreeNode):
# if root is None or p is None or q is None:
# return None
# while root != None:
# if p.val < root.val and q.val < root.val:
# return self.lowestCommonAncestor(root.left, p, q)
# elif p.val > root.val and q.val > root.val:
# return self.lowestCommonAncestor(root.right, p, q)
# else:
# return root
# # #20200828-第4次默写:少写了p,q
# class Solution:
# def lowestCommonAncestor(self, root, p, q):
# if root is None or p is None or q is None:
# return None
# if root.val>p.val and root.val>q.val:
# return self.lowestCommonAncestor(root.left, p, q)
# elif root.val<p.val and root.val<q.val:
# return self.lowestCommonAncestor(root.right, p, q)
# else:
# return root
# # # #20200910-第5次默写:完全不记得了,还少写了p,q
# class Solution:
# def lowestCommonAncestor(self, root, p, q):
# if root is None or p is None or q is None:
# return None
# if p.val < root.val and q.val < root.val:
# return self.lowestCommonAncestor(root.left, p, q)
# elif p.val > root.val and q.val > root.val:
# return self.lowestCommonAncestor(root.right, p, q)
# else:
# return root
if __name__ == '__main__':
Node_6 = TreeNode(6)
Node_2 = Node_6.left = TreeNode(2)
Node_8 = Node_6.right = TreeNode(8)
Node_0 = Node_2.left = TreeNode(0)
Node_4 = Node_2.right = TreeNode(4)
Node_7 = Node_8.left = TreeNode(7)
Node_9 = Node_8.right = TreeNode(9)
Node_3 = Node_4.left = TreeNode(3)
Node_5 = Node_4.right = TreeNode(5)
s = Solution()
print(s.lowestCommonAncestor(Node_6, Node_0, Node_4).val) |
995,472 | b47abe159dd0d02569d06607a124e5a8368cd604 | from property_price_model import db
class Sale(db.Model):
id = db.Column(db.String(36), primary_key=True)
price = db.Column(db.Integer)
date = db.Column(db.Date)
postcode = db.Column(db.String(8))
property_type = db.Column(db.String(1))
new_build = db.Column(db.String(1))
free_or_leasehold = db.Column(db.String(1))
paon = db.Column(db.String(64))
saon = db.Column(db.String(64))
street = db.Column(db.String(64))
locality = db.Column(db.String(64))
town_city = db.Column(db.String(64))
district = db.Column(db.String(64))
county = db.Column(db.String(64))
incode = db.Column(db.String(4))
outcode = db.Column(db.String(4))
def __repr__(self):
return "<Sale {} {} {}> ".format(self.paon, self.saon, self.postcode)
|
995,473 | d838ebaabfe6f2f1c5d10b2bf1a579d302fbeade | # -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
# This is simple function to get the weather data from weather underground,
# and it will get the weather from 1991-01-01 to 2010-12-31 with totally
# 20 years.
#
# The URL request sample will be like:
# http://www.wunderground.com/history/airport/KMDW/1991/01/01/DailyHistory.html?format=1
# replace 1991 01 01 with specified YEAR, MONTH and DATE.
#
# SAMPLE output:
# TimeCST,TemperatureF,Dew PointF,Humidity,Sea Level PressureIn,VisibilityMPH,Wind Direction,Wind SpeedMPH,Gust SpeedMPH,PrecipitationIn,Events,Conditions,WindDirDegrees,DateUTC
# 12:00 AM,12.0,6.1,77,30.43,15.0,South,13.8,-,N/A,,Partly Cloudy,190,1991-01-01 06:00:00
# 1:00 AM,12.9,5.0,71,30.40,10.0,South,16.1,-,N/A,,Mostly Cloudy,190,1991-01-01 07:00:00
# ...
# 10:00 PM,19.9,18.0,92,30.30,10.0,WSW,6.9,-,N/A,,Clear,240,1991-01-02 04:00:00
# 11:00 PM,16.0,15.1,96,30.32,8.0,SW,6.9,-,N/A,,Clear,220,1991-01-02 05:00:00
#
# NOTE:
# 1. TimeCST are not separated one hour by hour;
# 2. It is not guaranteed that there are 24 tuples for each day.
# ------------------------------------------------------------------------------
# File: weather_get_history.py
# Author: Hongwei Jin
# Created: 02/02/2015
# Modified: 02/10/2015
import json
import urllib
import os
from datetime import datetime, timedelta
import csv
from dateutil import tz
import tempfile
from pylab import plotfile, show, gca
import matplotlib.cbook as cbook
from collections import defaultdict, Counter
import shutil
START_DATE = datetime.strptime("1991-01-01", "%Y-%m-%d")
END_DATE = datetime.strptime("2011-01-01", "%Y-%m-%d")
API = "2f060cf5d6061a63" # weather underground API
CURRENT_FOLDER = os.path.dirname(os.path.realpath(__file__))
# CURRENT_FOLDER = tempfile.mkdtemp()
# CURRENT_FOLDER = os.path.abspath("c:\\users\\hongwe~1\\appdata\\local\\temp\\tmppemjrz")
# DATA_FOLDER = os.path.join(CURRENT_FOLDER, "meta_data")
if not os.path.exists(os.path.join(CURRENT_FOLDER, "..\\Data\\weather_data\\KMDW")):
os.mkdir(os.path.join(CURRENT_FOLDER, "weather_data"))
META_DATA_FOLDER = os.mkdir(
os.path.join(CURRENT_FOLDER, "weather_data", "KMDW"))
META_DATA_FOLDER = os.path.join(CURRENT_FOLDER, "weather_data", "KMDW")
def get_history_using_HTTP():
'''
Get Historical Weather Data through HTTP
'''
num_days = (END_DATE - START_DATE).days
work_day = START_DATE
# @TODO: use multi thread to download weather data if possible.
for i in range(num_days):
y = work_day.year
m = "%02d" % work_day.month
d = "%02d" % work_day.day
address = "http://www.wunderground.com/history/airport/KMDW/{}/{}/{}/DailyHistory.html?format=1".format(
y, m, d)
filename = os.path.join(
META_DATA_FOLDER, "wunderground_{}_{}_{}.csv".format(y, m, d))
urllib.urlretrieve(address, filename)
outfile = ""
with open(filename, "r") as infile:
infile.readline()
for line in infile:
line = line.replace("<br />", "")
outfile += line
with open(filename, "w") as inputFile:
inputFile.write(outfile)
work_day = work_day + timedelta(days=1)
def merge_files():
"""
Merge daily historical weather data into a single one. CSV format.
"""
# abs path of data folder
work_folder = os.path.join(CURRENT_FOLDER, "..\\Data\\weather_data\\KORD")
file_list = os.listdir(work_folder)
with open(os.path.join(work_folder, "..\\merged_history_KORD.csv"), "w") as outfile:
for line in open(os.path.join(work_folder, file_list[0])):
outfile.write(line)
print "write the first line"
for i in range(1, len(file_list)):
with open(os.path.join(work_folder, file_list[i])) as infile:
infile.next()
for line in infile:
outfile.write(line)
def remove_lines():
"""
Remove those lines which have no weather recorded.
Note: It may results in the majority rule. When filling with minutes data,
whose missing values may considered as incorrectly.
"""
work_folder = os.path.join(CURRENT_FOLDER, "..\\Data\\weather_data")
with open(os.path.join(work_folder, "filtered_merged_history_KMDW.csv"), "w") as outfile:
with open(os.path.join(work_folder, "merged_history_KMDW.csv")) as infile:
outfile.write(infile.next())
for line in infile:
if line[0].isdigit():
outfile.write(line)
def remove_duplicated_lines():
"""
Remove duplicated lines in .csv file and
"""
work_folder = os.path.join(CURRENT_FOLDER, "..\\Data\\weather_data")
unique_lines = []
# compare line be line
with open(os.path.join(work_folder, "tempfile.csv"), "w") as outfile:
with open(os.path.join(work_folder, "filtered_merged_history_KMDW.csv")) as infile:
for line in infile:
if line not in unique_lines:
outfile.write(line)
unique_lines.append(line)
# replace files
shutil.copyfile(os.path.join(work_folder, 'tempfile.csv'), os.path.join(
work_folder, "filtered_merged_history_KMDW.csv"))
# remove temp file
os.remove(os.path.join(work_folder, "tempfile.csv"))
def main():
"""
File function interface
1. Download weather data
2. Merge into a single file
3. Remove invalid lines in history file
4. Remove duplicated lines
This process will end with a single file with every daily weather records and removing all its invalid data.
"""
# get_history_using_HTTP()
# merge_files()
# remove_lines()
remove_duplicated_lines()
if __name__ == '__main__':
main()
|
995,474 | c91bd170bc805ea0cafbe0091546a39f60a8d516 | from django.conf.urls import patterns, include, url
from django.contrib import admin
from django.views.generic import TemplateView
from lms import settings
from django.conf.urls.static import static
urlpatterns = patterns('',
url(r'^admin/', include(admin.site.urls)),
url("^$", TemplateView.as_view(template_name="index.html"), name="slides_home"),
# Week 1 - OO Python
url("^week1/1/$", TemplateView.as_view(template_name="week1/1.html"), name="week1_day1"),
url("^week1/2/$", TemplateView.as_view(template_name="week1/2.html"), name="week1_day2"),
url("^week1/3/$", TemplateView.as_view(template_name="week1/3.html"), name="week1_day3"),
url("^week1/4_am/$", TemplateView.as_view(template_name="week1/4_am.html"), name="week1_day4_am"),
url("^week1/4_pm/$", TemplateView.as_view(template_name="week1/4_pm.html"), name="week1_day4_pm"),
# Week 2 - DB Intro + Introductory Django
url("^week2/1_am/$", TemplateView.as_view(template_name="week2/1_am.html"), name="week2_day1_am"),
url("^week2/1_pm/$", TemplateView.as_view(template_name="week2/1_pm.html"), name="week2_day1_pm"),
url("^week2/2_am/$", TemplateView.as_view(template_name="week2/2_am.html"), name="week2_day2_am"),
url("^week2/2_pm/$", TemplateView.as_view(template_name="week2/2_pm.html"), name="week2_day2_pm"),
url("^week2/3_am/$", TemplateView.as_view(template_name="week2/3_am.html"), name="week2_day3_am"),
url("^week2/3_pm/$", TemplateView.as_view(template_name="week2/3_pm.html"), name="week2_day3_pm"),
url("^week2/4_am/$", TemplateView.as_view(template_name="week2/4_am.html"), name="week2_day4_am"),
url("^week2/4_pm/$", TemplateView.as_view(template_name="week2/4_pm.html"), name="week2_day4_pm"),
url("^week2/5_am/$", TemplateView.as_view(template_name="week2/5_am.html"), name="week2_day5_am"),
url("^week2/5_pm/$", TemplateView.as_view(template_name="week2/5_pm.html"), name="week2_day5_pm"),
# Start Project Cheatsheet
url("^start_project_cheatsheet/$", TemplateView.as_view(template_name="start_project.html"), name="start_project"),
# Week 3 - Introductory Django
url("^week3/1_am/$", TemplateView.as_view(template_name="week3/1_am.html"), name="week3_day1_am"),
url("^week3/1_pm/$", TemplateView.as_view(template_name="week3/1_pm.html"), name="week3_day1_pm"),
url("^week3/2_am/$", TemplateView.as_view(template_name="week3/2_am.html"), name="week3_day2_am"),
url("^week3/2_pm/$", TemplateView.as_view(template_name="week3/2_pm.html"), name="week3_day2_pm"),
url("^week3/3_am/$", TemplateView.as_view(template_name="week3/3_am.html"), name="week3_day3_am"),
url("^week3/3_pm/$", TemplateView.as_view(template_name="week3/3_pm.html"), name="week3_day3_pm"),
url("^week3/lab/$", TemplateView.as_view(template_name="week3/lab.html"), name="week3_lab"),
url(r'^test_overlay/$', 'slides.views.test_overlay', name='test_overlay'),
url(r'^teacher/$', 'slides.views.teacher', name='teacher'),
# User authentication
url(r'^register/$', 'slides.views.register', name='register'),
url(r'^login/$', 'django.contrib.auth.views.login', name='login'),
url(r'^logout/$', 'django.contrib.auth.views.logout', name='logout'),
url(r'^account/$', 'slides.views.edit_account', name='edit_account'),
url(r'^done/$', 'slides.views.done', name='done'),
url(r'^help/$', 'slides.views.help', name='help'),
url(r'^question/$', 'slides.views.question', name='question'),
url(r'^teacher_index/$', 'slides.views.teacher_index', name='teacher_index'),
url(r'^lecture_fragment/$', 'slides.views.lecture_fragment', name='lecture_fragment'),
url(r'^details/$', 'slides.views.details', name='details'),
url(r'^update/$', 'slides.views.update', name='update'),
url(r'^student_actions/$', 'slides.views.student_actions', name='student_actions'),
url(r'^teacher/week(?P<week_number>\d+)/(?P<lecture_time>.+)/$', 'slides.views.lecture', name="lecture"),
url(r'^done/week(?P<week_number>\d+)/(?P<lecture_time>.+)/$', 'slides.views.done_test', name="done_test"),
url(r'^help/week(?P<week_number>\d+)/(?P<lecture_time>.+)/$', 'slides.views.help_test', name="help_test"),
url(r'^question/week(?P<week_number>\d+)/(?P<lecture_time>.+)/$', 'slides.views.question_test', name="question_test"),
)
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) |
995,475 | 93ebfab49478ca734c0fa29c148f26a726b8ddee | def minCostClimbingStairs(cost: list) -> int:
"""等填坑"""
min1, min2 = 0, 0
for i in range(2, len(cost)+1):
mincost = min(cost[i-1]+min2, cost[i-2]+min1)
min1, min2 = min2, mincost
return mincost
cost = [10, 15, 20]
print(minCostClimbingStairs(cost)) |
995,476 | 6fce1ccde3b3c78c7d068e789a1a9f4824954173 | '''
Code for plotting k-mer uniqueness ratio
Assumes count.cpp has already beeen run to put data in (k, # different kmers, # unique kmers, # kmers) format
'''
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import seaborn as sns
files = ['ecoli.kmers', 'celegans.kmers', 'chr1.kmers', 'tomato.kmers', 'human.kmers', 'wheat.kmers']
names = ['E. coli', 'C. elegans', 'H. sapiens (chr1)', 'S. lycopersicum', 'H. sapiens', 'T. aestivum']
colors = ['green', 'blue', 'purple', 'red', 'orange', 'yellow']
xs = []
ys = []
for fn in files:
with open(fn) as f:
curxs = []
curys = []
for line in f:
tokens = line.split(' ')
if len(tokens) == 4:
x = int(tokens[0])
y = float(tokens[2]) / float(tokens[3])
if x > 300:
continue
curxs.append(x)
curys.append(y)
xs.append(curxs)
ys.append(curys)
sns.set()
for i in range(0, len(xs)):
sns.lineplot(xs[i], ys[i], color = colors[i])
plt.title('K-mer Uniqueness Ratio')
plt.xlabel('K-mer Length')
plt.ylabel('Proportion of Unique K-mers')
plt.legend(labels = names)
plt.savefig('uniquekmers.png')
|
995,477 | ffc6d982391e1bb0e33c462842dd2818abcd2321 | import logging
import time
from kiteconnect import KiteTicker
logging.basicConfig(level=logging.DEBUG)
api_key = open('/home/akkey/Desktop/Django-projects/django-sockets/demo1/integers/api_key.txt', 'r').read()
access_token = "pt5vbS56ncUWLl2bqd5FjuH1oM4iJ7pp"
tokens = [5215745, 633601, 1195009, 779521, 758529, 1256193, 194561, 1837825, 952577, 1723649, 3930881, 4451329, 593665, 3431425, 2905857, 3771393, 3789569, 3463169, 381697, 54273, 415745, 2933761, 3580417, 49409, 3060993, 4464129, 3375873, 4574465, 636673, 3721473, 2796801]
data = []
kws = KiteTicker(api_key, access_token)
def on_ticks(ws, ticks):
# logging.debug("Ticks: {}".format(ticks[0]))
print("Hiiemowe")
data.clear()
# print(ticks[0])
data.extend(ticks)
def on_connect(ws, response):
print("hellooooooo")
ws.subscribe(tokens)
ws.set_mode(ws.MODE_FULL, tokens)
def on_close(ws, code, reason):
ws.stop()
kws.on_ticks = on_ticks
kws.on_connect = on_connect
kws.on_close = on_close
print ('Hiiiiiiiiiiiiiiiiii')
kws.connect(threaded= True, disable_ssl_verification=False)
print ('HIIIIIIIIIIIIII')
|
995,478 | 323343ffed3328c71c5ac9b282295314ea3d39f7 | # -*- coding: utf-8 -*-
import time
import datetime
from django.core.urlresolvers import reverse
from django.core.cache import cache
from django.conf import settings
from forum_integration.api import DB, forum_login, forum_logout
class LoginUserOnForum(object):
def process_response(self, request, response):
if response.status_code == 301:
return response
is_authenticated = request.user.is_authenticated()
logged_on_forum = request.COOKIES.get('logged_on_forum', None)
pass_hash = request.COOKIES.get('pass_hash', None)
member_id = request.COOKIES.get('member_id', None)
if request.path == reverse(
'django.contrib.auth.views.login') and is_authenticated:
response = forum_login(request, response)
if request.path == reverse('django.contrib.auth.views.logout'):
response = forum_logout(request, response)
return response
class ActiveUserMiddleware:
def process_request(self, request):
current_user = request.user
if request.user.is_authenticated():
now = datetime.datetime.now()
cache.set('seen_%s' % (current_user.username), now,
settings.USER_LASTSEEN_TIMEOUT) |
995,479 | 6a74e9b1e0c69c3a55b3ee4925124cfcce8ff562 | # Generated by Django 3.1 on 2020-10-23 07:25
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('netmedsapp', '0004_auto_20201021_1124'),
]
operations = [
migrations.CreateModel(
name='cart',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('cart_owner', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='cart', to=settings.AUTH_USER_MODEL)),
('products', models.ManyToManyField(blank=True, null=True, related_name='cart', to='netmedsapp.Medicines')),
],
),
]
|
995,480 | f48e997053951a6b20bf9649943f21fd66437727 | from LinkedList.node import Node, DoublyNode
class LinkedList:
def __init__(self, head=None): # input head for init with other linked list
if head is None:
self.head = None
self.tail = None
else:
self.head = head
buffer = self.head
while buffer.next_node is not None:
buffer = buffer.next_node # move current node to next node
self.tail = buffer
def is_empty(self):
return self.head is None # must handle self.head properly
def size(self): # number of node
count = 0
buffer = self.head
while buffer is not None:
count += 1
buffer = buffer.next_node
return count
def search(self, value): # equivalent of is_in()
buffer = self.head
while buffer is not None:
if buffer.value == value:
return True
buffer = buffer.next_node
return False
def index(self, value): # find index of value
count = 0
buffer = self.head
while buffer is not None:
if buffer.value == value:
return count
count += 1
buffer = buffer.next_node
return -1
def node_at(self, pos):
count = 0
buffer = self.head
while buffer is not None:
if count == pos:
return buffer
count += 1
buffer = buffer.next_node
print('pos out of bound')
return
def append(self, value): # for unordered list
if self.head is None:
self.head = Node(value)
self.tail = self.head
else:
'''
buffer = self.head
while buffer.next_node is not None:
buffer = buffer.next_node
buffer.next_node = Node(value)
self.tail = buffer.next_node
'''
self.tail.next_node = Node(value)
self.tail = self.tail.next_node
def pop(self, pos=None):
'''
is empty?
pop out of bound? -> pos<0 or pos>=self.size()
pop tail
pop head
pop middle
is empty?
'''
if not self.is_empty():
if pos is None: # pop last element
pos = self.size()-1 # pop tail
else:
if pos < 0 or pos >= self.size(): # out of bound
print('Index out of bound')
return
count = 0
prev = None
buffer = self.head
while buffer is not None and count != pos:
prev = buffer
buffer = buffer.next_node
count += 1
if buffer is self.head: # pop head
val = self.head.value
buffer = self.head.next_node
self.head.next_node = None
self.head = buffer
elif buffer is self.tail: # pop tail
val = self.tail.value
prev.next_node = None
self.tail = prev
else: # pop middle
val = buffer.value
prev.next_node = buffer.next_node
buffer.next_node = None
if self.is_empty():
self.tail = None
return val
else:
print("Linked List is already empty")
return
def insert(self, pos, value): # for unordered list -> at arbitrary position (insert before index)
'''
is index out of bound? -> fix to lower bound/upper bound
is empty?
insert at head
insert at tail
insert in the middle
'''
if pos < 0:
pos = 0
elif pos >= self.size(): # ???
self.append(value)
return
count = 0
prev = None
buffer = self.head
while buffer is not None and count != pos:
count += 1
prev = buffer
buffer = buffer.next_node
if buffer is self.head:
new_node = Node(value, self.head)
self.head = new_node
elif buffer is self.tail: # same as else
prev.next_node = Node(value, buffer)
else:
prev.next_node = Node(value, buffer)
def add(self, value): # for ordered list (priority queue) -> selective insertion : not traditional LinkedList
'''
is empty?
insert at head
insert at tail
insert in the middle
'''
pass
def remove(self, value): # pop specific value once
'''
is empty?
existed?
remove at head
remove at tail
remove in the middle
is empty?
'''
if not self.search(value): # if not found or list is empty -> can also implied empty list
print('No such value in the list')
return
else:
prev = None
buffer = self.head
while buffer is not None:
if buffer.value == value:
break
prev = buffer
buffer = buffer.next_node
if buffer is None: # maybe i can skip this part cause i've searched for the element and it existed
print("Value not found")
return
if buffer is self.head:
new_head = self.head.next_node
self.head.next_node = None
self.head = new_head
elif buffer is self.tail:
prev.next_node = None
self.tail = prev
else: # middle deletion
prev.next_node = buffer.next_node
buffer.next_node = None
if self.is_empty():
self.tail = None
def __len__(self):
return self.size()
def __str__(self):
out = "LinkedList size: " + str(self.size())+"\tItems: "
buffer = self.head
while buffer is not None:
out += str(buffer.value) + " "
buffer = buffer.next_node
return out
class DoublyLinkedList:
def __init__(self):
self.head = None
self.tail = None
def test_list():
linked_list = LinkedList()
for item in range(5):
linked_list.append(item)
print(linked_list)
linked_list.insert(0, 'ABHEAD')
linked_list.insert(linked_list.size()-1, 'ABTAIL') # should insert at tail
print(linked_list)
linked_list.insert(4, 'MIDDLE')
print(linked_list)
while not linked_list.is_empty():
linked_list.remove(linked_list.tail.value)
print(linked_list)
print(linked_list)
if __name__ == '__main__':
test_list()
|
995,481 | 6b25725379bf6bea26be5ea912fc35e68083484b | from flask import Flask
from flask.json import jsonify
from subprocess import call
import os, threading, time
import requests
app = Flask(__name__)
running = True
devnull = open(os.devnull, 'w')
mongo_db_ip = "Hadoop:smartcity@143.129.39.127"
mongo_db_port = "27017"
db = "Votes"
inCollection = "Votes"
command = [os.environ["HADOOP_HOME"]+ "/bin/hadoop",
"jar",
"map_reduce.jar",
"MapReduce",
"OPERATION",
mongo_db_ip,
mongo_db_port,
db + "." + inCollection,
db + "." + "OUTCOLLECTION",
"", ""]
class Updater(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
def run(self):
counter = 0
while running:
# recalculate every 5 min.
if counter == 300:
print("------- Automatic update started --------")
requests.get("http://localhost:8080/countVotes")
requests.get("http://localhost:8080/countUserVotes")
counter = 0
time.sleep(1)
counter = counter + 1
mutex = threading.Lock()
class MapReduce():
def __init__(self, operation, outCollection):
self.operation = operation
self.outCollection = outCollection
self.argument1 = ""
self.argument2 = ""
def setArgument1(self, new_arg):
self.argument1 = new_arg
def setArgument2(self, new_arg):
self.argument2 = new_arg
def run(self):
if not mutex.locked():
mutex.acquire()
command[4] = self.operation
command[8] = db + "." + self.outCollection
command[9] = self.argument1
command[10] = self.argument2
print("Executing: " + self.operation + ", saving to collection: " + self.outCollection)
#print(command)
call(command, stdout=devnull, stderr=devnull)
mutex.release()
return True
return False
@app.route("/countVotes")
def countVotes():
thread = MapReduce("vote_count", "vote_cache")
res = "done" if thread.run() else "busy"
return jsonify({"calculation": res})
@app.route("/countUserVotes")
def countUserVotes():
thread = MapReduce("user_vote_count", "user_votes_cache")
res = "done" if thread.run() else "busy"
return jsonify({"calculation": res})
@app.route("/timeCount/<song_id>/<timestamp>")
def countTime(song_id, timestamp):
thread = MapReduce("time_count", "time_vote_cache")
thread.setArgument1(str(timestamp))
thread.setArgument2(str(song_id))
res = "done" if thread.run() else "busy"
return jsonify({"calculation": res})
if __name__ == '__main__':
updater = Updater()
updater.start()
app.run(host='0.0.0.0',port=8080)
running = False
updater.join()
|
995,482 | ac6731b891430a8b480de61bc3c44efe575bc49c | from typing import Optional, Dict, Any
import pandas as pd
from bokeh.models import ColumnDataSource
from bokeh.plotting import Figure
from torch import Tensor
class Tensor2DPlot:
data_source: ColumnDataSource
def _create_2d_plot_data(self, task_size: int, data: Optional[Tensor]) -> pd.DataFrame:
if data is None:
return pd.DataFrame([], columns=['x', 'y', 'color'])
t = data[:, 0:2]
return pd.DataFrame(
[[t[i, 0].item(), t[i, 1].item(), 'red' if i < task_size else 'blue'] for i in range(t.shape[0])],
columns=['x', 'y', 'color'])
def update(self, data: Tensor, config: Dict[str, Any]):
task_size = config['task_size']
self.data_source.data = self._create_2d_plot_data(task_size, data)
def create_2d_plot(self) -> Figure:
self.data_source = ColumnDataSource(data=self._create_2d_plot_data(None, None))
fig = Figure(width=300, height=300, x_range=(-2, 2), y_range=(-2, 2))
fig.circle(x='x', y='y', color='color', source=self.data_source)
return fig |
995,483 | 019ffe0eeb4b5f961b608620cf23211dfe2b212e | #!/usr/bin/env python
import re
import os
#---Intersection Directory
indir1 = "/home/emma.levin/tools/Rprof5_emma/Out/Intersection"
#---Population Directory
indir2 = "/home/emma.levin/tools/Rprof5_emma/Out/Population"
#exps=["cntl2015"]
#exps=["cntl1990"]
#exps=["cntl1940"]
#exps=["cntl1860"]
#exps=["HadISST"]
#exps=["rcp45ear"]
#exps=["rcp45late"]
exps=["HURDAT2"]
kt2ms = 0.514444
#radius = ["rmi", "64", "50", "34"]
radius = ["rmi","64","50","34"]
tfls = {
"cntl2015":"../Get_r34/tcinfo_2015Cntl.txt",
"cntl1990":"../Get_r34/tcinfo_1990Cntl.txt",
"cntl1940":"../Get_r34/tcinfo_1940Cntl.txt",
"cntl1860":"../Get_r34/tcinfo_1860Cntl.txt",
"HadISST" :"../Get_r34/tcinfo_contHadISST.txt",
"rcp45ear":"../Get_r34/tcinfo_HadIISTrcp45ear.txt",
"HURDAT2":"../Get_r34/tcinfo_HURDAT2.txt",
}
pfls = {
"cntl2015":"../../Out/Population/pop_2015m_b2015.txt",
"cntl1990":"../../Out/Population/pop_1990m_b2015.txt",
"cntl1940":"../../Out/Population/pop_1940m_b2015.txt",
"cntl1860":"../../Out/Population/pop_1860m_b2015.txt",
"HadISST" :"../../Out/Population/pop_1990m_b2015.txt",
"rcp45ear":"../../Out/Population/pop_2025m_b2015.txt",
"rcp45late":"../../Out/Population/pop_2090m_b2015.txt",
"HURDAT2":"../../Out/Population/pop_2015m_b2015.txt",
}
periods={
"cntl2015":(1,200),
"cntl1940":(1,200),
"cntl1990":(1,200),
"cntl1860":(1,200),
"HadISST" :(151,220),
"rcp45ear":(151,222),
"rcp45late":(151,221),
"HURDAT2":(2004,2017),
}
outdir="../../Out/RIDX"
class TC:
def __init__(self,idtid):
self.idtid = idtid
self.id = []
self.tid = []
self.lon = []
self.lat = []
self.countyns = []
self.name = []
self.year = []
self.month = []
self.day = []
self.hour = []
self.ws = []
self.wsk = []
self.precip = []
return
class County:
def __init__(self,countyns,name,fid):
self.countyns = countyns
self.name = name
self.fid = fid
self.freq = 1
self.tc={}
return
def make_tcdata(self, id, tid, lon, lat, countyns, name, ws, wsk, precip, year, month, day, hour):
idtid = "%8.8i-%3.3i" % (id,tid)
if not self.tc.has_key(idtid):
self.tc[idtid] = TC(idtid)
self.tc[idtid].id.append(id)
self.tc[idtid].tid.append(tid)
self.tc[idtid].lon.append(lon)
self.tc[idtid].lat.append(lat)
self.tc[idtid].lat.append(ws)
self.tc[idtid].countyns.append(countyns)
self.tc[idtid].name.append(name)
self.tc[idtid].year.append(year)
self.tc[idtid].month.append(month)
self.tc[idtid].day.append(day)
self.tc[idtid].hour.append(hour)
self.tc[idtid].ws.append(ws)
self.tc[idtid].wsk.append(wsk)
self.tc[idtid].precip.append(precip)
return
#---subroutines
def read_intersect(infile,tdata,fixedws=True):
cdata = {}
f = open(infile,"r")
for ii,line in enumerate(f.readlines()):
if ii >= 1:
temp=re.split(",",line.rstrip())
#print infile,temp
fid = int(temp[2])
countyns = int(temp[6])
name = str(temp[7]).upper()
id = int(float(temp[12]))
tid = int(float(temp[13]))
idtid = "%8.8i-%3.3i" % (id,tid)
lon = float(temp[15])
lat = float(temp[16])
year = tdata[idtid]["year"]
month = tdata[idtid]["month"]
day = tdata[idtid]["day"]
hour = tdata[idtid]["hour"]
if fixedws ==False:
ws = float(temp[17])
wsk = "rmi"
precip = tdata[idtid]["prmi"]
else:
ws = kt2ms * fixedws
wsk = "%s" % (str(int(fixedws)))
precip = tdata[idtid]["p%i" % int(fixedws) ]
if not cdata.has_key(countyns):
cdata[countyns] = County(countyns,name,fid)
else:
cdata[countyns].freq += 1
cdata[countyns].make_tcdata(id,tid,lon,lat, countyns, name,ws, wsk, precip, year,month,day,hour)
f.close()
return cdata
def read_tc(infile):
tdata = {}
f = open(infile,"r")
for ii,line in enumerate(f.readlines()):
if ii >= 1:
temp=re.split(",",line.rstrip())
id = int(temp[0])
tid = int(temp[1])
idtid = "%8.8i-%3.3i" % (id,tid)
year = int(temp[2])
month = int(temp[3])
day = int(temp[4])
hour = int(temp[5])
if len(temp)>15:
prmi = float(temp[15])
p64kt = float(temp[16])
p50kt = float(temp[17])
p34kt = float(temp[18])
p20kt = float(temp[19])
else:
prmi = 0.0
p64kt = 0.0
p50kt = 0.0
p34kt = 0.0
p20kt = 0.0
tdata[idtid] = {"id":id, "tid":tid, "year":year, "month":month,"day":day,"hour":hour,"prmi":prmi,"p64":p64kt,"p50":p50kt,"p34":p34kt,"p20":p20kt}
return tdata
def read_population(infile):
pdata = {}
f = open(infile,"r")
for ii,line in enumerate(f.readlines()):
if ii >= 1:
temp=re.split(",",line.rstrip())
cid = "%7.7i" % (int(temp[0]))
pop = int(temp[1])
pdata[cid] = pop
f.close()
return pdata
if __name__ == "__main__":
for exp in exps:
outfl = "%s/ridx_%s_basic.txt" % (outdir,exp)
fo=open(outfl,"w")
fo.write("%8s,%4s,%4s,%2s,%2s,%2s,%9s,%9s,%3s,%8s,%9s,%8s,%10s,%15s\n" % ("id","tid","y","m","d","h","lon","lat","wc","wind","precip","pop","fid","cname"))
cdata = {}
#--get original TC info
tfl = tfls[exp]
tdata = read_tc(tfl)
#--get population
pfl = pfls[exp]
pdata = read_population(pfl)
#--get wind data
wdata = {}
ndata = {}
for rad in radius:
fin = "%s/Intersection_%s_%s.txt" % (indir1,exp,rad)
#--only for existing intersection file
if not os.path.exists(fin):
continue
if rad == "rmi":
cdata[exp] = read_intersect(fin,tdata,fixedws=False)
else:
cdata[exp] = read_intersect(fin,tdata,fixedws=float(rad))
for county in cdata[exp].keys():
for id in cdata[exp][county].tc.keys():
ids = cdata[exp][county].tc[id].id
tids = cdata[exp][county].tc[id].tid
years = cdata[exp][county].tc[id].year
months = cdata[exp][county].tc[id].month
days = cdata[exp][county].tc[id].day
hours = cdata[exp][county].tc[id].hour
wss = cdata[exp][county].tc[id].ws
wsks = cdata[exp][county].tc[id].wsk
precips = cdata[exp][county].tc[id].precip
lons = cdata[exp][county].tc[id].lon
lats = cdata[exp][county].tc[id].lat
names = cdata[exp][county].tc[id].name
countynss = cdata[exp][county].tc[id].countyns
for id,tid,year,month,day, hour,ws,wsk,precip,lon,lat,name,countyns in zip(ids,tids,years,months,days,hours,wss,wsks,precips,lons,lats,names,countynss):
uid = "%7.7i-%8.8i-%3.3i-%4.4i-%2.2i-%2.2i-%2.2i" % (county,id,tid,year,month,day,hour)
#print rad, county, "uid=",uid, "year=",year, "month",month,"day=",day,"hour",hour,"ws=",ws
if not wdata.has_key(uid):
wdata[uid] = "%8i,%4i,%4i,%2.2i,%2.2i,%2.2i,%9.4f,%9.4f,%3s,%8.4f,%9.4f" % (id,tid,year,month,day,hour,lon,lat,wsk,ws,precip)
ndata[uid] = "%10s,%15s" % (countyns, name)
for uid in wdata.keys():
if pdata.has_key(uid[0:7]):
#print "%s %s %8i" % (uid, wdata[uid], int(pdata[uid[0:7]]))
print "%s,%8i" % (wdata[uid], int(pdata[uid[0:7]]))
fo.write("%s,%8i,%s\n" % (wdata[uid], int(pdata[uid[0:7]]),ndata[uid]))
fo.close()
|
995,484 | 4c633734c679d5b999e08028e5d01ca0fb22763b | # Reverse Linked List II: https://leetcode.com/problems/reverse-linked-list-ii/
# Given the head of a singly linked list and two integers left and right where left <= right, reverse the nodes of the list from position left to position right, and return the reversed list.
from types import Optional
# Definition for singly-linked list.
class ListNode:
def __init__(self, val=0, next=None):
self.val = val
self.next = next
# In this problem all we have to do is traverse until we find left and then go ahead and reverse every node up to the right
# the only trick to keep note of is if the head node is the left we will also need to reassing our head node.
class Solution:
def reverseBetween(self, head: Optional[ListNode], left: int, right: int) -> Optional[ListNode]:
if head is None:
return
if left == right:
return head
cur = head
prev = None
# We want to keep the previous node so we go until left == 1 (indexed from 1 as well)
while left > 1:
prev = cur
cur = cur.next
left -= 1
right -= 1
# Now to reverse (we know the tail will end up in our cur pointer and that we need to connect the previous to the reversed nodes)
tail, connection = cur, prev
while right > 0:
temp = cur.next
cur.next = prev
prev = cur
cur = temp
right -= 1
# Now we know if our connector is None that we actually started reversing at the first node so we will update head
if connection is None:
head = prev
else:
connection.next = prev
# Now we need to make sure that our tail of the swapped node points to the next node
tail.next = cur
return head
# This runs in O(N) as we will only traverse once and uses O(1) as we simply are reversing in place
# I think you could have also solved this with backtracking but it is more complicated
# Score Card
# Did I need hints? N
# Did you finish within 30 min? 10
# Was the solution optimal? Ye
# Were there any bugs? Ne
# 5 5 5 5 = 5
|
995,485 | a02e24cdf5e4b3b7a066a01d1c1784ad7d55140f | # put your python code here
hour_one = int(input())
minute_one = int(input())
second_one = int(input())
hour_two = int(input())
minute_two = int(input())
second_two = int(input())
hour = (hour_two - hour_one) * 60 * 60
minute = (minute_two - minute_one) * 60
second = (second_two - second_one)
print(hour + minute + second)
|
995,486 | bca7f1fe07da8ac1f235781d521b295d5a6b8a42 | """
Два списка целых заполняются случайными числами(использовать нужную функцию из модуля random). Необходимо:
a. Сформировать третий список, содержащий элементы обоих списков
b. Сформировать третий список, содержащий элементы обоих списков без повторений;
c. Сформировать третий список, содержащий элементы общие для двух списков;
d. Сформировать третий список, содержащий только уникальные элементы каждого из списков;
e. Сформировать третий список, содержащий только минимальное и максимальное значение каждого из списков.
"""
import random
first_random_number = int(input("Enter first random number: "))
len_first_list = int(input("Enter length of the first list: "))
second_random_number = int(input("Enter second random number: "))
len_second_list = int(input("Enter length of the second list: "))
operation = input("Enter operation [a, b, c, d, e]: ")
list_1 = [random.randint(0, first_random_number) for i in range(len_first_list)]
list_2 = [random.randint(0, second_random_number) for j in range(len_second_list)]
print(f"List_1: {list_1}")
print(f"List_2: {list_2}\n")
if operation == "a":
print(f"List_a: {list_1 + list_2}")
elif operation == "b":
list_b = list_1 + [b for b in list_2 if b not in list_1]
for i in range(len(list_b)):
while list_b.count(i) > 1:
list_b.remove(i)
print(f"List_b: {list_b}")
elif operation == "c":
list_c = [i for i in list_1 if i in list_2]
print(f"List_b: {list_c}")
elif operation == "d":
list_d = [i for i in list_1 + list_2 if i not in list_1 or i not in list_2]
for i in range(len(list_d)):
while list_d.count(i) > 1:
list_d.remove(i)
print(f"List_d: {list_d}")
elif operation == "e":
list_e = [min(list_1), max(list_1), min(list_2), max(list_2)]
print(f"List_e: {list_e}")
else:
print(f"Error: unknown operation symbol: {operation}")
|
995,487 | a52cd0da1cfae6c836e0aee1b0ff8b0d421aa49f | from flask import render_template, redirect, request, url_for, flash
from . import detail_product_raw
from .forms import SearchForm
from app.models import ProductRaw, DetailProductRaw, DetailStock, Product, User, Memorandum, DetailMemorandum
from flask_login import login_required, current_user
from ..helper.views import formatrupiah
import json, datetime
# List Product Raw
@detail_product_raw.route('/detail_product_raw', methods=['GET', 'POST'])
@login_required
def functionGetDetailProductRaw():
form = SearchForm()
product_raw_id = request.args.get('id')
product = DetailProductRaw.select(DetailProductRaw, Product)\
.join(Product, on=(Product.id == DetailProductRaw.product_id))\
.where(DetailProductRaw.product_raw_id == product_raw_id)
total=0
for row in product:
total = total + row.amount
product_raw = ProductRaw.get_by_id(product_raw_id)
sisa = product_raw.amount - total
sisa = formatrupiah(sisa)
total = formatrupiah(total)
product_raw.amount = formatrupiah(product_raw.amount)
return render_template('detail_product_raw/list_detail_product_raw.html', current_user=current_user, form=form, product_raw=product_raw, len_product=len(product), product=product, total=total, sisa=sisa) |
995,488 | 505bbaf9069408d8db8553b58e241df8b0ec8654 | import unittest
from API import ReadQueue
class TestReadQueue(unittest.TestCase):
def test_query_from_queue(self):
return None
def test_is_email(self):
return None
def test_is_shop(self):
return None
def test_is_product(self):
return None
def test_prod_to_id(self):
return None
def test_make_query(self):
return None
|
995,489 | 40c34db6d3baa01392e11352b06c59cc7ec99c53 | from flaskboiler.service import Service
from flaskboiler.users.data import UserData
class UserService(Service):
def __init__(self):
super(UserService, self).__init__(UserData())
|
995,490 | e06898887145fee0989ccde2ca213bb5f5671195 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2012 ~ 2013 Deepin, Inc.
# 2012 ~ 2013 Long Wei
#
# Author: Long Wei <yilang2007lw@gmail.com>
# Maintainer: Long Wei <yilang2007lw@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from nmsetting import NMSetting
from nmlib.nm_utils import TypeConvert
class NMSettingGsm (NMSetting):
'''NMSettingGsm'''
def __init__(self):
NMSetting.__init__(self)
self.name = "gsm"
@property
def number(self):
if "number" in self.prop_dict.iterkeys():
return TypeConvert.dbus2py(self.prop_dict["number"])
@number.setter
def number(self, new_number):
self.prop_dict["number"] = TypeConvert.py2_dbus_string(new_number)
@number.deleter
def number(self):
if "number" in self.prop_dict.iterkeys():
del self.prop_dict["number"]
@property
def username(self):
if "username" in self.prop_dict.iterkeys():
return TypeConvert.dbus2py(self.prop_dict["username"])
@username.setter
def username(self, new_user_name):
self.prop_dict["username"] = TypeConvert.py2_dbus_string(new_user_name)
@username.deleter
def username(self):
if "username" in self.prop_dict.iterkeys():
del self.prop_dict["username"]
@property
def password(self):
if "password" in self.prop_dict.iterkeys():
return TypeConvert.dbus2py(self.prop_dict["password"])
@password.setter
def password(self, new_password):
self.prop_dict["password"] = TypeConvert.py2_dbus_string(new_password)
@password.deleter
def password(self):
if "password" in self.prop_dict.iterkeys():
del self.prop_dict["password"]
@property
def password_flags(self):
if "password-flags" in self.prop_dict.iterkeys():
return self.prop_dict["password-flags"]
@password_flags.setter
def password_flags(self, new_password_flags):
self.prop_dict["password-flags"] = TypeConvert.py2_dbus_uint32(new_password_flags)
@password_flags.deleter
def password_flags(self):
if "password-flags" in self.prop_dict.iterkeys():
del self.prop_dict["password-flags"]
@property
def apn(self):
if "apn" in self.prop_dict.iterkeys():
return TypeConvert.dbus2py(self.prop_dict["apn"])
@apn.setter
def apn(self, new_apn):
self.prop_dict["apn"] = TypeConvert.py2_dbus_string(new_apn)
@apn.deleter
def apn(self):
if "apn" in self.prop_dict.iterkeys():
del self.prop_dict["apn"]
@property
def network_id(self):
if "network-id" in self.prop_dict.iterkeys():
return TypeConvert.dbus2py(self.prop_dict["network-id"])
@network_id.setter
def network_id(self, new_network_id):
self.prop_dict["network-id"] = TypeConvert.py2_dbus_string(new_network_id)
@network_id.deleter
def network_id(self):
if "network-id" in self.prop_dict.iterkeys():
del self.prop_dict["network-id"]
@property
def network_type(self):
if "network-type" in self.prop_dict.iterkeys():
return TypeConvert.dbus2py(self.prop_dict["network-type"])
@network_type.setter
def network_type(self, new_network_type):
self.prop_dict["network-type"] = TypeConvert.py2_dbus_uint32(new_network_type)
@network_type.deleter
def network_type(self):
if "network-type" in self.prop_dict.iterkeys():
del self.prop_dict["network-type"]
@property
def allowed_bands(self):
if "allowed-bands" in self.prop_dict.iterkeys():
return TypeConvert.dbus2py(self.prop_dict["allowed-bands"])
@allowed_bands.setter
def allowed_bands(self, new_allowed_bands):
self.prop_dict["allowed-bands"] = TypeConvert.py2_dbus_uint32(new_allowed_bands)
@allowed_bands.deleter
def allowed_bands(self):
if "allowed-bands" in self.prop_dict.iterkeys():
del self.prop_dict["allowed-bands"]
@property
def pin(self):
if "pin" in self.prop_dict.iterkeys():
return TypeConvert.dbus2py(self.prop_dict["pin"])
@pin.setter
def pin(self, new_pin):
self.prop_dict["pin"] = TypeConvert.py2_dbus_string(new_pin)
@pin.deleter
def pin(self):
if "pin" in self.prop_dict.iterkeys():
del self.prop_dict["pin"]
@property
def pin_flags(self):
if "pin-flags" in self.prop_dict.iterkeys():
return TypeConvert.dbus2py(self.prop_dict["pin-flags"])
@pin_flags.setter
def pin_flags(self, new_pin_flags):
self.prop_dict["pin-flags"] = TypeConvert.py2_dbus_uint32(new_pin_flags)
@pin_flags.deleter
def pin_flags(self):
if "pin-flags" in self.prop_dict.iterkeys():
del self.prop_dict["pin-flags"]
@property
def home_only(self):
if "home-only" in self.prop_dict.iterkeys():
return TypeConvert.dbus2py(self.prop_dict["home-only"])
@home_only.setter
def home_only(self, new_home_only):
self.prop_dict["home-only"] = TypeConvert.py2_dbus_boolean(new_home_only)
@home_only.deleter
def home_only(self):
if "home-only" in self.prop_dict.iterkeys():
del self.prop_dict["home-only"]
if __name__ == "__main__":
pass |
995,491 | f7fb224b17b5983aa6a5c58ec7114d42813553dc | # coding: utf-8
import time
def get_timestamp():
return str(time.time()).split(".")[0] |
995,492 | d114a7b614d359d4d93b306ccb0d0fe93446ab64 | # Copyright 2019 Atalaya Tech, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
import asyncio
import logging
import uuid
import aiohttp
from bentoml import config
from bentoml.utils.trace import async_trace, make_http_headers
from bentoml.marshal.utils import merge_aio_requests, split_aio_responses
logger = logging.getLogger(__name__)
ZIPKIN_API_URL = config("tracing").get("zipkin_api_url")
class Parade:
STATUSES = (STATUS_OPEN, STATUS_CLOSED, STATUS_RETURNED,) = range(3)
def __init__(self):
self.batch_input = OrderedDict()
self.batch_output = None
self.returned = asyncio.Condition()
self.status = self.STATUS_OPEN
def feed(self, id_, data):
assert self.status == self.STATUS_OPEN
self.batch_input[id_] = data
return True
async def start_wait(self, interval, call):
try:
await asyncio.sleep(interval)
self.status = self.STATUS_CLOSED
outputs = await call(self.batch_input.values())
self.batch_output = OrderedDict(
[(k, v) for k, v in zip(self.batch_input.keys(), outputs)]
)
self.status = self.STATUS_RETURNED
async with self.returned:
self.returned.notify_all()
except Exception as e: # noqa TODO
raise e
finally:
# make sure parade is closed
self.status = self.STATUS_CLOSED
class ParadeDispatcher:
def __init__(self, interval):
'''
params:
* interval: milliseconds
'''
self.interval = interval
self.callback = None
self._current_parade = None
def get_parade(self):
if self._current_parade and self._current_parade.status == Parade.STATUS_OPEN:
return self._current_parade
self._current_parade = Parade()
asyncio.get_event_loop().create_task(
self._current_parade.start_wait(self.interval / 1000.0, self.callback)
)
return self._current_parade
def __call__(self, callback):
self.callback = callback
async def _func(inputs):
id_ = uuid.uuid4().hex
parade = self.get_parade()
parade.feed(id_, inputs)
async with parade.returned:
await parade.returned.wait()
return parade.batch_output.get(id_)
return _func
class MarshalService:
_MARSHAL_FLAG = config("marshal_server").get("marshal_request_header_flag")
def __init__(self, target_host="localhost", target_port=None):
self.target_host = target_host
self.target_port = target_port
self.batch_handlers = dict()
def set_target_port(self, target_port):
self.target_port = target_port
def add_batch_handler(self, api_name, max_latency):
if api_name not in self.batch_handlers:
@ParadeDispatcher(max_latency)
async def _func(requests):
headers = {self._MARSHAL_FLAG: 'true'}
api_url = f"http://{self.target_host}:{self.target_port}/{api_name}"
with async_trace(
ZIPKIN_API_URL,
service_name=self.__class__.__name__,
span_name=f"merged {api_name}",
) as trace_ctx:
headers.update(make_http_headers(trace_ctx))
reqs_s = await merge_aio_requests(requests)
async with aiohttp.ClientSession() as client:
async with client.post(
api_url, data=reqs_s, headers=headers
) as resp:
resps = await split_aio_responses(resp)
if resps is None:
return [aiohttp.web.HTTPInternalServerError] * len(requests)
return resps
self.batch_handlers[api_name] = _func
async def request_dispatcher(self, request):
with async_trace(
ZIPKIN_API_URL,
request.headers,
service_name=self.__class__.__name__,
span_name="handle request",
):
api_name = request.match_info['name']
if api_name in self.batch_handlers:
resp = await self.batch_handlers[api_name](request)
return resp
else:
resp = await self._relay_handler(request, api_name)
return resp
def make_app(self):
app = aiohttp.web.Application()
app.router.add_post('/{name}', self.request_dispatcher)
return app
def fork_start_app(self, port):
# Use new eventloop in the fork process to avoid problems on MacOS
# ref: https://groups.google.com/forum/#!topic/python-tornado/DkXjSNPCzsI
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
app = self.make_app()
aiohttp.web.run_app(app, port=port)
async def _relay_handler(self, request, api_name):
data = await request.read()
headers = request.headers
api_url = f"http://{self.target_host}:{self.target_port}/{api_name}"
with async_trace(
ZIPKIN_API_URL,
service_name=self.__class__.__name__,
span_name=f"{api_name} relay",
) as trace_ctx:
headers.update(make_http_headers(trace_ctx))
async with aiohttp.ClientSession() as client:
async with client.post(
api_url, data=data, headers=request.headers
) as resp:
body = await resp.read()
return aiohttp.web.Response(
status=resp.status, body=body, headers=resp.headers,
)
|
995,493 | 7ef4b762d2643f97143bfe8d15ba6a06d2e0191c | print("Nhap so phan tu cua mang")
n = int(input())
list = []
temp = 0
print("Nhap so k")
k = int(input())
for i in range(n):
list.append(int(input()))
for i in range(n-1):
for j in range (i+1,n):
if list[i]>list[j] :
temp = list[i]
list[i] = list[j]
list[j] = temp
for i in range(n):
if(i==k-1):
print(list[i])
|
995,494 | 7d8c0432da1a6d391b3694bbe752c0a7670c312b | ab = raw_input()
if(ab>='a' and ab<='z'):
print("Alphabet")
elif(ab>='A' and ab<='Z'):
print("Alphabet")
else:
print("No")
|
995,495 | 5f0e214d871d09883e581ca9706878faeff511f2 | __author__ = "Nick Isaacs"
import configparser
import os
import logging.handlers
import logging
import sys
RELATIVE_CONFIG_PATH = "../config/gnip.cfg"
class Envirionment(object):
def __init__(self):
# Just for reference, not all that clean right now
self.config_file_name = None
self.config = None
self.setup_config()
self.streamname = self.config.get('gnip', 'streamName')
self.logr = None
self.rotating_handler = None #shared across modules
self.setup_logs()
self.logr.info("readding configuration file: %s"%self.config_file_name)
self.username = self.config.get('gnip', 'userName')
self.password = self.config.get('gnip', 'password')
self.streamurl = self.config.get('gnip', 'streamURL')
try:
self.compressed = self.config.getboolean('gnip', 'compressed')
except configparser.NoOptionError:
self.compressed = True
def setup_logs(self):
self.logr = logging.getLogger(__name__)
self.logfilepath = self.config.get('logger', 'logFilePath').strip(r'^/') or "log"
try:
os.mkdir(self.logfilepath)
except OSError:
# File exists
pass
logging_level={'CRITICAL': logging.CRITICAL, 'ERROR': logging.ERROR, 'WARNING': logging.WARNING, 'INFO': logging.INFO, 'DEBUG': logging.DEBUG}
self.logr.setLevel(logging_level[self.config.get('logger', 'logLevel').strip(r'^/').upper() or 'DEBUG'])
if self.logr.level>=logging.INFO:
formatString="%(asctime)s: %(message)s"
else:
formatString="%(asctime)s [%(levelname)s] [%(module)s.%(funcName)s]: %(message)s"
self.rotating_handler = logging.handlers.RotatingFileHandler(
filename=self.logfilepath + "/%s-log" % self.streamname,
mode='a', maxBytes=2 ** 24, backupCount=5)
self.rotating_handler.setFormatter(logging.Formatter(formatString))
self.logr.addHandler(self.rotating_handler)
def setup_config(self):
if 'GNIP_CONFIG_FILE' in os.environ:
self.config_file_name = os.environ['GNIP_CONFIG_FILE']
else:
dir = os.path.dirname(__file__)
self.config_file_name = os.path.join(dir, RELATIVE_CONFIG_PATH)
if not os.path.exists(self.config_file_name):
self.logr.debug("No configuration file found.")
sys.exit()
self.config = configparser.ConfigParser()
self.config.read(self.config_file_name)
|
995,496 | 9ca1618510be5446a03febe6833a824696f58a88 | from django.conf import urls
from plzmore.core import views
urlpatterns = [
urls.url(
r'^video/(?P<plzid>[A-Za-z0-9\-\_]{11})/$',
views.StreamView.as_view()
),
urls.url(
r'^torrent/upload/$',
views.UploadTorrentView.as_view()
),
]
|
995,497 | 4108e709859e7391e4db1f60b9c234e1ec7d0216 | ll=[1,2,3,87,98]
ss=""
flt=[]
for i in ll:
if isinstance(i,int):
ll.append(i)
if isinstance(i,str):
ss +=i
if isinstance(i,float):
flt.append(i)
print ll
print ss
print flt
|
995,498 | 0516e6a13d93d26fe5d5cf70faf0a7f5b2b552d3 | def simpleanimation():
import vcs, cdms2, sys
x = vcs.init()
f = cdms2.open(vcs.sample_data+"/clt.nc")
v = f["clt"]
dv3d = vcs.get3d_scalar()
x.plot( v, dv3d )
x.interact()
def simplevector():
import vcs, cdms2, sys
x = vcs.init()
f = cdms2.open(vcs.sample_data+"/clt.nc")
v = f["v"]
u = f["u"]
dv3d = vcs.get3d_vector()
dv3d.BasemapOpacity = 0.15
x.plot( u, v, dv3d )
x.interact()
def simplevolume():
import vcs, cdms2, sys
x = vcs.init()
f = cdms2.open(vcs.sample_data+"/geos5-sample.nc")
u = f["uwnd"]
dv3d = vcs.get3d_scalar()
dv3d.VerticalScaling = 3.0
dv3d.ScaleOpacity = [0.0, 0.8]
dv3d.ScaleColormap = [-46.0, 45, 1]
dv3d.ScaleTransferFunction = [8.6, 76.7, 1]
dv3d.BasemapOpacity = [0.5]
dv3d.XSlider = vcs.off
dv3d.ZSlider = vcs.off
dv3d.YSlider = vcs.off
dv3d.ToggleVolumePlot = vcs.on
dv3d.ToggleSurfacePlot = vcs.off
dv3d.Camera={'Position': (-161, -171, 279), 'ViewUp': (.29, 0.67, 0.68), 'FocalPoint': (146.7, 8.5, -28.6)}
x.plot( u, dv3d )
x.interact()
def run_scalar_ctest( filename, varname, parms, template = "default" ):
import vcs, cdms2
x = vcs.init()
f = cdms2.open(vcs.sample_data+"/"+filename )
v = f[varname]
dv3d = vcs.get3d_scalar( template )
for item in list(parms.items()):
dv3d.setParameter( item[0], item[1] )
x.plot( v, dv3d )
x.interact()
def ctest_as_script():
import vcs
parameters = {
"ScaleColormap": [89.13197640956652, 100.0, 1],
"ScaleOpacity": [1.0, 1.0],
"BasemapOpacity": [0.5],
"Animation": [0.0],
"ZSlider": ( [0.2833581758795678], vcs.on ),
"YSlider": [-90.0],
"ToggleVolumePlot": ( [[1]], vcs.on ),
"XSlider": [-180.0],
"axes": [['xyt']],
"IsosurfaceValue": [50.0],
"VerticalScaling": [1.0],
"ScaleTransferFunction": ( [88.42048588004492, 100.0, 1], vcs.on ),
"Camera": {'cell': (0, 0), 'Position': (-510.89793108644596, -99.49403616328722, 499.57693223045857), 'ViewUp': (0.6679428060896573, 0.18703087988580122, 0.7203276044705059), 'FocalPoint': (0.0, 0.0, 0.0)},
"XSlider": [-180.0],
"YSlider": [-90.0],
"ZSlider": ( [0.2833581758795678], vcs.on ),
"ToggleVolumePlot": ( [[1]], vcs.on ),
}
run_scalar_ctest( "clt.nc", "clt", parameters, 'Hovmoller3D' )
if __name__ == "__main__":
ctest_as_script()
|
995,499 | 486f576ad2ffa1d4e9bacd78aa993cd237e4fe17 | '''
Packages needed in default aws slack command blueprint
'''
import boto3, json, logging, os
'''
Packages non-native to lambda
Required to by installed, zipped with app, uploaded to lambda
'''
import req
'''
Environment variable decryption
'''
from base64 import b64decode
from urlparse import parse_qs
ENCRYPTED_EXPECTED_TOKEN = os.environ['kmsEncryptedToken']
kms = boto3.client('kms')
expected_token = kms.decrypt(CiphertextBlob=b64decode(ENCRYPTED_EXPECTED_TOKEN))['Plaintext']
'''
Logger setup
'''
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def respond(err, res=None):
return {
'statusCode': '400' if err else '200',
'body': err.message if err else json.dumps(res),
'headers': {
'Content-Type': 'application/json',
},
}
def lambda_handler(event, context):
params = parse_qs(event['body'])
token = params['token'][0]
if token != expected_token:
logger.error("Request token (%s) does not match expected", token)
return respond(Exception('Invalid request token'))
user = params['user_name'][0]
command = params['command'][0]
channel = params['channel_name'][0]
command_text = params['text'][0]
return respond(None, "%s invoked %s in %s with the following text: %s" % (user, command, channel, command_text))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.