blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
383ab46399c4da95cbbc9c2295877544ed3388af | 6d8829528bcd7c77cac5ad5f068892e59b8d4b3e | /mainsite/admin.py | 0e5aa49d92b70c684a7c6712d69c9f3bd8ced7f9 | [] | no_license | julietxiao/StockBlog | c59bc32fb65807d76d259d430eaabbccd87bf54d | 1c96ed7d66fbe4c958efeab2ac04d6e26b04a0a1 | refs/heads/master | 2022-12-11T14:24:55.369247 | 2020-08-12T16:30:23 | 2020-08-12T16:30:23 | 162,087,411 | 2 | 1 | null | 2022-12-08T01:20:35 | 2018-12-17T06:53:21 | Python | UTF-8 | Python | false | false | 404 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
# Register your models here.
from .models import Stock
# from .models import Stock_change
class StockAdmin(admin.ModelAdmin):
    """Django admin configuration for the Stock model.

    Displays the key per-stock metrics as columns in the admin changelist.
    """
    # Columns shown in the admin list view; names must match Stock model fields.
    list_display = ('name', 'code', 'industry', 'area', 'price_change', 'p_change','pe','gpr', 'npr')

# Register Stock with its custom admin options.
admin.site.register(Stock, StockAdmin)
# admin.site.register(Stock_change)
"julietxiao99@qq.com"
] | julietxiao99@qq.com |
107874511c5b845e873102ab554219fccc15903d | 9c263c3c4075a9eb7ddc6044efa6923bedce285c | /src/V-detector/deployment phase2 V_detectors implementation/Vdetector.py | 76640b130efbc0cd82d847f691595e6461ff853c | [
"MIT"
] | permissive | minzhekang/NSA-VdetectorPython | c783bf3b16dd47d4cbf38c7ff7840044b0db6648 | ff7f692be44ae5ca9c85fed1da1b8d75c60ec662 | refs/heads/master | 2020-06-18T13:46:26.179253 | 2019-08-07T08:20:32 | 2019-08-07T08:20:32 | 196,322,358 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,843 | py | import pandas as pd
import numpy as np
import random
from tqdm import tqdm
# Pandas display option: do not wrap wide frames when printing.
pd.set_option('display.expand_frame_repr', False)
# Training set; assumed to contain only normal (self) samples.
file = "Training.csv"
data = pd.read_csv(file)
NormAttList = data["Normal/Attack"]
data = data.drop(["Normal/Attack"], axis= 1) # only the numerical variables
Nself = len(data.values) # gives you the total number of data
radius_self = 0.00 # self radius of normal data
def euc_distance(array1, array2):
    """Row-wise Euclidean distance between ``array1`` and ``array2``.

    ``array2`` is broadcast against ``array1``; the squared differences are
    summed along axis 1 and the square root of each row sum is returned.
    """
    squared_diff = (array1 - array2) ** 2
    return np.sqrt(squared_diff.sum(axis=1))
def gen_detectors(ndim=2):
    """Generate one candidate detector as a random point in [0, 1)^ndim.

    The dimensionality was previously hard-coded to 2; it is now a
    parameter with a backward-compatible default.

    :param ndim: dimensionality of the detector (default 2, matching the
        two normalized features used by this script).
    :return: numpy array of ``ndim`` uniform samples in [0, 1).
    """
    return np.array([random.random() for _ in range(ndim)])
# Negative-selection detector generation: keep sampling random candidate
# detectors, accepting only those that do not overlap any self (training)
# sample, until N detectors have been collected.
N = 100 # number of detectors
counter = 0
# Preallocated result lists; the integer placeholders are overwritten below.
D = [x for x in range(N)]
Dradius = [x for x in range(N)]
pbar = tqdm(total=N, initial = 0, desc= "Generating detectors!") # progress-bar
while counter < N:
    detector = gen_detectors()
    distance_list = list(euc_distance(data, detector)) # calculates in a large array
    dmin = np.min(distance_list) # calculates the minimum distance between current detector and all the data
    detector_radius = dmin-radius_self # minus away the radius_self to ensure it doesnt overlap
    # Accept the candidate only if it lies strictly outside every self sample.
    if dmin > radius_self:
        D[counter] = detector
        Dradius[counter] = detector_radius
        counter += 1
        pbar.update(1)
pbar.close()
#############################Test Phase#####################################
# Classify every sample in Attack.csv: a sample is flagged as an attack when
# it falls inside at least one detector hypersphere (min signed distance < 0).
print("Initializing test phase...")
file2 = "Attack.csv"
data2 = pd.read_csv(file2)
NormAttListTest = data2["Normal/Attack"]
data2 = data2.drop(["Normal/Attack"], axis = 1) # axis=1 drops the label column
FP=0
FN=0
TP=0
TN=0
# Boolean ground-truth masks for the two classes.
AttListTest = np.array([val == 'Attack' for val in NormAttListTest])
NormListTest = np.array([val == 'Normal' for val in NormAttListTest])
D = np.array(D)
distance_test = np.array([euc_distance(val, D) - Dradius for val in data2.values]) # distance of detector and test point minus the detector radius
#distance_test = euc_distance2(data2.values[:, None, :], D[None, :, :]) - Dradius # broadcasting results in memory error
print("Evaluating minimum distance!")
#distance_test_min = np.array([np.min(val) for val in distance_test])
distance_test_min = np.min(distance_test, axis = 1)
print("Calculated")
# NOTE(review): both comparisons are strict, so a sample exactly on a
# detector boundary (min distance == 0) is counted in neither bucket.
TP += np.sum( (distance_test_min < 0) & AttListTest)
FN += np.sum( (distance_test_min > 0) & AttListTest)
FP += np.sum( (distance_test_min < 0) & NormListTest)
TN += np.sum( (distance_test_min > 0) & NormListTest)
print("Total number of data in test = ", len(data2.values))
print("Total number of data checked = ", FN+TN+FP+TP)
print("Number of Attacks = ",np.sum(AttListTest))
print("Number of Normal = ",np.sum(NormListTest))
print("Detection Rate = ",TP/(TP+FN))
print("False Alarm Rate = ",FP/(FP+TN))
print("TP = ",TP)
print("FN = ",FN)
print("FP = ",FP)
print("TN = ",TN)
| [
"36184648+axois@users.noreply.github.com"
] | 36184648+axois@users.noreply.github.com |
642c7ba9052302594420824eb4b4203a42de46b6 | 81894c54ca2e964394083b14824a639d71333d1f | /eth_tool/chain.py | e6c137d29d19770c839931ade53a9a5283a769df | [] | no_license | l3n641/eth_tool | 958694204bbcd9814350222fcfc0f921604c3d34 | e05a71fcbce3b113d3eaff4f28e4d91e465f4eee | refs/heads/master | 2022-12-12T11:05:42.948188 | 2018-10-21T11:29:13 | 2018-10-21T11:29:13 | 154,000,862 | 0 | 0 | null | 2022-12-08T01:15:41 | 2018-10-21T11:27:20 | Python | UTF-8 | Python | false | false | 2,187 | py | import os
from eth_tool import web3_connector
from eth_tool.block import Block
class Chain:
    """Convenience wrapper around the web3 connection.

    Exposes helpers for querying blocks, accounts and transaction receipts,
    plus a filesystem helper that extracts account addresses from keystore
    file names.
    """

    def get_address_by_path(self, path="./"):
        """Extract account addresses from keystore file names under *path*.

        Keystore files are named like ``UTC--<date>--<address>``; the part
        after the last ``--`` is the hex address without the ``0x`` prefix.

        :param path: directory to scan; a missing directory yields ``[]``.
        :return: list of ``0x``-prefixed address strings.
        """
        if not os.path.exists(path):
            return []
        address_list = []
        for file_name in os.listdir(path):
            # Join with *path* so files are detected regardless of the
            # current working directory (the original checked the bare name).
            if not os.path.isfile(os.path.join(path, file_name)):
                continue
            *_, address = file_name.split("--")
            address_list.append("0x" + address)
        return address_list

    def get_last_block(self):
        """Return the most recent ("latest") block from the node."""
        return web3_connector.eth.getBlock("latest")

    def get_accounts(self):
        """Return the list of accounts known to the connected node."""
        return web3_connector.eth.accounts

    def get_block_number(self):
        """Return the current block number."""
        return web3_connector.eth.blockNumber

    def get_block_by_number(self, number):
        """Return a :class:`Block` wrapper for block *number*."""
        return Block(number)

    def get_block_by_hash(self, block_hash):
        """Look up a block by its hash.

        :param block_hash: block hash to resolve.
        :return: :class:`Block` wrapper for the matching block.
        :raises Exception: if no block with *block_hash* exists.
        """
        try:
            # Use the eth namespace, consistent with the other helpers
            # (``web3_connector.getBlock`` does not exist on a Web3 object).
            block_info = web3_connector.eth.getBlock(block_hash)
            return self.get_block_by_number(block_info['number'])
        except Exception:
            raise Exception("NOT FOUND BLOCK BY %s" % (block_hash))

    def has_block(self, block):
        """Return ``True`` if *block* (hash or number) exists, else ``False``.

        The original ``raise False`` was a ``TypeError`` at runtime; this
        now returns ``False`` as the docstring implies.
        """
        try:
            web3_connector.eth.getBlock(block)
            return True
        except Exception:
            return False

    def get_transaction_receipt(self, transaction_hash):
        """Return the receipt for *transaction_hash*."""
        return web3_connector.eth.getTransactionReceipt(transaction_hash)
| [
"371522155@qq.com"
] | 371522155@qq.com |
ecc798e7483e7c569341b1c6e8f1e83ac14af490 | fb03272d54495fbc3735129e627fbca540579ca4 | /libpysat/derived/m3/__init__.py | 8a4b8e8b26a9d33b3157a497d27bc337124a8548 | [
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-public-domain-disclaimer",
"LicenseRef-scancode-public-domain"
] | permissive | Kelvinrr/PySAT | f7953ebddeeec7a46274deb7d037ed2976c2c12a | ef0a197df33b33ead0c872ac1069bf9622520503 | refs/heads/master | 2021-09-20T12:34:50.138609 | 2018-07-26T14:19:16 | 2018-07-26T14:19:16 | 114,694,397 | 0 | 0 | null | 2017-12-18T22:34:26 | 2017-12-18T22:34:25 | null | UTF-8 | Python | false | false | 46 | py | from . import pipe, development, supplemental
| [
"jlaura@usgs.gov"
] | jlaura@usgs.gov |
f9106238b4ff20bec1e7b0835e8bd33b0db2acf4 | 8e69eee9b474587925e22413717eb82e4b024360 | /v1.0.0.test/toontown/uberdog/DistributedDataStoreManagerUD.py | a96c02427ed6514633f660973bad639449564c3f | [
"MIT"
] | permissive | TTOFFLINE-LEAK/ttoffline | afaef613c36dc3b70514ccee7030ba73c3b5045b | bb0e91704a755d34983e94288d50288e46b68380 | refs/heads/master | 2020-06-12T15:41:59.411795 | 2020-04-17T08:22:55 | 2020-04-17T08:22:55 | 194,348,185 | 5 | 4 | null | null | null | null | UTF-8 | Python | false | false | 523 | py | from direct.directnotify import DirectNotifyGlobal
from direct.distributed.DistributedObjectUD import DistributedObjectUD
class DistributedDataStoreManagerUD(DistributedObjectUD):
    """UberDOG-side stub of the distributed data-store manager.

    All handlers are unimplemented placeholders; the ``todo`` parameters
    mirror the fields declared in the corresponding DC definition.
    """
    notify = DirectNotifyGlobal.directNotify.newCategory('DistributedDataStoreManagerUD')

    def startStore(self, todo0):
        # TODO: implement (stub generated from the DC definition).
        pass

    def stopStore(self, todo0):
        # TODO: implement.
        pass

    def queryStore(self, todo0, todo1):
        # TODO: implement.
        pass

    def receiveResults(self, todo0, todo1):
        # TODO: implement.
        pass

    def deleteBackupStores(self):
        # TODO: implement.
        pass
"s0mberdemise@protonmail.com"
] | s0mberdemise@protonmail.com |
5a573cd458380500dee46da0c99d7b409b37357f | ae2636686480b01eabf059fdeeef2f40ebd85633 | /awsutils/iamRolesV1.py | de03a4c33177381294353f7391529e62a4ab27bb | [] | no_license | munireddy/setupInstructions | 3f228c5651488efa59405f1269b4705915dc9a4e | 5710af90ce0e18a04951c3489e5ef5ddcec3dec0 | refs/heads/master | 2022-07-04T06:21:01.632450 | 2022-06-07T21:30:44 | 2022-06-07T21:30:44 | 211,887,816 | 0 | 2 | null | 2020-10-24T21:34:10 | 2019-09-30T15:07:23 | Python | UTF-8 | Python | false | false | 3,547 | py | import boto3
import csv
# Audit script: for every IAM user, collect group memberships, inline and
# attached policies (user-level and group-level) and MFA status, and write
# one row per user to userlist.csv.
#client = boto3.client('iam',aws_access_key_id="XXXX",aws_secret_access_key="YYY")
client = boto3.client('iam')
users = client.list_users()
user_list = []
# NOTE(review): this handle is never closed; the final rows may be lost to
# buffering unless the interpreter exits cleanly.
ofh= open("userlist.csv", "w")
fieldnames = ['userName', 'Groups', 'Policies','isMFADeviceConfigured']
writer = csv.DictWriter(ofh, fieldnames=fieldnames)
writer.writeheader()
for key in users['Users']:
    result = {}
    Policies = []
    Groups=[]
    result['userName']=key['UserName']
    k1 = key['UserName']
    # Inline (user-embedded) policies.
    List_of_Policies = client.list_user_policies(UserName=key['UserName'])
    print(type(List_of_Policies['PolicyNames']))
    # Managed policies attached directly to the user.
    List_of_Policies1 = client.list_attached_user_policies(UserName=key['UserName'])
    # Need to read the policies attached from group. For this extract the
    # groups associated with the user first, then iterate over the groups
    # to find their policies. Finally append the policies to
    # List_of_Policies['PolicyNames'].
    List_of_Groups = client.list_groups_for_user(UserName=key['UserName'])
    for Group in List_of_Groups['Groups']:
        Groups.append(Group['GroupName'])
    result['Groups'] = Groups
    if Groups:
        print("List of group Policies ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^")
        for x in Groups:
            List_of_Group_Policies = client.list_attached_group_policies(GroupName=x)
            print (List_of_Group_Policies)
            for y in List_of_Group_Policies['AttachedPolicies']:
                List_of_Policies['PolicyNames'].append(y['PolicyName'])
    # NOTE(review): list_attached_user_policies always returns a non-empty
    # response dict, so this branch appears never taken — confirm whether the
    # intent was ``if not List_of_Policies1['AttachedPolicies']:``.
    if not List_of_Policies1:
        result['Policies'] = List_of_Policies['PolicyNames']
        print("###############################################################################")
    else:
        for x in List_of_Policies1['AttachedPolicies']:
            #print(x['PolicyName'])
            List_of_Policies['PolicyNames'].append(x['PolicyName'])
        result['Policies'] = List_of_Policies['PolicyNames']
    print (result['Policies'])
    # MFA status: configured iff the user has at least one MFA device.
    List_of_MFA_Devices = client.list_mfa_devices(UserName=key['UserName'])
    if not len(List_of_MFA_Devices['MFADevices']):
        result['isMFADeviceConfigured']=False
    else:
        result['isMFADeviceConfigured']=True
    user_list.append(result)
    # Normalize falsy values (empty lists etc.) to None for the CSV output.
    for k,v in result.items():
        if v :
            pass
        else:
            result[k] = None
    #writer.writerow(result['userName'],result['Groups'], result['Policies'], result['isMFADeviceConfigured'])
    writer.writerow(result)
for key in user_list:
    # print (key)
    pass
#for k, v in result.items():
    # writer.writerow([k, v])
| [
"noreply@github.com"
] | noreply@github.com |
f73c69327f9a0808a5342429f537d9f3327594c9 | cec7315a6e86aece2b1fbc5c471aafca3288bfc2 | /backend/manage.py | d8e871a95646132dff05fabdcd33238d8bf93a36 | [] | no_license | crowdbotics-apps/broken-art-29088 | c783b82a52483e5bec52893bdae01c1849095e44 | b240934504f1aba03821ff07b336298fb98e6ca6 | refs/heads/master | 2023-06-25T17:31:47.595217 | 2021-07-23T21:38:46 | 2021-07-23T21:38:46 | 388,932,591 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 636 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Entry point for Django administrative commands (manage.py)."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'broken_art_29088.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as err:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from err
    else:
        # Dispatch the requested management command.
        execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
0c65b40ef58177a2d7d43bc3c5ca39893a55f433 | 30b387d7e32f75507fa4247bae944371c3f9bf3f | /python/rocrand/rocrand/rocrand.py | 59a2f4f3f016a2730d4b5dccb8a04dd4164c35e3 | [
"GPL-3.0-only",
"MIT"
] | permissive | fangbaohui/rocRAND | 961ceb1f6e9a49afaf2ccce1948f7e31451a0f5e | 32eb20820fad35bbf8d73830c93521b9192051b2 | refs/heads/master | 2022-07-06T19:58:31.881736 | 2020-05-11T21:53:32 | 2020-05-11T21:53:32 | 263,795,899 | 1 | 0 | MIT | 2020-05-14T02:33:31 | 2020-05-14T02:33:30 | null | UTF-8 | Python | false | false | 15,131 | py | # Copyright (c) 2017 Advanced Micro Devices, Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""rocRAND Python Wrapper"""
import os
import ctypes
import ctypes.util
from ctypes import *
import numbers
import numpy as np
from .hip import load_hip, HIP_PATHS
from .hip import empty, DeviceNDArray, device_pointer
from .utils import find_library, expand_paths
from .finalize import track_for_finalization
rocrand = None
ROCRAND_PATHS = [
os.getenv("ROCRAND_PATH")
] + expand_paths(HIP_PATHS, ["", "rocrand"])
def load_rocrand():
    """Load librocrand.so into the module-global ``rocrand`` handle.

    Searches the candidate directories collected in ROCRAND_PATHS and
    raises ImportError if the shared library cannot be located or loaded.
    """
    global rocrand
    try:
        rocrand = CDLL(find_library(ROCRAND_PATHS, "librocrand.so"))
    except OSError as e:
        raise ImportError("librocrand.so cannot be loaded: " + str(e))

# HIP must be loaded before rocRAND; both loads happen at import time.
load_hip()
load_rocrand()
ROCRAND_RNG_PSEUDO_DEFAULT = 400
ROCRAND_RNG_PSEUDO_XORWOW = 401
ROCRAND_RNG_PSEUDO_MRG32K3A = 402
ROCRAND_RNG_PSEUDO_MTGP32 = 403
ROCRAND_RNG_PSEUDO_PHILOX4_32_10 = 404
ROCRAND_RNG_QUASI_DEFAULT = 500
ROCRAND_RNG_QUASI_SOBOL32 = 501
ROCRAND_STATUS_SUCCESS = 0
ROCRAND_STATUS_VERSION_MISMATCH = 100
ROCRAND_STATUS_NOT_CREATED = 101
ROCRAND_STATUS_ALLOCATION_FAILED = 102
ROCRAND_STATUS_TYPE_ERROR = 103
ROCRAND_STATUS_OUT_OF_RANGE = 104
ROCRAND_STATUS_LENGTH_NOT_MULTIPLE = 105
ROCRAND_STATUS_DOUBLE_PRECISION_REQUIRED = 106
ROCRAND_STATUS_LAUNCH_FAILURE = 107
ROCRAND_STATUS_INTERNAL_ERROR = 108
ROCRAND_STATUS = {
ROCRAND_STATUS_SUCCESS: (
"ROCRAND_STATUS_SUCCESS",
"Success"),
ROCRAND_STATUS_VERSION_MISMATCH: (
"ROCRAND_STATUS_VERSION_MISMATCH",
"Header file and linked library version do not match"),
ROCRAND_STATUS_NOT_CREATED: (
"ROCRAND_STATUS_NOT_CREATED",
"Generator was not created using rocrand_create_generator"),
ROCRAND_STATUS_ALLOCATION_FAILED: (
"ROCRAND_STATUS_ALLOCATION_FAILED",
"Memory allocation failed during execution"),
ROCRAND_STATUS_TYPE_ERROR: (
"ROCRAND_STATUS_TYPE_ERROR",
"Generator type is wrong"),
ROCRAND_STATUS_OUT_OF_RANGE: (
"ROCRAND_STATUS_OUT_OF_RANGE",
"Argument out of range"),
ROCRAND_STATUS_LENGTH_NOT_MULTIPLE: (
"ROCRAND_STATUS_LENGTH_NOT_MULTIPLE",
"Length requested is not a multiple of dimension"),
ROCRAND_STATUS_DOUBLE_PRECISION_REQUIRED: (
"ROCRAND_STATUS_DOUBLE_PRECISION_REQUIRED",
"GPU does not have double precision"),
ROCRAND_STATUS_LAUNCH_FAILURE: (
"ROCRAND_STATUS_LAUNCH_FAILURE",
"Kernel launch failure"),
ROCRAND_STATUS_INTERNAL_ERROR: (
"ROCRAND_STATUS_INTERNAL_ERROR",
"Internal library error")
}
def check_rocrand(status):
    """Raise :class:`RocRandError` unless *status* is ROCRAND_STATUS_SUCCESS."""
    if status == ROCRAND_STATUS_SUCCESS:
        return
    raise RocRandError(status)
class RocRandError(Exception):
    """Run-time rocRAND error wrapping a ``rocrand_status`` code."""

    def __init__(self, value):
        # Raw status code returned by the C API.
        self.value = value

    def __str__(self):
        # Unknown codes fall back to the numeric value with a generic message.
        name, description = ROCRAND_STATUS.get(
            self.value, (str(self.value), "Unknown error"))
        return "{} ({})".format(description, name)
class RNG(object):
    """Random number generator base class.

    Wraps a C-side ``rocrand_generator`` handle and provides the
    distribution methods shared by :class:`PRNG` and :class:`QRNG`.
    """

    def __init__(self, rngtype, offset=None, stream=None):
        # Create the underlying C generator and register a finalizer so the
        # handle is destroyed when this Python object is garbage collected.
        self._gen = c_void_p()
        check_rocrand(rocrand.rocrand_create_generator(byref(self._gen), rngtype))
        track_for_finalization(self, self._gen, RNG._finalize)

        self._offset = 0
        if offset is not None:
            self.offset = offset

        self._stream = None
        if stream is not None:
            self.stream = stream

    @classmethod
    def _finalize(cls, gen):
        # Finalizer callback: release the C-side generator handle.
        check_rocrand(rocrand.rocrand_destroy_generator(gen))

    @property
    def offset(self):
        """Mutable attribute of the offset of random numbers sequence.

        Setting this attribute resets the sequence.
        """
        return self._offset

    @offset.setter
    def offset(self, offset):
        # NOTE: the docstring that previously sat here described the *stream*
        # attribute; that text now lives on the ``stream`` property below.
        check_rocrand(rocrand.rocrand_set_offset(self._gen, c_ulonglong(offset)))
        self._offset = offset

    @property
    def stream(self):
        """Mutable attribute of HIP stream for all kernel launches of the generator.

        All functions will use this stream.
        *None* means default stream.
        """
        return self._stream

    @stream.setter
    def stream(self, stream):
        check_rocrand(rocrand.rocrand_set_stream(self._gen, stream))
        self._stream = stream

    def _generate(self, gen_func, ary, size, *args):
        """Common driver: fill *ary* with *size* samples using *gen_func*.

        A host NumPy array is filled via a temporary device buffer and a
        copy back; a :class:`DeviceNDArray` is filled in place.
        """
        if size is not None:
            if size > ary.size:
                raise ValueError("requested size is greater than ary")
        else:
            size = ary.size

        if isinstance(ary, np.ndarray):
            # Host array: generate into a temporary device-side buffer.
            dary, needs_conversion = empty(size, ary.dtype), True
        elif isinstance(ary, DeviceNDArray):
            dary, needs_conversion = ary, False
        else:
            raise TypeError("unsupported type {}".format(type(ary)))

        check_rocrand(gen_func(self._gen, device_pointer(dary), c_size_t(size), *args))

        if needs_conversion:
            dary.copy_to_host(ary)

    def generate(self, ary, size=None):
        """Generates uniformly distributed integers.

        Generates **size** (if present) or **ary.size** uniformly distributed
        integers and saves them to **ary**.

        Supported **dtype** of **ary**: :class:`numpy.uint32`, :class:`numpy.int32`.

        :param ary: NumPy array (:class:`numpy.ndarray`) or
            HIP device-side array (:class:`DeviceNDArray`)
        :param size: Number of samples to generate, default to **ary.size**
        """
        if ary.dtype in (np.uint32, np.int32):
            self._generate(
                rocrand.rocrand_generate,
                ary, size)
        else:
            raise TypeError("unsupported type {}".format(ary.dtype))

    def uniform(self, ary, size=None):
        """Generates uniformly distributed floats.

        Generates **size** (if present) or **ary.size** uniformly distributed
        floats and saves them to **ary**.

        Supported **dtype** of **ary**: :class:`numpy.float32`, :class:`numpy.float64`.

        Generated numbers are between 0.0 and 1.0, excluding 0.0 and
        including 1.0.

        :param ary: NumPy array (:class:`numpy.ndarray`) or
            HIP device-side array (:class:`DeviceNDArray`)
        :param size: Number of samples to generate, default to **ary.size**
        """
        if ary.dtype == np.float32:
            self._generate(
                rocrand.rocrand_generate_uniform,
                ary, size)
        elif ary.dtype == np.float64:
            self._generate(
                rocrand.rocrand_generate_uniform_double,
                ary, size)
        else:
            raise TypeError("unsupported type {}".format(ary.dtype))

    def normal(self, ary, mean, stddev, size=None):
        """Generates normally distributed floats.

        Generates **size** (if present) or **ary.size** normally distributed
        floats and saves them to **ary**.

        Supported **dtype** of **ary**: :class:`numpy.float32`, :class:`numpy.float64`.

        :param ary: NumPy array (:class:`numpy.ndarray`) or
            HIP device-side array (:class:`DeviceNDArray`)
        :param mean: Mean value of normal distribution
        :param stddev: Standard deviation value of normal distribution
        :param size: Number of samples to generate, default to **ary.size**
        """
        if ary.dtype == np.float32:
            self._generate(
                rocrand.rocrand_generate_normal,
                ary, size,
                c_float(mean), c_float(stddev))
        elif ary.dtype == np.float64:
            self._generate(
                rocrand.rocrand_generate_normal_double,
                ary, size,
                c_double(mean), c_double(stddev))
        else:
            raise TypeError("unsupported type {}".format(ary.dtype))

    def lognormal(self, ary, mean, stddev, size=None):
        """Generates log-normally distributed floats.

        Generates **size** (if present) or **ary.size** log-normally distributed
        floats and saves them to **ary**.

        Supported **dtype** of **ary**: :class:`numpy.float32`, :class:`numpy.float64`.

        :param ary: NumPy array (:class:`numpy.ndarray`) or
            HIP device-side array (:class:`DeviceNDArray`)
        :param mean: Mean value of log normal distribution
        :param stddev: Standard deviation value of log normal distribution
        :param size: Number of samples to generate, default to **ary.size**
        """
        if ary.dtype == np.float32:
            self._generate(
                rocrand.rocrand_generate_log_normal,
                ary, size,
                c_float(mean), c_float(stddev))
        elif ary.dtype == np.float64:
            self._generate(
                rocrand.rocrand_generate_log_normal_double,
                ary, size,
                c_double(mean), c_double(stddev))
        else:
            raise TypeError("unsupported type {}".format(ary.dtype))

    def poisson(self, ary, lmbd, size=None):
        """Generates Poisson-distributed integers.

        Generates **size** (if present) or **ary.size** Poisson-distributed
        integers and saves them to **ary**.

        Supported **dtype** of **ary**: :class:`numpy.uint32`, :class:`numpy.int32`.

        :param ary: NumPy array (:class:`numpy.ndarray`) or
            HIP device-side array (:class:`DeviceNDArray`)
        :param lmbd: lambda for the Poisson distribution
        :param size: Number of samples to generate, default to **ary.size**
        """
        if ary.dtype in (np.uint32, np.int32):
            self._generate(
                rocrand.rocrand_generate_poisson,
                ary, size,
                c_double(lmbd))
        else:
            raise TypeError("unsupported type {}".format(ary.dtype))
class PRNG(RNG):
    """Pseudo-random number generator.

    Example::

        import rocrand
        import numpy as np

        gen = rocrand.PRNG(rocrand.PRNG.PHILOX4_32_10, seed=123456)
        a = np.empty(1000, np.int32)
        gen.poisson(a, 10.0)
        print(a)
    """

    DEFAULT = ROCRAND_RNG_PSEUDO_DEFAULT
    """Default pseudo-random generator type, :const:`XORWOW`"""
    XORWOW = ROCRAND_RNG_PSEUDO_XORWOW
    """XORWOW pseudo-random generator type"""
    MRG32K3A = ROCRAND_RNG_PSEUDO_MRG32K3A
    """MRG32k3a pseudo-random generator type"""
    MTGP32 = ROCRAND_RNG_PSEUDO_MTGP32
    """Mersenne Twister MTGP32 pseudo-random generator type"""
    PHILOX4_32_10 = ROCRAND_RNG_PSEUDO_PHILOX4_32_10
    """PHILOX_4x32 (10 rounds) pseudo-random generator type"""

    def __init__(self, rngtype=DEFAULT, seed=None, offset=None, stream=None):
        """__init__(self, rngtype=DEFAULT, seed=None, offset=None, stream=None)

        Create a new pseudo-random number generator of type **rngtype**,
        initialized with the given **seed**, **offset** and **stream**.

        :param rngtype: One of :const:`DEFAULT`, :const:`XORWOW`,
            :const:`MRG32K3A`, :const:`MTGP32`, :const:`PHILOX4_32_10`
        :param seed: Initial seed value
        :param offset: Initial offset of random numbers sequence
        :param stream: HIP stream for all kernel launches of the generator
        """
        super(PRNG, self).__init__(rngtype, offset=offset, stream=stream)
        self._seed = None
        if seed is not None:
            self.seed = seed

    @property
    def seed(self):
        """Seed of the random number sequence.

        Assigning to this attribute resets the sequence.
        """
        return self._seed

    @seed.setter
    def seed(self, seed):
        check_rocrand(rocrand.rocrand_set_seed(self._gen, c_ulonglong(seed)))
        self._seed = seed
class QRNG(RNG):
    """Quasi-random number generator.

    Example::

        import rocrand
        import numpy as np

        gen = rocrand.QRNG(rocrand.QRNG.SOBOL32, ndim=4)
        a = np.empty(1000, np.float32)
        gen.normal(a, 0.0, 1.0)
        print(a)
    """

    DEFAULT = ROCRAND_RNG_QUASI_DEFAULT
    """Default quasi-random generator type, :const:`SOBOL32`"""
    SOBOL32 = ROCRAND_RNG_QUASI_SOBOL32
    """Sobol32 quasi-random generator type"""

    def __init__(self, rngtype=DEFAULT, ndim=None, offset=None, stream=None):
        """__init__(self, rngtype=DEFAULT, ndim=None, offset=None, stream=None)

        Create a new quasi-random number generator of type **rngtype**,
        initialized with the given **ndim**, **offset** and **stream**.

        :param rngtype: :const:`DEFAULT` or :const:`SOBOL32`
        :param ndim: Number of dimensions (1 to 20000)
        :param offset: Initial offset of random numbers sequence
        :param stream: HIP stream for all kernel launches of the generator
        """
        super(QRNG, self).__init__(rngtype, offset=offset, stream=stream)
        self._ndim = 1
        if ndim is not None:
            self.ndim = ndim

    @property
    def ndim(self):
        """Number of dimensions of the random number sequence (1 to 20000).

        Assigning to this attribute resets the sequence.
        """
        return self._ndim

    @ndim.setter
    def ndim(self, ndim):
        check_rocrand(
            rocrand.rocrand_set_quasi_random_generator_dimensions(
                self._gen, c_uint(ndim)))
        self._ndim = ndim
def get_version():
    """Return the version number of the rocRAND library as an int."""
    ver = c_int(0)
    check_rocrand(rocrand.rocrand_get_version(byref(ver)))
    return ver.value
| [
"anton@streamcomputing.eu"
] | anton@streamcomputing.eu |
3340ab19cd6e27d561259c17962122f7ca5afbb5 | baed2c2da1f776c0968d3cacd2fa45bdbe5482d6 | /ZOS_API_scripts/LAT_analysis/focal_plane_strehl_ratios_CD.py | 4531fc83e94b0eca7735eed469b8422d1bd1be10 | [] | no_license | patogallardo/zemax_tools | 5ae2fe9a1e8b032684b8cf57457ee4f3239d9141 | 90d309c2f96c94469963eb905844d76fa2137bf9 | refs/heads/master | 2023-01-08T22:52:16.865852 | 2022-12-20T21:36:28 | 2022-12-20T21:36:28 | 234,634,525 | 7 | 3 | null | null | null | null | UTF-8 | Python | false | false | 10,881 | py | import pandas as pd
import numpy as np
import glob
import matplotlib.pyplot as plt
import scipy.interpolate as interp
from scipy import stats
import os
from mpl_toolkits.axes_grid1 import make_axes_locatable
import sys
assert len(sys.argv) == 2
plt.rcParams.update({'font.size': 14})
s = pd.read_hdf('ray_db.hdf', 'system_variables')
center_field_deg = [s.center_field_x, s.center_field_y]
overlay_circle = True
rs = [2200/2] # radii for circles overlay
assert len(sys.argv) == 2
strehl_map_fname = "strehl_map_wl_%s.hdf" % sys.argv[1]
print(strehl_map_fname)
def get_field_positions_and_strehl_map_fname():
    '''Locate the two databases in the current folder.

    Finds the field-position database (ray_db.hdf) and the Strehl-map
    database; asserts exactly one match of each exists, prints what was
    found, and returns ``(pos_fname, strehl_fname, project_name)``.
    '''
    pos_matches = sorted(glob.glob('ray_db.hdf'))
    strehl_matches = sorted(glob.glob(strehl_map_fname))
    print(pos_matches)
    print(strehl_matches)

    # Exactly one database of each flavor must be present.
    assert len(pos_matches) == len(strehl_matches)
    assert len(pos_matches) == 1
    assert len(strehl_matches) == 1

    pos_fname = pos_matches[0]
    strehl_fname = strehl_matches[0]
    print('Analyzing the following files:')
    print("Focal plane positions: ", pos_fname)
    print("Strehl maps: ", strehl_fname)

    sysvars = pd.read_hdf('ray_db.hdf', 'system_variables')
    projectName = sysvars.project_name
    print('project name: %s' % projectName)
    return pos_fname, strehl_fname, projectName
def interpolate_vignetting_for_strehls(xx, yy, vig):
    '''Build an interpolator of the vignetting flag over the field grid.

    *xx*, *yy* are flattened square field-angle grids and *vig* the boolean
    vignetting flag per point. Returns a linear
    ``RegularGridInterpolator`` mapping ``(x, y)`` to the vignetting value
    (queries outside the grid give NaN instead of raising).
    '''
    side = int(np.sqrt(len(xx)))  # the grid must be square
    x_axis = xx.reshape(side, side)[0, :]
    y_axis = yy.reshape(side, side)[:, 0]
    vig_grid = vig.reshape(side, side).astype(float)
    return interp.RegularGridInterpolator(points=(x_axis, y_axis),
                                          values=vig_grid.T,
                                          method='linear',
                                          bounds_error=False)
class open_databases:
    '''object containing raytrace dataframe split by marginal rays

    Loads the ray database, recenters field angles on the boresight,
    splits rays into chief (px=py=0) and the four marginal pupil rays,
    flags vignetted fields, and attaches the Strehl map.
    '''
    def __init__(self):
        projectInfo = get_field_positions_and_strehl_map_fname()
        # NOTE(review): ``strehl_fnem`` looks like a typo for ``strehl_fname``;
        # the read below therefore uses the module-level ``strehl_fname``
        # created by the top-level call to this same function — confirm.
        self.pos_fname, strehl_fnem, projectName = projectInfo
        df_rays = pd.read_hdf(self.pos_fname, key='df')
        # Recenter field angles on the boresight (module-level constants).
        df_rays['hx_deg'] = df_rays['hx_deg'] - center_field_deg[0]
        df_rays['hy_deg'] = df_rays['hy_deg'] - center_field_deg[1]

        # Chief ray and the four marginal rays at the pupil edges.
        df_pos = df_rays.query('px == 0 and py == 0', inplace=False)
        df_xp = df_rays.query('px==1 and py==0')
        df_yp = df_rays.query('px==0 and py==1')
        df_xm = df_rays.query('px==-1 and py==0')
        df_ym = df_rays.query('px==0 and py==-1')

        # A field counts as vignetted if ANY of its marginal rays is clipped.
        vig1 = df_xp.vignette_code.values != 0
        vig2 = df_yp.vignette_code.values != 0
        vig3 = df_xm.vignette_code.values != 0
        vig4 = df_ym.vignette_code.values != 0

        vig_p = np.logical_or(vig1, vig2)
        vig_m = np.logical_or(vig3, vig4)
        vig = np.logical_or(vig_p, vig_m)
        self.vig = vig

        # Blank out focal-plane positions of vignetted fields (in-place
        # write through .values intentionally bypasses pandas copy checks).
        df_pos.x_pos.values[vig] = np.nan
        df_pos.y_pos.values[vig] = np.nan

        u = interpolate_vignetting_for_strehls(df_pos.hx_deg.values,
                                               df_pos.hy_deg.values,
                                               vig)

        df_strh = pd.read_hdf(strehl_fname, key='df')
        # Wavelength stored in micrometers; convert to millimeters.
        wl = pd.read_hdf(strehl_fname, key='wavelength').wavelength_um/1e3
        # Interpolate the vignetting flag onto the Strehl-map field grid.
        df_strh['vignetted'] = u((df_strh.xx_deg.values,
                                  df_strh.yy_deg.values))

        self.df_pos = df_pos
        self.df_xp = df_xp
        self.df_yp = df_yp
        self.df_xm = df_xm
        self.df_ym = df_ym
        self.df_strh = df_strh
        self.wavelength = wl

# Module-level singleton used by the plotting code below.
db = open_databases()
def interpolate_grid(df_pos):
    """Build focal-plane position interpolators from a chief-ray grid.

    *df_pos* must hold a square grid of fields with columns ``hx_deg``,
    ``hy_deg`` (field angles) and ``x_pos``, ``y_pos`` (focal-plane
    coordinates). Returns ``(u, v)`` where ``u((hx, hy))`` interpolates
    x_pos and ``v((hx, hy))`` interpolates y_pos; out-of-grid queries
    return NaN.
    """
    side = int(np.sqrt(len(df_pos)))  # requires a square grid
    x_axis = df_pos.hx_deg.values.reshape(side, side)[0, :]
    y_axis = df_pos.hy_deg.values.reshape(side, side)[:, 0]
    zx = df_pos.x_pos.values.reshape(side, side)
    zy = df_pos.y_pos.values.reshape(side, side)
    u = interp.RegularGridInterpolator((x_axis, y_axis), zx.T,
                                       bounds_error=False)
    v = interp.RegularGridInterpolator((x_axis, y_axis), zy.T,
                                       bounds_error=False)
    return u, v
def plotArea_focal_plane(x_mm, y_mm, z_strehl,
                         thresholds=[0.95],
                         overlay_circle=False,
                         rs=[2000, 3000]):
    """Plot the focal-plane Strehl map and report areas above thresholds.

    x_mm, y_mm: focal-plane coordinates in mm (NaN marks vignetted points);
    z_strehl: Strehl ratio per point; thresholds: Strehl levels whose
    covered area is printed and annotated (mutable defaults — never
    mutated here); overlay_circle/rs: optionally draw reference circles
    of radii rs [mm].

    Side effects: prints areas, writes PNG/PDF under ./strehls/, and
    reads the module globals `db` and `projectName`.
    NOTE(review): `projectName` must exist at module scope — inside this
    chunk it only appears as a local in open_databases.__init__; confirm.
    """
    # drop NaN (vignetted) points before binning
    sel = np.logical_not(np.isnan(x_mm))
    x, y, z = x_mm[sel], y_mm[sel], z_strehl[sel]
    # mean Strehl on a 100x100 grid over +-2000 mm
    res = stats.binned_statistic_2d(x, y, z, statistic='mean',
                                    range=[[-2000, 2000], [-2000, 2000]],
                                    bins=[100, 100])
    # bin centers from the edges
    x_bin = 0.5*(res.x_edge[:-1] + res.x_edge[1:])
    y_bin = 0.5*(res.y_edge[:-1] + res.y_edge[1:])
    x_increment, y_increment = np.diff(res.x_edge)[0], np.diff(res.y_edge)[0]
    pixel_area = x_increment * y_increment  # mm^2 per histogram pixel
    # area above each threshold = (pixels above) * (area per pixel)
    above_thresholds = [res.statistic > threshold
                        for threshold in thresholds]
    areas = [np.sum(above_threshold) * pixel_area
             for above_threshold in above_thresholds]
    for j in range(len(thresholds)):
        print('Area above Strehl %1.2f: %3.1f [m^2]' % (thresholds[j],
                                                        areas[j]/1e6))
    # now make the plot
    fig, ax = plt.subplots(figsize=[6, 5])
    hb = ax.hexbin(x_mm, y_mm, z_strehl, vmin=0.5, vmax=1.0)
    divider = make_axes_locatable(ax)
    cax = divider.append_axes("right", size="3%", pad=0.05)
    cbar = plt.colorbar(hb, cax=cax,
                        ticks=np.array([0.5, 0.6, 0.7, 0.8, 0.9, 1.0]))
    cbar.set_label('Strehl ratio [-]')
    # iso-Strehl contours over the binned map
    contours_ = [0.5, 0.7, 0.8, 0.9, 0.95]
    cs = ax.contour(x_bin, y_bin, res.statistic.T, contours_,
                    cmap='inferno')
    ax.clabel(cs, inline=1, fontsize=15, fmt='%1.2f')
    if overlay_circle:
        # reference circles of radius r [mm], annotated with their area
        theta = np.linspace(0, 2*np.pi, 1000)
        for j, r in enumerate(rs):
            x = r * np.cos(theta)
            y = r * np.sin(theta)
            circle_area = np.pi * r**2/1e6  # in m^2
            ax.plot(x, y,
                    label='$r_{\\bigcirc}$= %1.2f m\nA=%1.2f$m^2$' % (r/1000, circle_area), # noqa
                    color='C%i' %(j+1)) # noqa
        ax.legend(loc='lower left', fontsize=8)
    ax.set_aspect('equal')
    ax.set_xlabel('$x_{\\rm{focal~plane}}$ [mm]')
    ax.set_ylabel('$y_{\\rm{focal~plane}}$ [mm]')
    # pad the axis limits by 5%
    x_min, x_max = np.min(x_mm[sel])*1.05, np.max(x_mm[sel])*1.05
    y_min, y_max = np.min(y_mm[sel])*1.05, np.max(y_mm[sel])*1.05
    ax.set_xlim([x_min, x_max])
    ax.set_ylim([y_min, y_max])
    ax.set_title('Focal plane Strehl ratio at $\\lambda=1mm$')
    ax.grid(alpha=0.3)
    # annotation bubble summarizing the areas above each threshold
    texts = ['Area$_{Strehl > %1.2f}$: %1.1fm$^2$' % (thresholds[j], areas[j]/1e6) # noqa
             for j in range(len(thresholds))]
    textstr = '\n'.join(texts)
    props = dict(boxstyle='round', facecolor='white', alpha=1)
    plt.figtext(0.63, 0.84, textstr, bbox=props, fontsize=8, alpha=1.0)
    plt.figtext(0.9, 0.05, projectName, fontsize=5, ha='right')
    if not os.path.exists('./strehls'):
        os.mkdir('./strehls')
    fig.tight_layout()
    plt.savefig('./strehls/focal_plane_strehls_wl_%i_mm.png' % db.wavelength,
                dpi=150)
    plt.savefig('./strehls/focal_plane_strehls_wl_%i_mm.pdf' % db.wavelength)
    plt.close()
def plot_img_qual_sky(db, thresholds=[0.95, 0.90, 0.80]):
    """Plot the on-sky Strehl map and report sky areas above thresholds.

    db: an open_databases instance (uses df_strh and wavelength);
    thresholds: Strehl levels whose covered solid angle is printed and
    annotated (mutable default — never mutated here).

    Side effects: prints areas and writes PNG/PDF under ./strehls/.
    """
    df_strh = db.df_strh
    # keep only un-vignetted field points
    sel = df_strh.vignetted == 0
    x, y = df_strh.xx_deg.values[sel], df_strh.yy_deg.values[sel]
    z = df_strh.z_strehl.values[sel]
    # mean Strehl on a 100x100 grid over +-7 deg
    res = stats.binned_statistic_2d(x, y, z, statistic='mean',
                                    range=[[-7, 7], [-7, 7]],
                                    bins=[100, 100])
    # compute area over thresholds
    x_bin = 0.5*(res.x_edge[:-1] + res.x_edge[1:])
    y_bin = 0.5*(res.y_edge[:-1] + res.y_edge[1:])
    x_increment, y_increment = np.diff(res.x_edge)[0], np.diff(res.y_edge)[0]
    pixel_area = x_increment * y_increment  # deg^2 per histogram pixel
    above_thresholds = [res.statistic > threshold
                        for threshold in thresholds]
    areas = [np.sum(above_threshold) * pixel_area
             for above_threshold in above_thresholds]
    for j in range(len(thresholds)):
        print('Area above Strehl %1.2f: %3.1f [deg^2]' % (thresholds[j],
                                                          areas[j]))
    # now make the plot
    fig, ax = plt.subplots(figsize=[6, 5])
    hb = ax.hexbin(x, y, z, vmin=0.0, vmax=1.0,
                   cmap='viridis')
    divider = make_axes_locatable(ax)
    cax = divider.append_axes("right", size="3%", pad=0.05)
    cbar = plt.colorbar(hb, cax=cax,
                        ticks=np.arange(0, 1.1, 0.2))
    cbar.set_label('Strehl ratio [-]')
    # iso-Strehl contours in white on top of the hexbin map
    cs = ax.contour(x_bin, y_bin, res.statistic.T,
                    np.array([0.5, 0.7, 0.8, 0.9, 0.95]),
                    colors='white')
    ax.clabel(cs, inline=1, fontsize=15)
    ax.set_xlabel('$x_{sky}$ [deg]')
    ax.set_ylabel('$y_{sky}$ [deg]')
    xmax = 5.0
    ax.set_xlim([-xmax, xmax])
    ax.set_ylim([-xmax, xmax])
    ax.set_title('CD Strehl ratio at $\\lambda=%1.1f mm$' % db.wavelength)
    ax.grid(alpha=0.3)
    # annotation bubble summarizing the solid angle above each threshold
    texts = ['$\\Omega_{Strehl > %1.2f}$: %1.1f deg$^2$' % (thresholds[j],
                                                            round(areas[j], 1))
             for j in range(len(thresholds))]
    textstr = '\n'.join(texts)
    props = dict(boxstyle='round', facecolor='white', alpha=0.7)
    plt.figtext(0.60, 0.175, textstr, bbox=props, fontsize=8)
    if not os.path.exists('./strehls'):
        os.mkdir('./strehls')
    fig.tight_layout()
    plt.savefig('./strehls/sky_strehls_wl_%i_mm.png' % db.wavelength, dpi=150)
    plt.savefig('./strehls/sky_strehls_wl_%i_mm.pdf' % db.wavelength)
    plt.close()
# Map each Strehl field point (deg on sky) to its focal-plane position (mm)
u, v = interpolate_grid(db.df_pos)
x_str_deg, y_str_deg = db.df_strh.xx_deg.values, db.df_strh.yy_deg.values
positions_to_eval = np.hstack([x_str_deg[:, np.newaxis],
                               y_str_deg[:, np.newaxis]])
x_mm = u(positions_to_eval)
y_mm = v(positions_to_eval)
# NOTE(review): overlay_circle and rs are not defined in this chunk; they
# presumably come from earlier in the file (CLI flags or constants) — confirm.
plotArea_focal_plane(x_mm, y_mm, db.df_strh.z_strehl.values,
                     overlay_circle=overlay_circle,
                     rs=rs)
plot_img_qual_sky(db)
| [
"26889221+patogallardo@users.noreply.github.com"
] | 26889221+patogallardo@users.noreply.github.com |
0950b6d3e637271c5539b01d2a7dab18a48c6552 | eab6473e49ce562e2b2c605a8c7f0ed7f4acc3f9 | /5_while2.py | 126d23ecddb7df2e6ec89d38aff867b0505c5a00 | [] | no_license | luzanov99/lesson2 | b964feb61169ed7edd0d1f152e03361809fc162e | c4a2d708d9a21d560231ace9f56db98c595f5115 | refs/heads/main | 2023-01-20T14:49:11.750124 | 2020-11-27T09:26:19 | 2020-11-27T09:26:19 | 316,456,302 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,348 | py | """
Homework assignment #1

The while loop: ask_user with a dictionary

* Create a dictionary of the form "question": "answer", for example:
{"Как дела": "Хорошо!", "Что делаешь?": "Программирую"} and so on
* Write a function ask_user() that asks the user to enter a question
via input() and, if the question exists in the dictionary, prints the
corresponding answer. For example:
User: Что делаешь?
Program: Программирую
"""
# question -> answer lookup table used by ask_user()
questions_and_answers = {"Как дела": "Хорошо!", "Что делаешь?": "Программирую"}
def ask_user(answers_dict):
    """Prompt for a question until a known one is entered, then print its answer.

    answers_dict maps question strings to answer strings.  Fixes from the
    original: the function now actually uses its `answers_dict` parameter
    (it previously read the module-level dict), uses a direct dict lookup
    instead of scanning all keys with a manual index, and retries with a
    loop instead of unbounded recursion.
    """
    while True:
        user_say = input("Введите вопрос ")
        if user_say in answers_dict:
            print(answers_dict[user_say])
            return
        print("Такого вопроса нет,задайте правильно вопрос")
if __name__ == "__main__":
    # Run the interactive prompt only when executed as a script.
    ask_user(questions_and_answers)
| [
"luzanov.zena@gmail.com"
] | luzanov.zena@gmail.com |
9eb1040ea3b957dc47b47e6af63fd0be78c5c578 | ecd8c17c099b7ee1e365d147562aeb0d3a794b46 | /venv/Lib/site-packages/lenses/optics/true_lenses.py | d789b241f2f1372f4cd9056c26fad1837830a954 | [] | no_license | hikigox/botAmino | 59ffe85b484272404db49c9d93efbff27cd2550c | f6957041975d300b39bf28812d9853f47bf48edf | refs/heads/master | 2023-02-17T23:29:50.466976 | 2021-01-23T03:35:12 | 2021-01-23T03:35:12 | 332,008,449 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,640 | py | from typing import Any
from .. import hooks
from .. import typeclass
from .base import Lens
class ContainsLens(Lens):
    '''Focuses a bool saying whether the state contains a given item.

    Most useful with sets, but usable with other collections such as
    lists and dictionaries; it mirrors the ``in`` operator.

        >>> ContainsLens(1)
        ContainsLens(1)
        >>> ContainsLens(1).view([2, 3])
        False
        >>> ContainsLens(1).view([1, 2, 3])
        True
        >>> ContainsLens(1).set([1, 2, 3], False)
        [2, 3]
        >>> ContainsLens(1).set([2, 3], True)
        [2, 3, 1]
        >>> ContainsLens(1).set([1, 2, 3], True)
        [1, 2, 3]

    Custom data-types must implement ``lenses.hooks.contains_add`` and
    ``lenses.hooks.contains_remove`` to support this lens.
    '''

    def __init__(self, item):
        self.item = item

    def getter(self, state):
        return self.item in state

    def setter(self, state, focus):
        present = self.item in state
        if focus and not present:
            return hooks.contains_add(state, self.item)
        if present and not focus:
            return hooks.contains_remove(state, self.item)
        return state

    def __repr__(self):
        return 'ContainsLens({!r})'.format(self.item)
class GetattrLens(Lens):
    '''Focuses a named attribute of an object. Analogous to ``getattr``.

        >>> GetattrLens('left')
        GetattrLens('left')
        >>> from collections import namedtuple
        >>> Pair = namedtuple('Pair', 'left right')
        >>> GetattrLens('left').view(Pair(1, 2))
        1
        >>> GetattrLens('right').set(Pair(1, 2), 3)
        Pair(left=1, right=3)
    '''

    def __init__(self, name):
        # type: (str) -> None
        self.name = name

    def getter(self, state):
        return getattr(state, self.name)

    def setter(self, state, focus):
        # delegate to the hook so immutable objects get copied correctly
        return hooks.setattr_immutable(state, self.name, focus)

    def __repr__(self):
        return 'GetattrLens({!r})'.format(self.name)
class GetitemLens(Lens):
    '''Focuses a single item inside a container. Analogous to
    ``operator.itemgetter``.

        >>> GetitemLens('foo')
        GetitemLens('foo')
        >>> GetitemLens('foo').view({'foo': 1})
        1
        >>> GetitemLens('foo').set({'foo': 1}, 2)
        {'foo': 2}
    '''

    def __init__(self, key):
        # type: (Any) -> None
        self.key = key

    def getter(self, state):
        return state[self.key]

    def setter(self, state, focus):
        # delegate to the hook so immutable containers get copied correctly
        return hooks.setitem_immutable(state, self.key, focus)

    def __repr__(self):
        return 'GetitemLens({!r})'.format(self.key)
class GetitemOrElseLens(GetitemLens):
    '''Focuses an item inside a container through its ``get`` method,
    letting you supply a default for missing keys. Analogous to
    ``dict.get``.

        >>> GetitemOrElseLens('foo', 0)
        GetitemOrElseLens('foo', default=0)
        >>> state = {'foo': 1}
        >>> GetitemOrElseLens('foo', 0).view(state)
        1
        >>> GetitemOrElseLens('baz', 0).view(state)
        0
        >>> GetitemOrElseLens('foo', 0).set(state, 2)
        {'foo': 2}
        >>> GetitemOrElseLens('baz', 0).over({}, lambda a: a + 10)
        {'baz': 10}
    '''

    def __init__(self, key, default=None):
        # type: (Any, Any) -> None
        self.key = key
        self.default = default

    def getter(self, state):
        return state.get(self.key, self.default)

    def __repr__(self):
        template = 'GetitemOrElseLens({!r}, default={!r})'
        return template.format(self.key, self.default)
class ItemLens(Lens):
    '''Focuses the key-value pair of a dictionary entry, selected by its
    key. Setting the focus to ``None`` removes the entry from the
    dictionary.

        >>> ItemLens(1)
        ItemLens(1)
        >>> from collections import OrderedDict
        >>> state = OrderedDict([(1, 10), (2, 20)])
        >>> ItemLens(1).view(state)
        (1, 10)
        >>> ItemLens(3).view(state) is None
        True
        >>> ItemLens(1).set(state, (1, 11))
        OrderedDict([(1, 11), (2, 20)])
        >>> ItemLens(1).set(state, None)
        OrderedDict([(2, 20)])
    '''

    def __init__(self, key):
        # type: (Any) -> None
        self.key = key

    def getter(self, state):
        if self.key not in state:
            return None
        return self.key, state[self.key]

    def setter(self, state, focus):
        data = state.copy()
        if focus is None:
            # a None focus means "delete the entry"
            del data[self.key]
            return data
        new_key = focus[0]
        if new_key != self.key:
            # the key changed: drop the old entry before inserting
            del data[self.key]
        data[new_key] = focus[1]
        return data

    def __repr__(self):
        return 'ItemLens({!r})'.format(self.key)
class ItemByValueLens(Lens):
    '''Focuses the key-value pair of a dictionary entry, selected by its
    value. Setting the focus to ``None`` removes the entry. This lens
    assumes the value appears under exactly one key; if that assumption
    is violated, behaviour is undefined.

        >>> ItemByValueLens(10)
        ItemByValueLens(10)
        >>> from collections import OrderedDict
        >>> state = OrderedDict([(1, 10), (2, 20)])
        >>> ItemByValueLens(10).view(state)
        (1, 10)
        >>> ItemByValueLens(30).view(state) is None
        True
        >>> ItemByValueLens(10).set(state, (3, 10))
        OrderedDict([(2, 20), (3, 10)])
        >>> ItemByValueLens(10).set(state, None)
        OrderedDict([(2, 20)])
    '''

    def __init__(self, value):
        self.value = value

    def getter(self, state):
        for key, value in state.items():
            if value == self.value:
                return key, value
        return None

    def setter(self, state, focus):
        data = state.copy()
        # remove every entry holding the focused value
        matching = [key for key, value in state.items()
                    if value == self.value]
        for key in matching:
            del data[key]
        if focus is not None:
            data[focus[0]] = focus[1]
        return data

    def __repr__(self):
        return 'ItemByValueLens({!r})'.format(self.value)
class TupleLens(Lens):
    '''Combines the foci of several sub-lenses into a single tuple. Every
    sub-lens must be an optic of kind Lens — Traversals are not allowed.

        >>> tl = TupleLens(GetitemLens(0), GetitemLens(2))
        >>> tl
        TupleLens(GetitemLens(0), GetitemLens(2))
        >>> tl.view([1, 2, 3, 4])
        (1, 3)
        >>> tl.set([1, 2, 3, 4], (5, 6))
        [5, 2, 6, 4]

    Particularly useful immediately before an EachLens, letting you
    traverse data gathered from disparate locations within the state.

        >>> import lenses
        >>> each = lenses.optics.EachTraversal()
        >>> tee = tl & each & each
        >>> state = ([1, 2, 3], 4, [5, 6])
        >>> tee.to_list_of(state)
        [1, 2, 3, 5, 6]
    '''

    def __init__(self, *lenses):
        self.lenses = lenses
        for sublens in lenses:
            if not sublens._is_kind(Lens):
                raise TypeError('TupleLens only works with lenses')

    def getter(self, state):
        return tuple(sublens.view(state) for sublens in self.lenses)

    def setter(self, state, focus):
        result = state
        for sublens, new_value in zip(self.lenses, focus):
            result = sublens.set(result, new_value)
        return result

    def __repr__(self):
        return 'TupleLens({})'.format(', '.join(map(repr, self.lenses)))
| [
"jorgealejandroro@gmail.com"
] | jorgealejandroro@gmail.com |
88a05a7632582fb02d5f9e14c0c74ef4d1f6052d | 2df26b46924dfb691b6acdaca5bad03b193dfae8 | /main_page_logic.py | f23d61103d87c13621984738bb544410bce6f3a2 | [] | no_license | K123AsJ0k1/StoryApp | 708240ac2cd15af26012c75c9e476d598a685448 | d4f98a10ef781e88ba27ea06537b47c21bd7dbcc | refs/heads/main | 2023-04-23T00:23:00.129930 | 2021-05-09T20:53:01 | 2021-05-09T20:53:01 | 349,759,930 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 868 | py | from app import app
from flask import redirect, render_template, request, session
from users_logic import *
from main_page_logic import *
from profile_logic import *
from workbench_logic import *
from view_logic import *
from comments_logic import *
from query_logic import *
from administration_logic import *
def main_page_proxy_init_problem():
    """Record a database error in the Flask session (session["main"]).

    Presumably read by the main-page view/template to display an error
    state — TODO confirm against the main-page handler.
    """
    session["main"] = "database_error"
def main_page_other_page_session_deletion():
    """Clear the session state of all other pages before showing the main page.

    Delegates to the per-page *_session_deletion helpers imported above.

    NOTE(review): get_post_session_deletion() and
    get_chapter_session_deletion() are each called twice; this looks like
    a copy-paste leftover.  Kept as-is since the deletions appear
    idempotent — confirm and deduplicate.
    """
    sign_up_session_deletion()
    log_in_session_deletion()
    get_post_session_deletion()
    get_chapter_session_deletion()
    view_chapter_session_deletion()
    profile_session_deletion()
    workbench_session_deletion()
    get_post_session_deletion()
    get_chapter_session_deletion()
    comment_session_deletion()
    query_session_deletion()
    admin_session_deletion()
"niila.siilasjoki@gmail.com"
] | niila.siilasjoki@gmail.com |
322a6d1f7948d54415674bae1ded0b3b654f09e4 | 4248f299425ed047eb26484666bedaea17166911 | /tonado_ihome/handlers/VerifyCode.py | 15320e38de182cb4aff96aa3f560de138d17acca | [] | no_license | ningCherry/tonado_ihome | c2cd7649e3447ea9d6bfe47ff55d19811b331bda | dad5cd57d191fc7f8d81db22e4e084e14d4b8fe4 | refs/heads/master | 2022-07-01T03:34:43.871938 | 2020-05-11T11:56:43 | 2020-05-11T11:56:43 | 263,028,372 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,252 | py | from .BaseHandler import BaseHandler
import logging
import io
import re
import random
from utils.captcha.captcha import create_validate_code
from constants import *
from utils.response_code import RET
from utils.yuntongxun.SendTemplateSMS import ccp
class PicCodeHandler(BaseHandler):
    """Serves picture (captcha) verification codes."""

    def get(self):
        """Generate a captcha, cache its text in redis, return the image.

        Query parameters: ``pre`` is the previous code id to invalidate,
        ``cur`` is the id under which the new code text is stored.
        """
        pre_code_id = self.get_argument("pre", "")
        cur_code_id = self.get_argument("cur", "")
        # render the captcha image into an in-memory buffer
        buf = io.BytesIO()
        image, code_text = create_validate_code()
        image.save(buf, 'GIF')
        try:
            # drop the previous code (if any), then store the new one
            # with an expiry so stale captchas cannot be reused
            if pre_code_id:
                self.redis.delete("pic_code_%s" % pre_code_id)
            self.redis.setex("pic_code_%s" % cur_code_id,
                             PIC_CODE_EXPIRES_SECONDS, code_text)
        except Exception as e:
            logging.error(e)
            self.write("")
        else:
            self.set_header("Content-Type", "image/jpg")
            self.write(buf.getvalue())
"""短信验证码"""
class SMSCodeHandler(BaseHandler):
def post(self):
mobile=self.json_args.get("mobile")
piccode=self.json_args.get("piccode")
piccode_id=self.json_args.get("piccode_id")
print(mobile,piccode,piccode_id)
# 参数校验
if not all((mobile,piccode,piccode_id)):
return self.write(dict(errcode=RET.PARAMERR,errmsg='参数缺失'))
if not re.match(r'^1\d{10}$',mobile):
return self.write(dict(errcode=RET.PARAMERR,errmsg='手机号格式错误'))
# 获取图片验证码真实值
global real_piccode
if piccode!='1234': #设置万能图形验证码
try:
real_piccode = self.redis.get("pic_code_%s" % piccode_id)
except Exception as e:
logging.error(e)
self.write(dict(errcode=RET.DBERR, errmsg='查询验证码错误'))
if not real_piccode: # real_piccode要定义为全局变量,不然会报错
return self.write(dict(errcode=RET.NODATA, errmsg="验证码过期"))
# 判断图形验证码正确性
if real_piccode.decode('utf-8').lower() != piccode.lower(): ##redis数据real_piccode要解码
# print(real_piccode.lower())
# print(piccode.lower())
return self.write(dict(errcode=RET.DATAERR, errmsg="验证码错误"))
#检查手机号码是否存在
# sql = "select count(*) counts from ih_user_profile where up_mobile=%s"
# try:
# ret = self.db.get(sql, mobile)
# except Exception as e:
# logging.error(e)
# else:
# if 0 != ret["counts"]:
# return self.write(dict(errcode=RET.DATAEXIST, errmsg="手机号已注册"))
#生成随机短信验证码
sms_code="%04d" %random.randint(0,9999)
try:
self.redis.setex("sms_code_%s" % mobile, SMS_CODE_EXPIRES_SECONDS, sms_code)
except Exception as e:
logging.error(e)
self.write(dict(errcode=RET.DBERR, errmsg='数据库出错'))
#发送短信验证码
global result
try:
result=ccp.sendTemplateSMS(mobile,[sms_code,SMS_CODE_EXPIRES_SECONDS/60],1)
except Exception as e:
logging.error(e)
self.write(dict(errcode=RET.THIRDERR, errmsg='发送短信失败'))
if result:
self.write(dict(errcode=RET.OK, errmsg='发送成功'))
else:
self.write(dict(errcode=RET.UNKOWNERR, errmsg='发送失败'))
| [
"834121195@qq.com"
] | 834121195@qq.com |
0a4b99a063f6e476bfab0df5ae0053215d637069 | 09f205f74070c53e694d52f0bc72e203a2fd224f | /docs_src/response_directly/tutorial001.py | 89c5968b143beda2f4c2bed5a5d5e349f9bb49f0 | [
"MIT"
] | permissive | RunningIkkyu/fastapi | 53c02fed44b9e30e8617c94ec902be7ca579e42b | 05736c40d3fbb008fd9cdbe1adb8fcef7676e0c6 | refs/heads/master | 2021-05-18T07:58:33.640797 | 2020-05-21T01:36:47 | 2020-05-21T01:36:47 | 251,189,158 | 2 | 0 | MIT | 2020-05-21T01:36:49 | 2020-03-30T03:13:43 | Python | UTF-8 | Python | false | false | 467 | py | from datetime import datetime
from fastapi import FastAPI
from fastapi.encoders import jsonable_encoder
from fastapi.responses import JSONResponse
from pydantic import BaseModel
# Pydantic model for the request body of PUT /items/{id}.
class Item(BaseModel):
    title: str
    timestamp: datetime
    # NOTE(review): the `None` default makes this field optional at runtime,
    # but the annotation should really be Optional[str] — confirm and fix.
    description: str = None
app = FastAPI()
@app.put("/items/{id}")
def update_item(id: str, item: Item):
json_compatible_item_data = jsonable_encoder(item)
return JSONResponse(content=json_compatible_item_data)
| [
"noreply@github.com"
] | noreply@github.com |
8712e858b64de06787ee9bb5d2e998e6682efc82 | 8021b3c09be3b0345ed1dac26073353b4226e8dd | /scripts/shashlik-run | 1962c58e4490f2715e545ddc410100ff512cc783 | [] | no_license | W3SS/shashlik-runtime-env | 510e7fe289019018eb81b996e29b27e1e2f2233a | ee103dc1955fe29aa05b9216c03d76f60c375bff | refs/heads/master | 2020-12-01T13:05:11.820420 | 2016-03-02T11:30:28 | 2016-03-02T11:30:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,327 | #!/usr/bin/env python3
import sys
import os
import subprocess
import argparse
from time import sleep
import socket
import threading
import socketserver
import http.server
from urllib import parse
PORT=60057  # TCP port the in-process control HTTP server listens on

# Temporary prototype to launch the emulator and start the relevant app.
# The emulator is launched and the icon is sent to the bootsplash;
# starting the app is done by an ADB call into the running system, in a
# rather hacky way.  This will get replaced at some point with shashlikd
# on the Android side requesting things from here via TCP; the strategy
# depends on whether we re-use the emulator instance or start a new one.
# Note: proof of concept while we figure out what we want — some parts
# are rough and there are a few race conditions around startup.
images_path = "/opt/shashlik/android"  # emulator system images
lib_path = "/opt/shashlik/lib64"  # bundled native libraries
shashlik_dir = os.path.expanduser("~/.local/share/shashlik") # FIXME use the XDG lib and put in constants.py
parser = argparse.ArgumentParser()
# the icon file name is based on the package name set during install
parser.add_argument("package_name", help="the name of the package to run")
# the user-facing name is taken as an argument to avoid parsing the .apk twice
parser.add_argument("pretty_name", help="A user facing name of the app")
args = parser.parse_args()
# placeholder; replaced with the TCPServer instance inside start_controller()
httpd = 0
class ShashlikController(http.server.BaseHTTPRequestHandler):
    """HTTP control endpoint queried by the Android side during startup.

    Note: the handler methods use `s` where `self` is conventional.
    """
    def apk_file(s):
        """Stream the pending APK to the caller, then delete it from disk."""
        print("Sending APK")
        apk_name = args.package_name
        apk_path = shashlik_dir + "/" + apk_name + ".apk"
        if os.path.exists(apk_path):
            s.send_response(200)
            s.send_header("Content-type", "application/vnd.android.package-archive")
            s.end_headers()
            # stream the file in 1 KiB chunks to avoid loading it whole
            with open(apk_path, "rb") as apk_file:
                while True:
                    chunk = apk_file.read(1024)
                    if (not chunk):
                        break
                    s.wfile.write(chunk)
            # the APK is one-shot: remove it once it has been served
            os.unlink(apk_path)
        else:
            s.send_response(403)
            s.end_headers()
    def startup(s):
        """Return the package name of the app that should be started."""
        apk_name = args.package_name
        s.send_response(200)
        s.send_header("Content-type", "text/plain")
        s.end_headers()
        s.wfile.write(apk_name.encode())
    def do_GET(s):
        """Route GET requests: /startup and /apk_file; 404 otherwise."""
        url = parse.urlparse(s.path)
        print (url)
        if url.path == "/startup":
            return s.startup()
        if url.path == "/apk_file":
            return s.apk_file()
        s.send_response(404)
        s.end_headers()
#starts the emulator instance.
#returns a subprocess.Popen instance
def start_emulator():
try:
os.mkdirs(shashlik_dir+"/system")
except:
pass
emulator_args = [
"/opt/shashlik/bin/emulator64-x86",
"-sysdir", "%s" % images_path ,
"-system","%s/system.img" % images_path ,
"-ramdisk", "%s/ramdisk.img" % images_path ,
"-kernel", "%s/kernel-qemu" % images_path ,
"-memory", "512",
"-data", "%s/userdata.img" % shashlik_dir,
"-datadir", "%s/system" % shashlik_dir,
"-noskin",
"-gpu", "on",
"-selinux", "disabled"]
emulator_env = os.environ
emulator_env["LD_LIBRARY_PATH"] = lib_path + ":" + os.getenv("LD_LIBRARY_PATH","/lib")
emulator_env["PATH"] = "/opt/shashlik/bin" + ":" + os.getenv("PATH", "/usr/bin:/bin")
emulator_env["SHASHLIK_APPNAME"] = args.pretty_name
emulator_env["SHASHLIK_ICON"] = "%s/%s.png" % (shashlik_dir, args.package_name)
return subprocess.Popen(emulator_args, env=emulator_env)
#send an icon to the bootloader
def send_icon(icon_path):
socket_path = "/tmp/shashlik_controller"
if os.path.exists(socket_path):
os.remove(socket_path)
server = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
server.bind(socket_path)
server.listen(1)
connection, address = server.accept()
with open(icon_path, "rb") as icon:
while True:
chunk = icon.read(1024)
if (not chunk):
break
connection.send(chunk)
def start_controller():
    """Run the control HTTP server on PORT (blocking) until interrupted.

    Stores the server in the module-level `httpd` so the main thread can
    call httpd.shutdown() when the emulator exits.
    """
    global httpd
    # bind manually so allow_reuse_address takes effect before binding;
    # avoids "address already in use" on quick restarts
    httpd = socketserver.TCPServer(("", PORT), ShashlikController, bind_and_activate=False)
    httpd.allow_reuse_address=True
    httpd.server_bind()
    httpd.server_activate()
    try:
        httpd.serve_forever()
    except KeyboardInterrupt:
        pass
    finally:
        httpd.server_close()
#invoke ADB to install the apk if needed
#returns true on success, false if failed
def install_app(package_path):
try:
out = subprocess.check_output(args=["/opt/shashlik/bin/adb",
"-e",
"install",
package_path],
universal_newlines=True)
print (out)
rc = "Success" in out
if rc:
os.unlink(apk_path)
return rc
except:
return False
def launch_app(package_name):
    """Ask the running emulator (via `adb shell monkey`) to launch the app.

    Returns True if the launch intent was injected, False otherwise.

    Fix from the original: the bare `except` (which also swallowed
    KeyboardInterrupt/SystemExit) is narrowed to Exception.
    """
    try:
        out = subprocess.check_output(
            args=["/opt/shashlik/bin/adb", "-e", "shell",
                  "monkey", "-p", package_name,
                  "-c", "android.intent.category.LAUNCHER", "1"],
            universal_newlines=True)
        print(out)
        return "injected" in out
    except Exception:
        return False
apk_path = shashlik_dir + "/" + args.package_name + ".apk"
# if there's already an emulator running, just re-use it
if subprocess.call(args=["pgrep", "emulator64-x86"]) == 0:
    install_app(apk_path)
    launch_app(args.package_name)
    sys.exit(0)
print("starting emulator")
emulator_process = start_emulator()
# send the icon from a daemon thread so we don't block forever if the
# emulator failed to start (send_icon blocks waiting for a connection)
icon_path = shashlik_dir + "/" + args.package_name + ".png"
icon_thread = threading.Thread(target=send_icon, args=(icon_path,))
icon_thread.daemon = True
icon_thread.start()
# the control HTTP server runs in its own thread for the emulator to poll
controller_thread = threading.Thread(target=start_controller)
controller_thread.start()
# block until the user closes the emulator, then stop the control server
if emulator_process.returncode == None:
    emulator_process.wait()
httpd.shutdown()
| [
"kde@davidedmundson.co.uk"
] | kde@davidedmundson.co.uk | |
c7e3482178a2932cfd75e3a25effaf3b4d35cca1 | 3629a82a0da2fa4d61b44ee70a52be57a5a72def | /validate_2.py | 6a2115cd4c228c449abd431510df48680b9c9917 | [] | no_license | lippinj/dl-course-project | 28c376f8c4e4d5b2df92178e80105c9715ad3a97 | 3e1a07ec63127d0f42a04869bee773ee327de9d8 | refs/heads/master | 2020-05-24T16:06:05.988412 | 2019-05-18T20:25:02 | 2019-05-18T20:25:02 | 187,348,680 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,495 | py | import sys
import time
import numpy as np
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
# Calculates the validation error.
#
# The following file(s) are read:
# mean_ratings_[#].npy
# validate_[#].npy
# naive_2_me_[#].pth
# naive_2_ce_[#].pth
# naive_2_pa_[#].pth
#
# Run examples:
# python validate_2.py 10k
# python validate_2.py all
######################
# Parse command line #
######################
SPACER = '=' * 78
print('=============================== Validation v2 ===============================')
assert(len(sys.argv) == 2)
customers_str = sys.argv[1]
assert(customers_str in ('1k', '10k', '25k', '100k', 'all'))
num_customers = {
'1k' : 1000,
'10k' : 10000,
'25k' : 25000,
'100k': 100000,
'all' : 480189
}[customers_str]
print('Number of customers: {:>8,}'.format(num_customers))
num_movies = 17770
print('Number of movies: {:>8,}'.format(num_movies))
print(SPACER)
########################
# Read validation data #
########################
filename_data = 'validate_{}.npy'.format(customers_str)
t0 = time.time()
data = np.load(filename_data)
customer_ids = torch.tensor(data[:,0], dtype=torch.long).view(-1)
movie_ids = torch.tensor(data[:,1], dtype=torch.long).view(-1)
ratings = torch.tensor(data[:,2], dtype=torch.float).view(-1)
num_points = data.shape[0]
t1 = time.time()
del data
print('Read {:,} data points from {} in {:.1f} s.'.format(num_points, filename_data, t1 - t0))
#####################
# Read mean ratings #
#####################
t0 = time.time()
filename_ta = 'tally_{}.npy'.format(customers_str)
tally = torch.tensor(np.load(filename_ta), dtype=torch.float)
logdists = torch.log(tally[movie_ids])
t1 = time.time()
print('Read tally from {} in {:.1f} s.'.format(filename_ta, t1 - t0))
#######################
# Model specification #
#######################
filename_me = 'naive_2_me_{}.pth'.format(customers_str)
filename_ce = 'naive_2_ce_{}.pth'.format(customers_str)
filename_pa = 'naive_2_pa_{}.pth'.format(customers_str)
dim_customers = 20 # customer embedding dimensions
dim_movies = 20 # movie embedding dimensions
t0 = time.time()
movie_embedding = nn.Embedding(num_movies, dim_movies)
movie_embedding.load_state_dict(torch.load(filename_me))
movie_embedding.eval()
customer_embedding = nn.Embedding(num_customers, dim_customers)
customer_embedding.load_state_dict(torch.load(filename_ce))
customer_embedding.eval()
predict_appeal = nn.Sequential(
nn.Linear(dim_customers + dim_movies, 100),
nn.ReLU(),
nn.Linear(100, 100),
nn.Tanh(),
nn.Linear(100, 20),
nn.Tanh(),
nn.Linear(20, 5)
)
predict_appeal.load_state_dict(torch.load(filename_pa))
predict_appeal.eval()
t1 = time.time()
print('Loaded models in {:.1f} s'.format(t1 - t0))
print(SPACER)
##############################
# Calculate validation error #
##############################
m = movie_embedding(movie_ids)
c = customer_embedding(customer_ids)
appeal = predict_appeal(torch.cat((c, m), dim=1)).view(num_points, 5)
dist = F.softmax(logdists + appeal, dim=1)
p = torch.mm(dist, torch.tensor([1., 2., 3., 4., 5.]).view(5, 1)).view(num_points)
criterion = nn.MSELoss(reduction='mean')
mse = criterion(p, ratings).item()
rmse = np.sqrt(mse)
print('Validation MSE: {:.4f}'.format(mse))
print('Validation RMSE: {:.4f}'.format(rmse))
| [
"joonas.lipping@aalto.fi"
] | joonas.lipping@aalto.fi |
48d878bda4745416936c1effc119d9c44bbbae23 | 8bee29f4857fe2223140558aebf0ce1731c47aa5 | /wk5_initals.py | ae71e1e9c9e9d4663f9d14404847a78e2bf28be1 | [] | no_license | jsweeney3937/PRG105 | 65c50d9812c56ddf844be2f34eee04440a9c740c | 69c0ee263984565f108a0981910617ac458e1fa9 | refs/heads/master | 2020-06-16T15:29:19.901722 | 2017-07-10T23:06:16 | 2017-07-10T23:06:16 | 94,151,281 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 639 | py |
def get_user_input():
# gets users name
user_name = input("Enter your first, middle, and last name. ")
# returns users name
return user_name
def main():
# sets name to what is returned from function get_user_input
name = get_user_input()
# splits the string dependant on spaces
name_split = name.split()
# loop that runs for each character in the sting
for char in name_split:
# prints the first char of the string as uppercase, followed by a period
# and prints results without a new line at the end
print(char[0].upper() + ".", end="")
main()
| [
"noreply@github.com"
] | noreply@github.com |
712a3dd38ff9d895b451988c8fb07dbf1e4f33b6 | 5e20fd3fca31b19c12b05d6045577ab79c3f2df7 | /course_3/helper-concierge/api/cassandra_api/api.py | 7528ca6a4d0279bb52dbfc92e3aa73d75c87321f | [] | no_license | yashin-alexander/itmo | 0fa690572a732e53d4ad70c683b5e9c62ed64363 | f4cbbb13088235ee488848c9e266a077fd8dd1e4 | refs/heads/master | 2021-07-07T14:28:35.874269 | 2019-01-31T17:08:55 | 2019-01-31T17:08:55 | 133,807,262 | 3 | 2 | null | 2018-10-02T19:55:10 | 2018-05-17T12:02:18 | Java | UTF-8 | Python | false | false | 3,606 | py | import json
from flask import request, Response
from functools import wraps
from cassandra.cluster import Cluster
from . import constants
def catcher(f):
@wraps(f)
def decorated(*args, **kwargs):
try:
return f(*args, **kwargs)
except Exception as err:
print(err)
return Response(status=500, response='{"status": "Failure"}')
return decorated
class CassandraAPI:
    """Flask-facing CRUD wrapper around a Cassandra cluster session."""
    def __init__(self):
        # Connect once at construction; the session is reused for every request.
        self.cluster = Cluster(constants.NODE_IPS)
        self.session = self.cluster.connect(constants.KEYSPACE_NAME)
    @staticmethod
    def to_json(data):
        """Serialize *data* to a newline-terminated JSON string."""
        return json.dumps(data) + "\n"
    @property
    def request_parameters(self):
        """Query-string parameters of the current flask request as a dict."""
        return request.args.to_dict()
    def response(self, status_code, data):
        """Build a JSON flask Response; default=str covers non-JSON types (UUID, dates)."""
        json_data = json.dumps(data, indent=8, sort_keys=True, default=str)
        return Response(
            status=status_code,
            mimetype="application/json",
            response=json_data
        )
    # CREATE methods
    def _create_record(self, database):
        """INSERT one row built from the request's JSON body.

        NOTE(review): the CQL statement is assembled by string interpolation
        from request data — consider prepared statements / bind parameters.
        """
        data = request.json
        values = ''
        for key in constants.DATABASES_FIELDS[database]:
            values += ''
            values += str(data[key])
            values += ','
        # values[:-1] drops the trailing comma added by the loop above.
        query = "INSERT INTO {}{} VALUES ({});" \
            .format(database, constants.DATABASES_COLUMNS[database], values[:-1])
        print(query)
        self.session.execute(query)
    @catcher
    def create_register(self):
        self._create_record(constants.DB_REGISTER)
        return self.response(200, {})
    @catcher
    def create_user_activity(self):
        self._create_record(constants.DB_USER_ACTIVITY)
        return self.response(200, {})
    @catcher
    def create_enter_attempts(self):
        self._create_record(constants.DB_ENTER_ATTEMPTS)
        return self.response(200, {})
    # READ methods
    def _get_table_data(self, table):
        """Return an iterator over every row of *table* (full scan)."""
        query = 'SELECT * FROM {}'.format(table)
        return self.session.execute(query)
    @catcher
    def user_activity(self):
        data = list(self._get_table_data(constants.DB_USER_ACTIVITY))
        return self.response(200, data)
    @catcher
    def register(self):
        data = list(self._get_table_data(constants.DB_REGISTER))
        return self.response(200, data)
    @catcher
    def enter_attempts_by_day(self):
        data = list(self._get_table_data(constants.DB_ENTER_ATTEMPTS))
        return self.response(200, data)
    # UPDATE methods
    @catcher
    def update_register_by_uuid(self):
        # Expects JSON body {"uuid": ..., "data": "col = val, ..."}.
        uuid = request.json['uuid']
        data = request.json['data']
        query = 'UPDATE register SET {} WHERE user_id IN ({});'.format(data, uuid)
        self.session.execute(query)
        return self.response(200, {})
    # DELETE methods
    @catcher
    def delete_registers(self):
        uuids = request.json['uuids']
        self.session.execute('DELETE FROM {} WHERE user_id IN ({});'
                             .format(constants.DB_REGISTER, uuids))
        return self.response(200, {})
    @catcher
    def delete_enter_attempts(self):
        uuids = request.json['uuids']
        self.session.execute('DELETE FROM {} WHERE user_id IN ({});'
                             .format(constants.DB_ENTER_ATTEMPTS, uuids))
        return self.response(200, {})
    @catcher
    def delete_user_activity(self):
        uuids = request.json['uuids']
        self.session.execute('DELETE FROM {} WHERE user_id IN ({});'
                             .format(constants.DB_USER_ACTIVITY, uuids))
        return self.response(200, {})
| [
"alexandr.yashin@emlid.com"
] | alexandr.yashin@emlid.com |
d2322af65b63f06a06a46e66bbc33aa1063a2d35 | 01d69def07699396329459869fa9e26d1d85afec | /lib/deadline_utils.py | ab22243196e4532a21942717682c85492f3b6840 | [] | no_license | scottwillman/dmx | a2b16cc8007f94187de6eb8077b056d7aaa32e19 | f875b0816190736ef240adc7088ec77dd1336005 | refs/heads/master | 2021-01-15T15:43:44.328867 | 2017-06-23T16:35:42 | 2017-06-23T16:35:48 | 54,001,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,910 | py | import os, sys
def getRenderChunkSize(in_frame, out_frame, cores_available=16):
    """Return the frame-chunk size that splits the inclusive range
    [in_frame, out_frame] evenly across *cores_available* workers."""
    import math
    frame_count = out_frame - in_frame + 1
    return int(math.ceil(frame_count / float(cores_available)))
def deadline_buildJobInfoFile(frame_start, frame_end, job_name, chunk_size, job_files_dir, priority=90):
    """Write a Deadline Nuke job-info file into *job_files_dir*.

    Returns the path of the file written (job_info_<job_name>.job).
    """
    lines = [
        "Plugin=Nuke",
        "Frames=%s-%s" % (frame_start, frame_end),
        "Name=%s" % job_name,
        "ChunkSize=%s" % chunk_size,
        "Priority=%s" % priority,
        "ConcurrentTasks=4",
    ]
    file_path = os.path.join(job_files_dir, "job_info_%s.job" % job_name)
    with open(file_path, 'w') as handle:
        handle.write('\n'.join(lines) + '\n')
    return file_path
def deadline_buildPluginInfoFile(file_to_render, job_name, job_files_dir, write_node=None):
    """Write a Deadline Nuke plugin-info file into *job_files_dir*.

    When *write_node* is given, only that Write node is rendered.
    Returns the path of the file written (plugin_info_<job_name>.job).
    """
    lines = [
        "SceneFile=%s" % file_to_render,
        "Version=8.0",
        "NukeX=False",
        "BatchMode=True",
        # "IsMovieRender=True"
    ]
    if write_node:
        lines.append("WriteNode=%s" % write_node)
    file_path = os.path.join(job_files_dir, "plugin_info_%s.job" % job_name)
    with open(file_path, 'w') as handle:
        handle.write('\n'.join(lines) + '\n')
    return file_path
## USAGE EXAMPLE ##
# chunk_size = deadline_utils.getRenderChunkSize(in_frame, out_frame)
#
# job_name = "auto_comp_%s_v%03d.nk" % (shot_name, next_version)
#
# job_info_file_path = deadline_utils.deadline_buildJobInfoFile(in_frame, out_frame, job_name, chunk_size, job_files_dir, priority=90)
# plugin_info_file_path = deadline_utils.deadline_buildPluginInfoFile(out_script_path, job_name, job_files_dir)
#
# cmd = '/Applications/Thinkbox/Deadline6/DeadlineCommand.app/Contents/MacOS/DeadlineCommand %s %s' % (job_info_file_path, plugin_info_file_path)
# print "Launching to Queue: %s" % cmd
# os.system(cmd)
##
| [
"scottwillman@gmail.com"
] | scottwillman@gmail.com |
5b4990e2ccc7f88d349fcae58a780514b555a0dd | 9a2c9588a170aa8e4311d98aa5ade728ec8c9883 | /setup.py | 3ca07062459e7904de601e7d0cb9ab1968629549 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | gabrielegiammatteo/build_manpage | df5a681c981eb7027ad800e3deb1d2182e99daf1 | 516ca12512979ab8e1a45f24e502a9cd1331f284 | refs/heads/master | 2022-01-17T20:03:19.926844 | 2017-07-10T15:34:19 | 2017-07-10T15:34:19 | 96,616,485 | 1 | 1 | Apache-2.0 | 2022-01-12T01:23:14 | 2017-07-08T12:05:00 | Python | UTF-8 | Python | false | false | 319 | py | from distutils.core import setup
# Minimal packaging script for the single-module argparse-manpage project.
# NOTE(review): distutils is deprecated and removed in Python 3.12 — consider
# migrating to setuptools; left unchanged here.
setup(
    name='argparse-manpage',
    version='0.0.1',
    url='https://github.com/gabrielegiammatteo/build_manpage',
    license='Apache 2.0',
    py_modules = ['build_manpage'],
    author='Gabriele Giammatteo',
    author_email='gabriele.giammatteo@eng.it',
    description='',
)
| [
"gabriele.giammatteo@eng.it"
] | gabriele.giammatteo@eng.it |
4fb9a7439f7d5c3acfa37afad53991275507b5a5 | 95c027e7302751b335b33d287e0efac7483edfc3 | /etc/inflearn/7.DFS,BFS/7.BFS_미로의 최단거리 통로.py | 39935a7a7ee83b31577ff48de168bdb9840bd3bb | [] | no_license | kimchaelin13/Algorithm | 01bd4bcb24c58d5d82714e60272d5af91d2d9ce8 | 53f7f3cff5a141cf705af3c9f31cdb9ae997caff | refs/heads/master | 2023-02-03T08:58:26.660299 | 2020-12-20T17:01:16 | 2020-12-20T17:01:16 | 296,996,924 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 842 | py | import sys
from collections import deque
# BFS shortest path through a 7x7 0/1 maze read from input.txt
# (0 = open cell, 1 = wall). Prints the distance to (6,6), or -1 if unreachable.
sys.stdin = open("input.txt", "r")
dx=[-1,0,1,0]
dy=[0,1,0,-1]
board=[list(map(int,input().split())) for _ in range(7)]
dis=[[0]*7 for _ in range(7)] #(7,7)
Q=deque()
Q.append((0,0)) #start cell
# Once a cell is visited it must not be visited again, so turn it into a wall!
# Setting it to 1 acts as a wall — used here instead of a separate visited array.
board[0][0] = 1
while Q: #stops when Q becomes empty (falsy)
    tmp=Q.popleft()
    for i in range(4):
        x=tmp[0]+dx[i]
        y=tmp[1]+dy[i]
        if 0<=x<=6 and 0<=y<=6 and board[x][y]==0:
            board[x][y]=1
            dis[x][y]=dis[tmp[0]][tmp[1]]+1
            Q.append((x,y))
# Distance still 0 means the goal was blocked off; the problem says print -1 then.
if dis[6][6]==0:
    print(-1)
else:
    print(dis[6][6])
| [
"kimchaelin13@gamil.com"
] | kimchaelin13@gamil.com |
a319937bbf4b1cf641a71113dc8eb6e3b886bf96 | 20880b1cbe1d1817726a9a063bf1821c9563300e | /light_transform/mm.py | 366ba0bd8b611b12f853cc973a50f7c6ea9d3172 | [] | no_license | zqcrafts/tools | de8072c9184bf8f0dbd309d126a54685c339e9f0 | a624f52fe7f11f5c53c247a07b7d0918ecccefcb | refs/heads/main | 2023-03-27T09:06:36.738504 | 2021-03-28T07:28:52 | 2021-03-28T07:28:52 | 352,273,799 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 506 | py | import cv2
import numpy as np
def contrast_demo(img1, c, b):  # brightness: every channel of every pixel gets b added
    """Display *img1* with contrast scaled by *c* and brightness offset *b*.

    Blends img1 (weight c) with an all-zero image (weight 0) plus scalar b,
    i.e. dst = img1 * c + b, and shows the result in a cv2 window.
    """
    rows, cols, channel = img1.shape
    blank = np.zeros([rows, cols, channel], img1.dtype)  # np.zeros(img1.shape, dtype=uint8)
    dst = cv2.addWeighted(img1, c, blank, 0, b)
    cv2.imshow("con_bri_demo", dst)
# Demo driver: load a sample image (hard-coded Windows path) and show it with
# contrast x13 and brightness +15; any key press closes the window.
img1 = cv2.imread("E:\\contest\\CVPR_UG2_Challenge\\DarkFace_Train_2021\\image\\4.png", cv2.IMREAD_COLOR)
contrast_demo(img1, 13, 15)
cv2.waitKey(0)
cv2.destroyAllWindows()
| [
"598768203@qq.com"
] | 598768203@qq.com |
e4f668c29f8509034aa848c7bc5ab56a68eb64c4 | ca0e761b2948b2bd93d46e5bab610901f4a9936c | /data/convert_to_json_file.py | 6e7ef5a2f0f1f046e2029916e023b0653ab42ed7 | [
"MIT"
] | permissive | dojinkimm/go-krx | 7f122321f69a119594de1ee184be57eeb4f148f7 | b565696a7c13427f3320c6c43a529638ea06682e | refs/heads/main | 2023-03-11T22:06:00.291164 | 2021-02-28T13:29:51 | 2021-02-28T13:29:51 | 300,895,170 | 7 | 2 | null | 2021-02-23T12:34:18 | 2020-10-03T14:09:40 | Go | UTF-8 | Python | false | false | 1,695 | py | import json
import pandas as pd
# Download the KRX listed-company table and dump it to data.json.
# The first table on the page is the company list.
dfstockcode = pd.read_html(
    "http://kind.krx.co.kr/corpgeneral/corpList.do?method=download", header=0
)[0]
stock_information = list()
# Column names are Korean: company name, ticker, sector, main products,
# listing date, settlement month, representative, homepage, region.
for (
    i,
    (
        name,
        symbol,
        sector,
        industry,
        listing_date,
        settlement_month,
        representative,
        homepage,
        region,
    ),
) in enumerate(
    zip(
        dfstockcode.get("회사명"),
        dfstockcode.get("종목코드"),
        dfstockcode.get("업종"),
        dfstockcode.get("주요제품"),
        dfstockcode.get("상장일"),
        dfstockcode.get("결산월"),
        dfstockcode.get("대표자명"),
        dfstockcode.get("홈페이지"),
        dfstockcode.get("지역"),
    )
):
    # pandas reads empty cells as float NaN; replace them with the Korean
    # placeholder string "없음" ("none").
    if type(sector) == float:
        sector = "없음"
    if type(industry) == float:
        industry = "없음"
    if type(settlement_month) == float:
        settlement_month = "없음"
    if type(representative) == float:
        representative = "없음"
    if type(homepage) == float:
        homepage = "없음"
    if type(region) == float:
        region = "없음"
    # Tickers are numeric in the source table; restore the 6-digit zero-padded form.
    symbol = str(symbol).zfill(6)
    stock_information.append(
        {
            "name": name,
            "symbol": symbol,
            "sector": sector,
            "industry": industry,
            "listing_date": listing_date,
            "settlement_month": settlement_month,
            "representative": representative,
            "homepage": homepage,
            "region": region,
        }
    )
# ensure_ascii=False keeps the Korean text readable in the output file.
with open("data.json", "w", encoding='utf-8') as file:
    json.dump(stock_information, file,indent=4, ensure_ascii=False)
    file.write("\n")
| [
"dojinkim119@gmail.com"
] | dojinkim119@gmail.com |
4dd8d2e36e422541e87ac8beb1e2bb31a2cfe4b2 | 7dd412d672594d00ddd59a609ac704ee1d0c6833 | /learn/python_fpnp/udp/tcp_udp_server.py | 4796b7d360ae708bd142854f769d56ec2a0b66bd | [] | no_license | liyustar/starsnip | 34689dc931e0cac8884ad566362841ad31da0a8a | ac53fdf1cea3681116c24ffd62fa0d58ae778361 | refs/heads/master | 2016-09-06T09:09:53.288371 | 2014-04-16T12:59:49 | 2014-04-16T12:59:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,197 | py |
from threading import *
from socket import *
import os
UDPSERVENABLE = True
TCPSERVENABLE = True
BUFSIZE = 10240          # max payload read per recv call
PORT = 8888
ADDR = ('', PORT)        # bind on all interfaces

class UdpServThread(Thread):
    """UDP echo server: receive a datagram and send it straight back."""
    def run(self):
        sock = socket(AF_INET, SOCK_DGRAM)
        sock.bind(ADDR)
        while True:
            data, addr = sock.recvfrom(BUFSIZE)
            print('udp recv data\'s len is %d from %s' % (len(data), addr))
            sock.sendto(data, addr)

class TcpClnt(Thread):
    """Echo handler for one accepted TCP connection."""
    def __init__(self, sock, addr):
        Thread.__init__(self)
        self.sock = sock
        self.addr = addr
    def run(self):
        # BUG FIX: the original body referenced the bare names `sock`, which
        # are undefined in this scope and raised NameError at runtime; use the
        # socket stored on the instance in __init__ instead.
        data = self.sock.recv(BUFSIZE)
        print('tcp recv data\'s len is %d' % len(data))
        self.sock.send(data)

class TcpServThread(Thread):
    """TCP accept loop: spawns one TcpClnt thread per accepted connection."""
    def run(self):
        sock = socket(AF_INET, SOCK_STREAM)
        sock.bind(ADDR)
        sock.listen(10)
        while True:
            csock, caddr = sock.accept()
            print('tcp accept: %s %s' % (csock.getsockname(), csock.getpeername()))
            clnt = TcpClnt(csock, caddr)
            clnt.start()
clnt.start()
# Start the enabled servers. BUG FIX: the original used Python-2 print
# statements (`print '...'`), which are syntax errors under Python 3 and
# inconsistent with the print() calls used elsewhere in this file.
if UDPSERVENABLE:
    udpServ = UdpServThread()
    udpServ.start()
    print('udp server start, wait data...')
if TCPSERVENABLE:
    tcpServ = TcpServThread()
    tcpServ.start()
    print('tcp server start, wait data...')
| [
"liyustar@gmail.com"
] | liyustar@gmail.com |
2bc2d9a96d32d718cd7212819c018fb6c1d25567 | 5cc4a73d6fb144d72e74b07a10b60fc36bfe50ec | /shops/migrations/0002_auto_20190330_1916.py | df99c08c2f3e516d78ab25dd75133d1b5afcbeba | [] | no_license | pedrofolch/digitalsoil | 79d9497dcbb54df3c7df64f9da35d71d592fe580 | 7b6d1ffd34e991cf87c91342e5336a97fa1cf59b | refs/heads/master | 2022-12-11T00:47:01.728729 | 2019-04-11T03:34:12 | 2019-04-11T03:34:12 | 120,937,159 | 0 | 0 | null | 2022-12-08T04:58:09 | 2018-02-09T17:49:10 | CSS | UTF-8 | Python | false | false | 659 | py | # Generated by Django 2.1.5 on 2019-03-31 02:16
import django.contrib.gis.db.models.fields
from django.db import migrations
class Migration(migrations.Migration):
    """Make the GIS fields on Elevation.rast and Shop.poly optional
    (blank/null allowed), both in SRID 4326 (WGS 84)."""

    dependencies = [
        ('shops', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='elevation',
            name='rast',
            field=django.contrib.gis.db.models.fields.RasterField(blank=True, null=True, srid=4326),
        ),
        migrations.AlterField(
            model_name='shop',
            name='poly',
            field=django.contrib.gis.db.models.fields.PolygonField(blank=True, null=True, srid=4326),
        ),
    ]
| [
"pedro.folch@gmail.com"
] | pedro.folch@gmail.com |
7959a07f34b2edbdc85fcc91b921c6cc0277f220 | a8b7a01d0a67e15e490bf44338d0d1ac352ba913 | /hw/1/hw_2.py | e72b323d16b06ac843aee9e2cd5c106ffcff5862 | [] | no_license | logika03/Python_EC | 46c1ae913424eb2349cb5c712d7bbbcbe1c0d0a3 | 51da26a8b2aaf3d979a61589696240d0533c8f99 | refs/heads/master | 2022-12-19T07:45:00.704160 | 2020-09-25T19:35:42 | 2020-09-25T19:35:42 | 298,662,731 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,741 | py | import sys
def fv(names, name):
    """Approximate *name* by hill-climbing over half-word candidates.

    The candidate pool *names* is split into first/second halves; each
    iteration picks the closest half (by per-letter alphabet distance) and
    regenerates the pools from its one-step letter neighbours. Stops when
    the combined distance drops to 1 or after 100 iterations.
    """
    start_pool, finish_pool = separete_names(names)
    alphabet = 'abcdefghijklmnopqrstuvwxyz'
    letters_with_numbers = {ch: idx + 1 for idx, ch in enumerate(alphabet)}
    numbers_with_letters = {idx + 1: ch for idx, ch in enumerate(alphabet)}
    half = int(len(name) / 2)
    best_total = sys.maxsize
    iterations = 0
    start = ''
    finish = ''
    while best_total > 1 and iterations < 100:
        iterations += 1
        start, start_score = find_min_value(start_pool, name[:half], letters_with_numbers)
        finish, finish_score = find_min_value(finish_pool, name[half:], letters_with_numbers)
        start_pool = find_neighbors(start, numbers_with_letters, letters_with_numbers)
        finish_pool = find_neighbors(finish, numbers_with_letters, letters_with_numbers)
        if start_score + finish_score < best_total:
            best_total = start_score + finish_score
    return start + finish
def find_neighbors(name, numbers_with_letters, letter_with_numbers):
    """Return every word made by shifting one letter of *name* one step
    down or up the alphabet, wrapping a<->z; down-neighbour first."""
    neighbors = []
    for pos, ch in enumerate(name):
        value = letter_with_numbers[ch]
        below = 26 if value == 1 else value - 1
        above = 1 if value == 26 else value + 1
        for candidate in (below, above):
            neighbors.append(name[:pos] + numbers_with_letters[candidate] + name[pos + 1:])
    return neighbors
def add_in_list(numbers_with_letters, number, name, new_names, i):
    """Append to *new_names* a copy of *name* whose character at position *i*
    is replaced by the letter that *number* maps to."""
    new_names.append(name[:i] + numbers_with_letters[number] + name[i + 1:])
def separete_names(names):
    """Split each word into its first and second half (odd lengths give the
    extra character to the second half); return (first_halves, second_halves)."""
    halves = [(word[:len(word) // 2], word[len(word) // 2:]) for word in names]
    first = [a for a, _ in halves]
    second = [b for _, b in halves]
    return first, second
def find_min_value(all_names, name, letters):
    """Return (best, score): the candidate in *all_names* with the smallest
    summed per-position alphabet distance to *name*, and that distance.

    Ties keep the earliest candidate; candidates must be at least len(name) long.
    """
    best_name = all_names[0]
    best_score = sys.maxsize
    for candidate in all_names:
        score = sum(abs(letters[candidate[j]] - letters[name[j]]) for j in range(len(name)))
        if score < best_score:
            best_score = score
            best_name = candidate
    return best_name, best_score
| [
"65672195+logika03@users.noreply.github.com"
] | 65672195+logika03@users.noreply.github.com |
1479a8bf420e0a09485ee8af5c9bd4532b063ea6 | 6a1bf45def872d1e46957017c8215afdaa3902d4 | /app/views.py | 4da6c57e887e1d0d51baa1a4708dc397264e6c51 | [] | no_license | radoslav/brother_ql_print_label | 7d72b84458674bf3d0e0a4deab62499c326e3fea | 9a5c21477280b0d45e0835cb98afc505c7838579 | refs/heads/master | 2023-03-25T06:53:08.379068 | 2020-05-18T21:14:48 | 2020-05-18T21:14:48 | 347,132,556 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,864 | py | from flask import render_template, request, redirect
from rq.registry import FailedJobRegistry
from app import app
from app import q
from app.helpers.helper_config import yaml_to_printer
from app.helpers.helper_image import create_imgs_from_labels
from app.helpers.helper_json import jsonToLabels
from app.helpers.helper_printing import is_printer_on
from app.print_task import print_task
# Printer handle built once at import time from the YAML configuration.
printer = yaml_to_printer()
@app.route('/')
def index():
    """Dashboard: render queued jobs plus the failed-job registry contents."""
    q_len = len(q)
    jobs = q.jobs
    registry_failed = FailedJobRegistry(queue=q)
    failed_jobs = []
    for job_id in registry_failed.get_job_ids():
        failed_jobs.append(q.fetch_job(job_id))
    return render_template("index.html", jobs=jobs, q_len=q_len, failed_jobs=failed_jobs,
                           failed_len=registry_failed.count)
@app.route("/api", methods=["GET"])
def api():
return "ok", 200
@app.route("/api/printer_on", methods=["GET"])
def printer_on():
return str(is_printer_on(printer)), 200
@app.route("/api/failed_clear", methods=["POST"])
def failed_clear():
registry_failed = FailedJobRegistry(queue=q)
for job_id in registry_failed.get_job_ids():
registry_failed.remove(job_id, delete_job=True)
return redirect("/")
@app.route("/api/requeue", methods=["POST"])
def requeue():
registry_failed = FailedJobRegistry(queue=q)
for job_id in registry_failed.get_job_ids():
registry_failed.requeue(job_id)
return redirect("/")
@app.route("/api/queue_clear", methods=["POST"])
def queue_clear():
q.empty()
return redirect("/")
@app.route("/api/print", methods=["POST"])
def api_print():
return_dict = {'success': False, 'print_material_ids': []}
# check for printer on
if not is_printer_on(printer): # negation for testing
# get labels
labels = jsonToLabels(request.get_json())
# for each sent to queue
for label in labels:
q.enqueue(print_task, printer, label, description=label.id)
return_dict['print_material_ids'].append(label.id)
return_dict['message'] = 'printer online!'
return_dict['success'] = True
return return_dict, 200
# curl --header "Content-Type: application/json" --request POST --data '[{"id":1463, "supplier_name": "ENDUTEX", "print_material_type": "backlight", "print_material": "Vinyl BP (endutex) niezaciągający wody", "url": "http://192.168.1.100/warehouse_print_materials/1463", "copies":2}]' http://127.0.0.1:5000/api/preview
@app.route("/api/preview", methods=["POST"])
def preview():
return_dict = {'success': False, 'print_material_ids': []}
app.logger.warning("dsdsds")
labels = jsonToLabels(request.get_json())
imgs = create_imgs_from_labels(labels)
for i, img in enumerate(imgs):
img.save('./app/img/test_copy_' + str(i) + '.png')
return return_dict, 200 | [
"radoslaw.brzozowski@gmail.com"
] | radoslaw.brzozowski@gmail.com |
d0805939216851eed9bad52fae5dabfb53dc7d22 | 75d47fe8be39983de208be9f8a9b63039c880c69 | /utils.py | 6f464fcdb98e5dba61fdbaa594de41cf990f7423 | [
"MIT"
] | permissive | zkangkang0/LPN | 4b24e30162482e3592e84688b7f911dd42289df4 | 8095d59c0df889ccd05a7268288e83e842b10fac | refs/heads/main | 2023-04-16T07:09:28.648438 | 2021-05-01T09:44:03 | 2021-05-01T09:44:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,184 | py | import os
import torch
import yaml
import torch.nn as nn
import parser
from model import ft_net, two_view_net, three_view_net
def make_weights_for_balanced_classes(images, nclasses):
    """Return one sampling weight per item: N / count(class of the item).

    *images* is a sequence whose elements carry the class index at position 1
    (e.g. torchvision ImageFolder (path, class) pairs). Rare classes receive
    proportionally larger weights, suitable for WeightedRandomSampler.
    """
    per_class = [0] * nclasses
    for item in images:
        per_class[item[1]] += 1  # samples seen per class
    total = float(sum(per_class))
    class_weight = [total / float(n) for n in per_class]
    return [class_weight[item[1]] for item in images]
# Get model list for resume
def get_model_list(dirname, key):
    """Return the lexicographically last checkpoint path in *dirname* whose
    filename contains both *key* and '.pth'; None if the directory is absent.

    Raises IndexError when the directory exists but holds no matching file.
    """
    if not os.path.exists(dirname):
        print('no dir: %s' % dirname)
        return None
    matches = [
        os.path.join(dirname, entry)
        for entry in os.listdir(dirname)
        if os.path.isfile(os.path.join(dirname, entry)) and key in entry and ".pth" in entry
    ]
    matches.sort()
    return matches[-1]
######################################################################
# Save model
#---------------------------
def save_network(network, dirname, epoch_label):
    """Save *network*'s state_dict to ./model/<dirname>/net_<epoch>.pth.

    The model is moved to CPU for serialization and moved back to the GPU
    afterwards when CUDA is actually available.

    Args:
        network: torch.nn.Module to checkpoint.
        dirname: sub-directory under ./model (created if missing).
        epoch_label: int epoch (zero-padded to 3 digits) or a string tag
            such as 'last'.
    """
    # makedirs(exist_ok=True) also creates the intermediate ./model directory,
    # which the original os.mkdir could not (it failed when ./model was absent).
    os.makedirs(os.path.join('./model', dirname), exist_ok=True)
    if isinstance(epoch_label, int):
        save_filename = 'net_%03d.pth' % epoch_label
    else:
        save_filename = 'net_%s.pth' % epoch_label
    save_path = os.path.join('./model', dirname, save_filename)
    torch.save(network.cpu().state_dict(), save_path)
    # BUG FIX: the original tested `torch.cuda.is_available` (a function
    # object, always truthy) instead of calling it, so network.cuda() was
    # attempted even on CPU-only builds and crashed there.
    if torch.cuda.is_available():
        network.cuda()
######################################################################
# Load model for resume
#---------------------------
def load_network(name, opt):
    """Resume the latest checkpoint under ./model/<name>/.

    Reads opts.yaml to restore training options onto *opt*, rebuilds the
    matching model architecture, loads the newest net_*.pth weights, and
    returns (network, opt, epoch) where epoch is an int or the string 'last'.
    """
    # Load config
    dirname = os.path.join('./model',name)
    # Checkpoint names look like net_010.pth / net_last.pth; parse the epoch.
    last_model_name = os.path.basename(get_model_list(dirname, 'net'))
    epoch = last_model_name.split('_')[1]
    epoch = epoch.split('.')[0]
    if not epoch=='last':
        epoch = int(epoch)
    config_path = os.path.join(dirname,'opts.yaml')
    with open(config_path, 'r') as stream:
        # NOTE(review): yaml.load without an explicit Loader is deprecated and
        # unsafe on untrusted files; yaml.safe_load would be preferable.
        config = yaml.load(stream)
    # Copy the persisted training options back onto the runtime opt object.
    opt.name = config['name']
    opt.data_dir = config['data_dir']
    opt.train_all = config['train_all']
    opt.droprate = config['droprate']
    opt.color_jitter = config['color_jitter']
    opt.batchsize = config['batchsize']
    opt.h = config['h']
    opt.w = config['w']
    opt.share = config['share']
    opt.stride = config['stride']
    opt.LPN = config['LPN']
    if 'pool' in config:
        opt.pool = config['pool']
    if 'h' in config:
        opt.h = config['h']
        opt.w = config['w']
    if 'gpu_ids' in config:
        opt.gpu_ids = config['gpu_ids']
    opt.erasing_p = config['erasing_p']
    opt.lr = config['lr']
    opt.nclasses = config['nclasses']
    opt.erasing_p = config['erasing_p']
    opt.use_dense = config['use_dense']
    opt.fp16 = config['fp16']
    opt.views = config['views']
    opt.block = config['block']
    if opt.use_dense:
        # NOTE(review): ft_net_dense is not among this file's imports
        # (only ft_net, two_view_net, three_view_net are) — this branch
        # would raise NameError; confirm against the model module.
        model = ft_net_dense(opt.nclasses, opt.droprate, opt.stride, None, opt.pool)
    # if opt.LPN:
    #     model = LPN(opt.nclasses)
    if opt.views == 2:
        model = two_view_net(opt.nclasses, opt.droprate, stride = opt.stride, pool = opt.pool, share_weight = opt.share)
    elif opt.views == 3:
        if opt.LPN:
            model = three_view_net(opt.nclasses, opt.droprate, stride = opt.stride, pool = opt.pool, share_weight = opt.share, LPN=True, block=opt.block)
        else:
            model = three_view_net(opt.nclasses, opt.droprate, stride = opt.stride, pool = opt.pool, share_weight = opt.share)
    # Older checkpoints may carry a use_vgg16 flag; rebuild accordingly.
    if 'use_vgg16' in config:
        opt.use_vgg16 = config['use_vgg16']
        if opt.views == 2:
            model = two_view_net(opt.nclasses, opt.droprate, stride = opt.stride, pool = opt.pool, share_weight = opt.share, VGG16 = opt.use_vgg16)
            if opt.LPN:
                model = two_view_net(opt.nclasses, opt.droprate, stride = opt.stride, pool = opt.pool, share_weight = opt.share, VGG16 = opt.use_vgg16, LPN = True, block=opt.block)
        elif opt.views == 3:
            model = three_view_net(opt.nclasses, opt.droprate, stride = opt.stride, pool = opt.pool, share_weight = opt.share, VGG16 = opt.use_vgg16)
    # load model
    if isinstance(epoch, int):
        save_filename = 'net_%03d.pth'% epoch
    else:
        save_filename = 'net_%s.pth'% epoch
    # save_filename = 'net_099.pth'
    save_path = os.path.join('./model',name,save_filename)
    print('Load the model from %s'%save_path)
    network = model
    network.load_state_dict(torch.load(save_path))
    return network, opt, epoch
def toogle_grad(model, requires_grad):
    """Enable or disable gradient tracking on every parameter of *model*."""
    for param in model.parameters():
        param.requires_grad_(requires_grad)
def update_average(model_tgt, model_src, beta):
    """Exponential-moving-average update: tgt <- beta*tgt + (1-beta)*src.

    Parameters are matched by name; gradient tracking is disabled on both
    models during the in-place copy_ so autograd does not record it.
    NOTE(review): only the source model's grads are re-enabled afterwards —
    the target stays requires_grad=False; confirm that asymmetry is intended.
    """
    toogle_grad(model_src, False)
    toogle_grad(model_tgt, False)
    param_dict_src = dict(model_src.named_parameters())
    for p_name, p_tgt in model_tgt.named_parameters():
        p_src = param_dict_src[p_name]
        # The two models must not share parameter tensors.
        assert(p_src is not p_tgt)
        p_tgt.copy_(beta*p_tgt + (1. - beta)*p_src)
    toogle_grad(model_src, True)
| [
"744914445@qq.com"
] | 744914445@qq.com |
011ea3f0b0343be617c412613dba74ba04b4e3c4 | 1b7c9d7824a9c8f87acd5ea4f0974fbf6ac9011a | /utilities/utils.py | 0c513e930742b2047131dc76038c5f8b78ea5df3 | [] | no_license | stivenlopezg/BostonSageMaker | 993bcd725c85407cf5a67db6fb699a7cc2c2f29f | 80f2758c064380775f179715c4a29cad9bd09c20 | refs/heads/master | 2023-06-26T02:02:36.899920 | 2021-07-01T18:26:35 | 2021-07-01T18:26:35 | 382,104,879 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 724 | py | import os
import xgboost
import pandas as pd
def download_artifact(s3_path: str, localpath: str):
    """Download an artifact from S3 via the AWS CLI.

    :param s3_path: source s3:// URI
    :param localpath: local destination path
    :return: the shell exit status of the `aws s3 cp` command (0 on success)
    """
    import shlex  # local import: quote args against spaces/shell metacharacters
    # SECURITY FIX: the original interpolated the raw paths into a shell
    # command; quoting prevents shell injection and broken paths.
    return os.system("aws s3 cp %s %s" % (shlex.quote(s3_path), shlex.quote(localpath)))
def decompress_artifact(localpath: str):
    """Extract a tar archive into the current working directory.

    :param localpath: path of the tar file to extract
    :return: the shell exit status of the `tar xvf` command (0 on success)
    """
    import shlex  # local import: quote the path against spaces/shell metacharacters
    # SECURITY FIX: quote the interpolated path (matches download_artifact).
    return os.system("tar xvf %s" % shlex.quote(localpath))
def prediction(estimator, filepath: str):
    """Score a headerless CSV with a trained XGBoost booster.

    Columns are renamed f0..fN-1 (XGBoost's default feature names) and a
    'prediction' column with the model outputs is appended.

    :param estimator: trained xgboost.Booster
    :param filepath: path to a comma-separated, headerless CSV file
    :return: the loaded DataFrame with the extra 'prediction' column
    """
    data = pd.read_csv(filepath, sep=",", header=None)
    data.columns = [f"f{i}" for i in range(0, data.shape[1])]
    data["prediction"] = estimator.predict(xgboost.DMatrix(data=data))
    return data
| [
"Stiven.lopez2"
] | Stiven.lopez2 |
614a3115667aa3206cf5124a4235719997c02771 | c5633afc5dc73547c729b5dcbd9f295f6e58a3b4 | /FinancialAnalytics/AssistedVariableCreation.py | 6b6c4bde07ccf2e12aa07740c86c3425641c9a52 | [] | no_license | nareshshah139/IE-Group-D-Term3 | fc2cda1b1ebcd721aa482afe46c5d83b998969a1 | afd736367e8e396f0f12c2742499fc4f34f19dc9 | refs/heads/master | 2021-01-21T19:47:25.989069 | 2017-02-02T06:04:16 | 2017-02-02T06:04:16 | 55,095,441 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,141 | py | import pandas as pd
import itertools
def load_training(dataset):
    """Load the training CSV: ';'-separated, '.' decimal point, ',' thousands."""
    return pd.read_csv(dataset, sep=";", decimal=".", thousands=",")
def load_test(dataset):
    """Load the test CSV with pandas defaults (comma-separated, '.' decimals)."""
    return pd.read_csv(dataset)
def Variable_create(dataset):
    """Interactively derive new columns from pairs of existing ones.

    Loads the training CSV, asks the user for a set of columns and one of
    the operations + - * /, and (after confirmation) adds one derived
    column per unordered pair of the chosen columns. The '/' branch fills
    NaNs with the column median first to avoid NaN quotients.

    Returns the DataFrame with the derived columns appended.
    """
    training = load_training(dataset)
    print(list(training))
    print(training)
    print(training.columns)
    column1 = input("Please select the columns you want separated by spaces:").split(" ")
    column_f = list(itertools.combinations(column1,2))
    k = list(column_f)
    print(k)
    operation = input("Please select the operations between the columns:")
    operators = ['+','-','*','/']
    if operation not in operators:
        # NOTE(review): execution deliberately continues after this message
        # (original behavior); no derived columns are added in that case.
        print("operation not supported")
    print("Do you want to do this operation for all the combinations of columns?")
    confirmation = input("Y/N: ")
    if confirmation == 'Y':
        if operation == "+":
            for i, (columns1, columns2) in enumerate(k):
                training["Sum_column_"+str(len(training.columns)+i)] = training[columns1] + training[columns2]
        elif operation == "-":
            # BUG FIX: the original indexed training[training[columns1]] and
            # training[training.columns[columns2]], which raises KeyError;
            # operate on the named columns directly, as the '+' branch does.
            for i, (columns1, columns2) in enumerate(k):
                training["Diff_column_"+str(len(training.columns)+i)] = training[columns1] - training[columns2]
        elif operation == "*":
            # BUG FIX: same double-indexing error as the '-' branch.
            for i, (columns1, columns2) in enumerate(k):
                training["Product_column_"+str(len(training.columns)+i)] = training[columns1] * training[columns2]
        elif operation == "/":
            print("I was here")
            for i, (columns1, columns2) in enumerate(k):
                training[columns1].fillna(training[columns1].median(),inplace = True)
                training[columns2].fillna(training[columns2].median(),inplace = True)
                training["Division_column_"+str(i)] = training[columns1] / training[columns2]
    print(training.head())
    return training
| [
"noreply@github.com"
] | noreply@github.com |
febdf425c24b86d9f18a967a7d6da3f814fa76a6 | ecc09674541b5a715e313eae8d1bd2a1b15aae4f | /venv/Scripts/pip-script.py | c4c0f1a1023cb8cebe1d5f2bfd5b68550fa241f9 | [] | no_license | 282787906/CheckFileType | 2ee3a89645946f9abe4bcf6a8a40b9492d6eee41 | 04f0531c044c6ed6dd676ca6e96251c14b8d5dd5 | refs/heads/master | 2020-08-30T02:23:44.951002 | 2019-10-29T08:07:29 | 2019-10-29T08:07:29 | 218,234,111 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 410 | py | #!E:\lqg\Workspace\python\CheckFileType\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip')()
)
| [
"282787906@qq.com"
] | 282787906@qq.com |
a27ffb478d2e67e0421e6bd0ec93873bf9393a62 | 36957a9ce540846d08f151b6a2c2d582cff1df47 | /VR/Python/Python36/Lib/lib2to3/tests/data/crlf.py | ae969da2ed9f77889127906baddd4a6ef5472fd3 | [] | no_license | aqp1234/gitVR | 60fc952307ef413e396d31e0d136faffe087ed2b | e70bd82c451943c2966b8ad1bee620a0ee1080d2 | refs/heads/master | 2022-12-29T15:30:12.540947 | 2020-10-07T15:26:32 | 2020-10-07T15:26:32 | 290,163,043 | 0 | 1 | null | 2020-08-25T09:15:40 | 2020-08-25T08:47:36 | C# | UTF-8 | Python | false | false | 127 | py | version https://git-lfs.github.com/spec/v1
oid sha256:d910ad886333abf3664a4fb4290d3b81307a16c6d9ca14356b3644a9aae6e714
size 50
| [
"aqp1234@naver.com"
] | aqp1234@naver.com |
7c691685311f964776bd731d24ea73ab2268ea4a | a6719f4815ff41d3a1f09e9a63a64c4582d03702 | /file_handling/read_file_demo.py | b131427b8bc801751761bb301ff7f1a6d3fecacc | [
"MIT"
] | permissive | thanh-vt/python-basic-programming | 8136007b8435dae6339ae33015fe536e21b19d1d | 5fe817986fbef2649b4b03955f07b59d2a2035d8 | refs/heads/main | 2023-01-30T12:57:36.819687 | 2020-12-13T17:27:05 | 2020-12-13T17:27:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 144 | py | f = open('demo_file.txt', 'r')
print(f.read())
f.close()  # BUG FIX: the handle opened above was never closed (resource leak)

# read only part of a file (first 5 characters); a fresh handle resets the
# read position, and the context manager closes it deterministically.
with open('demo_file.txt', 'r') as f:
    print(f.read(5))
| [
"thanhvt@vissoft.vn"
] | thanhvt@vissoft.vn |
0d365ff68bf6e46ed57dda06807614bc240b8a70 | 9cb1a903cd779910bd3a23be35ba3b3406deacab | /main.py | 2964f7ee5f3abb94e46f2a3217b90e7553b46cec | [] | no_license | F-ridge/realtime_sentiment | 2f36c0629d2bbb26bb9f703bb0b69d332e50bf80 | 04f2bca76ba74063a28d61a7221180ef75b3fed3 | refs/heads/master | 2022-11-13T04:23:49.063856 | 2020-07-11T07:59:02 | 2020-07-11T07:59:02 | 262,238,751 | 0 | 2 | null | 2020-06-11T07:43:32 | 2020-05-08T05:52:01 | Python | UTF-8 | Python | false | false | 2,331 | py | import time
import os
import sys
sys.path.append(os.getcwd()) #カレントディレクトリがrealtime_sentimentの前提
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import json
from models.src.rnn.data.dataset_readers.reader import TwiReader
from models.src.rnn.model.model import RnnClassifier
from allennlp.predictors import Predictor
import models.src.rnn.predictor.predictor
from realtime_sentiment.lib.auth import google_spreadsheet_auth
from realtime_sentiment.src.streaming.streaming import input
from realtime_sentiment.src.output.output import output
# Paths and the sentiment predictor are initialized once at import time;
# cuda_device=-1 forces CPU inference.
model_path = 'models/serials/xxlarge-bin/model.tar.gz'
input_path = 'data/input.jsonl'
output_path = 'data/predict.jsonl'
predictor = Predictor.from_path(archive_path=model_path, predictor_name='sentiment', cuda_device=-1)
def main(service):
    """One polling cycle: fetch new messages, run sentiment, push results.

    Note: `input` here is NOT the builtin — it is the streaming reader
    imported from realtime_sentiment.src.streaming.streaming; it returns a
    truthy value when new messages were written to input_path.
    The commented-out lines are leftover timing instrumentation.
    """
    # start = time.time()
    time.sleep(2)
    new_massage = input()
    # input_time = time.time() - start
    # print ("input_time:{:.3f}".format(input_time) + "[sec]")
    if new_massage:
        with open(input_path, 'r') as f:
            json_lines = f.readlines()
        json_dicts = []
        for line in json_lines:
            json_dicts.append(predictor.load_line(line))
        output_dicts = predictor.batch_json_to_labeled_instances(json_dicts)
        # Drop the raw text from each prediction before writing it out.
        for i in range(len(output_dicts)):
            del output_dicts[i]["text"]
        outputs = [repr(json.dumps(d).encode().decode('unicode-escape')).strip('\'') + '\n' for d in output_dicts]
        with open(output_path, 'w') as f:
            f.writelines(outputs)
        # predict_time = time.time() - start - input_time
        # print ("predict_time:{:.3f}".format(predict_time) + "[sec]")
        time.sleep(1.5)
        # Push the predictions to the Google spreadsheet via *service*.
        output(service)
        # output_time = time.time() - start - input_time - predict_time
        # print ("output_time:{:.3f}".format(output_time) + "[sec]")
    else:
        print("No new messages!")
    # elapsed_time = time.time() - start
    # print ("elapsed_time:{:.3f}".format(elapsed_time) + "[sec]")
if __name__ == '__main__':
    # once=0 selects the polling loop below; set to 1 for a single cycle.
    once = 0
    service = google_spreadsheet_auth()
    if once:
        main(service)
    else:
        count = 0
        # Poll repeatedly, stopping after 1000 cycles.
        while True:
            count+=1
            print(count)
            main(service)
            if count == 1000:
                break
"fujino.junpei@unipro.co.jp"
] | fujino.junpei@unipro.co.jp |
e49fdf5c4abe3c76af6016f2d03578e143e30192 | 82572927501820811070bffdb3aa4dcbc8c2718b | /users/migrations/0001_initial.py | 8fdd1278362f2588f18570126df9fb7821c731a6 | [] | no_license | solomon-lah/market-hub | d6accb6ba66e8ec4c19808bd0eabe42b72ea5868 | bd1b5d95446e57e9ca6bc57d845de4c51e310137 | refs/heads/master | 2022-11-05T16:05:41.833279 | 2020-06-20T12:57:49 | 2020-06-20T12:57:49 | 273,704,906 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,668 | py | # Generated by Django 3.0.5 on 2020-04-25 12:09
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema for the users app: itemsTable and userTable."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='itemsTable',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('email', models.EmailField(max_length=254)),
                ('itemName', models.CharField(max_length=100)),
                ('itemPrice', models.IntegerField()),
                ('description', models.CharField(max_length=300)),
                ('category', models.CharField(max_length=20)),
                ('img_1', models.FileField(upload_to='items/')),
                ('img_2', models.FileField(upload_to='items/')),
                ('dateUploaded', models.DateTimeField()),
            ],
        ),
        migrations.CreateModel(
            name='userTable',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('email', models.EmailField(max_length=254, unique=True)),
                # NOTE(review): passcode is a plain CharField — appears to
                # store credentials unhashed; confirm against the auth flow.
                ('passcode', models.CharField(max_length=30)),
                ('surname', models.CharField(max_length=20)),
                ('firstname', models.CharField(max_length=20)),
                ('gender', models.CharField(max_length=6)),
                ('phoneNumber', models.CharField(max_length=11)),
                ('address', models.CharField(max_length=300)),
                ('img', models.FileField(upload_to='users/')),
            ],
        ),
    ]
| [
"solomonodediran@gmail.com"
] | solomonodediran@gmail.com |
5ab318537552be6e8e382dc388e95e4372a696ff | bba1ef5990147b8caad0e65c5886b3cb265a9362 | /ntnn/commons/nl.py | 9572c2116b076b6a963f4cfb8f352fb912539b1b | [] | no_license | milysun/ntrust-ntnn | 05fe177dd9a8a7f53c815c6d14be9417397f8075 | ffd3bc33f75d63234a32ce66cc995da3b402a3de | refs/heads/master | 2020-05-19T19:31:56.352695 | 2018-12-01T19:10:52 | 2018-12-02T14:05:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,533 | py | import re
import math
from ntnn.utils import arr_from
# Number of Hangul initial (cho), medial (jung) and final (jong) jamo.
CHO, JUNG, JONG = 19, 21, 28

UNK = '\u00e0'  # unknown chars. should be lowercase

# Supported unicode codepoint ranges (inclusive).
ASCII = range(0x20, 0x7e+1)
HANGUL = range(0xac00, 0xd7a3+1)
SYMBOL = range(0x2000, 0x206f+1)
SYMBOL2 = range(0x318d, 0x318d+1)

# Contiguous id space: PAD, UNK, ASCII block, then the Hangul jamo blocks
# (cho/jung/jong) followed by the two symbol blocks.
PAD_ID = 0
UNK_ID = PAD_ID + 1
ASCII_ID = UNK_ID + 1
HANGUL_ID = ASCII_ID + 1
CHO_ID = HANGUL_ID
JUNG_ID = CHO_ID + CHO
JONG_ID = JUNG_ID + JUNG
SYMBOL_ID = JONG_ID + JONG
SYMBOL2_ID = SYMBOL_ID + len(SYMBOL)


def char_ids(ch):
    """Map a single character to a list of integer ids.

    A Hangul syllable decomposes into three ids (cho, jung, jong); every
    other supported character maps to a one-element list, and anything
    outside the supported ranges maps to [UNK_ID].
    """
    n = ord(ch)
    if n in HANGUL:
        han = n - HANGUL[0]
        cho = int(han / JONG / JUNG) + HANGUL_ID
        jung = int(han / JONG) % JUNG + JUNG_ID
        jong = int(han % JONG) + JONG_ID
        return [cho, jung, jong]
    # Bug fix: the ASCII/SYMBOL/SYMBOL2 branches previously returned a bare
    # int while the HANGUL/UNK branches returned lists; words_ids() extends a
    # list with this result (`word_id += char_ids(ch)`), which raised
    # TypeError for any ASCII or symbol character. Always return a list.
    if n in ASCII:
        return [n - ASCII[0] + ASCII_ID]
    if n in SYMBOL:
        return [n - SYMBOL[0] + SYMBOL_ID]
    if n in SYMBOL2:
        return [n - SYMBOL2[0] + SYMBOL2_ID]
    return [UNK_ID]
def sent_ids(sentence, **kwargs):
    """Split *sentence* on single whitespace characters and encode the tokens."""
    tokens = re.split(r'\s', sentence)
    return words_ids(tokens, **kwargs)
def words_ids(sentence, maxwords=100, maxchars=10):
    """Encode a tokenized sentence as a fixed-size matrix of character ids.

    Args:
        sentence: [nwords] list of word strings
        maxwords: max words per sentence
        maxchars: max character ids per word
    Return: [maxwords, maxchars]
    """
    encoded = []
    for token in sentence:
        ids = []
        for symbol in token:
            ids.extend(char_ids(symbol))
        encoded.append(arr_from(ids, maxchars, PAD_ID))
    empty_word = arr_from([], maxchars, PAD_ID)
    return arr_from(encoded, maxwords, empty_word)
| [
"somewehr@gmail.com"
] | somewehr@gmail.com |
ccd378de1b53378a3c870b312e33b6acad65dcc7 | 8928acc2ef95bdc3d0ef5b2e9ed35fc2c88b18cb | /lab2/Lab2/Q5.py | 246577157b9dda579ccf2417da15165c5e1f5028 | [] | no_license | coloriordanCIT/pythonlabs | e1d3eb82f3b3aa693eb6ecaa90013caa094fe881 | 248b7784750dd46bd64c121e6cf037d82e555666 | refs/heads/master | 2020-03-30T07:49:19.215153 | 2018-10-01T19:14:56 | 2018-10-01T19:14:56 | 150,968,777 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,622 | py | '''
Created on 01 Oct 2018
@author: colinoriordan
Application that will create a list and will populate it with the first 40 Fibonacci numbers.
The program will then ask the user to enter an integer value between 1 and 40 to indicate
which number in the Fibonacci series they would like to see
'''
#main function of the application
def main():
    # Build the full sequence once, then let the user pick an entry.
    fib_sequence = generateFibonacciList()
    selectFibonacciNum(fib_sequence)
"""
function - generateFibonacciList - creates a list of the first 40 fibonacci numbers
return - fibonacciList - list of the first 40 numbers in the fibonacci sequence
"""
def generateFibonacciList():
#assign the first two numbers of the fibonacci list
fibonacciList=[]
fibonacciList.append(0)
fibonacciList.append(1)
#loop to assing next fibonacci number to sum of previous two
for x in range (2, 40):
fibonacciList.append(fibonacciList[x-1]+fibonacciList[x-2])
return fibonacciList
""""
function - selectFibonacciNum - Gets user input of which fibonacci number they'd like to see and prints it.
arg - fibonacciList - list of the first 40 fibonacci numbers
"""
def selectFibonacciNum(fibonacciList):
num=100
#get num input
while num not in range(1, 41):
num=int(input("Enter the n'th fibonacci number you'd like to see (1-40): "))
#print the num'th number of the fibonacci sequence
print("The ", num, " fibonacci number is ", fibonacciList[num-1])
if __name__ == '__main__':
    # Guard the entry point so importing this module does not immediately
    # launch the interactive prompt (previously main() ran on import).
    main()
"colinoriordan@192.168.1.9"
] | colinoriordan@192.168.1.9 |
dd289bbe11d653c04e5f33bf697ff022530a0ef8 | b7eb8279ebe2f525d27849d6ca24cc7270d30433 | /processing/b2_demultiplex_stats.py | c941dc97629b4495d6d94f77ebdff996cd4bb1a9 | [] | no_license | maxwshen/prime-peptide | d0da277521537c6e09dfeca4afbe3297893ed61b | d72244e85683583c812d3bd106b6874da0a17b80 | refs/heads/main | 2023-04-07T19:07:03.371146 | 2021-04-09T20:36:07 | 2021-04-09T20:36:07 | 356,391,769 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,136 | py | #
from __future__ import division
import _config
import sys, os, fnmatch, datetime, subprocess
sys.path.append('/home/unix/maxwshen/')
import numpy as np
from collections import defaultdict
from mylib import util
import pandas as pd
# Default params
inp_dir = _config.OUT_PLACE + 'b_demultiplex/'  # output folder of the demultiplexing step
NAME = util.get_fn(__file__)  # this script's base name, used to label its output folder
out_dir = _config.OUT_PLACE + NAME + '/'
util.ensure_dir_exists(out_dir)

# Sample sheet; demultiplex folders are keyed by its 'Name' column.
exp_design = pd.read_csv(_config.DATA_DIR + 'exp_design.csv')
##
# Functions
##
def demultiplex_stats(nm):
    """Count reads across all R1 fastq files for sample *nm* and print the total."""
    total_lines = 0
    sample_dir = inp_dir + nm + '/'
    for fn in os.listdir(sample_dir):
        if 'R1' not in fn:
            continue
        lc = util.line_count(sample_dir + fn)
        # A fastq record is 4 lines; an odd line count signals a truncated file.
        if lc % 2 == 1:
            print('Error: fq num lines is odd')
        total_lines += lc

    # divide by 4 for fastq
    num_reads = total_lines / 4
    print(f'{nm}: {num_reads} reads')
    return
##
# qsub
##
# def gen_qsubs():
# # Generate qsub shell scripts and commands for easy parallelization
# print('Generating qsub scripts...')
# qsubs_dir = _config.QSUBS_DIR + NAME + '/'
# util.ensure_dir_exists(qsubs_dir)
# qsub_commands = []
# num_scripts = 0
# for idx in range(0, 60):
# command = 'python %s.py %s' % (NAME, idx)
# script_id = NAME.split('_')[0]
# # Write shell scripts
# sh_fn = qsubs_dir + 'q_%s_%s.sh' % (script_id, idx)
# with open(sh_fn, 'w') as f:
# f.write('#!/bin/bash\n%s\n' % (command))
# num_scripts += 1
# # Write qsub commands
# qsub_commands.append('qsub -V -wd %s %s' % (_config.SRC_DIR, sh_fn))
# # Save commands
# with open(qsubs_dir + '_commands.txt', 'w') as f:
# f.write('\n'.join(qsub_commands))
# print('Wrote %s shell scripts to %s' % (num_scripts, qsubs_dir))
# return
##
# Main
##
@util.time_dec
def main():
    """Print demultiplexed read counts for every sample, plus unmatched reads."""
    print(NAME)
    for nm in exp_design['Name']:
        demultiplex_stats(nm)
    # 'other' collects reads that matched no sample barcode.
    demultiplex_stats('other')
    return out_dir
if __name__ == '__main__':
    # Bug fix: main() takes no arguments, so the old `main(split=sys.argv[1])`
    # branch raised TypeError whenever a CLI argument was supplied (a leftover
    # from the commented-out per-split qsub template above). Any extra
    # arguments are now ignored.
    main()
"maxwshen@gmail.com"
] | maxwshen@gmail.com |
7440a80c7ca179e7b8ed050d6c5bec86b8f6c673 | 2f1b8b0c2ca4ae73763f58132405281a9779eac1 | /tests/tests/test_cluster.py | b8c3ab4c53347ee318f7d9e780cee08a86435d13 | [] | no_license | vincenzopennone/drep | 14db58f2999405b4ea16013ce45576c31693ec8b | fc0896affa666205605d790c30d2623291152c68 | refs/heads/master | 2023-06-03T23:18:13.362404 | 2021-06-24T17:02:20 | 2021-06-24T17:02:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,281 | py | import os
import glob
import shutil
import pandas as pd
import importlib
import logging
import pytest
import tests.test_utils as test_utils
import drep
from drep import argumentParser
from drep.controller import Controller
from drep.WorkDirectory import WorkDirectory
from drep.d_bonus import find_program
class Empty():
    """Bare attribute container; the `self` fixture hangs test paths off it."""
@pytest.fixture()
def self():
    """Per-test context: resolve test paths, reset work dirs, clean up after.

    Yields an Empty() object mimicking the old unittest-style `self` so the
    commented-out class methods below could be promoted to plain functions.
    """
    # Set up
    self = Empty()
    self.genomes = test_utils.load_test_genomes()
    self.broken_genome = test_utils.load_broken_genome()
    self.wd_loc = test_utils.load_test_wd_loc()
    self.test_dir = test_utils.load_random_test_dir()
    self.s_wd_loc = test_utils.load_solutions_wd()

    # Start each test from an empty scratch dir and work dir.
    if os.path.isdir(self.test_dir):
        shutil.rmtree(self.test_dir)

    if os.path.isdir(self.wd_loc):
        shutil.rmtree(self.wd_loc, ignore_errors=True)
    if not os.path.isdir(self.test_dir):
        os.mkdir(self.test_dir)

    # Reset logging so drep's per-run log configuration starts clean.
    importlib.reload(logging)

    yield self

    # Teardown
    logging.shutdown()
    if os.path.isdir(self.wd_loc):
        shutil.rmtree(self.wd_loc)
    if os.path.isdir(self.test_dir):
        shutil.rmtree(self.test_dir)
# class test_cluster():
# def __init__(self):
# pass
#
# def setUp(self):
# self.genomes = test_utils.load_test_genomes()
# self.broken_genome = test_utils.load_broken_genome()
# self.wd_loc = test_utils.load_test_wd_loc()
# self.test_dir = test_utils.load_random_test_dir()
# self.s_wd_loc = test_utils.load_solutions_wd()
#
# if os.path.isdir(self.test_dir):
# shutil.rmtree(self.test_dir)
#
# if os.path.isdir(self.wd_loc):
# shutil.rmtree(self.wd_loc, ignore_errors=True)
# if not os.path.isdir(self.test_dir):
# os.mkdir(self.test_dir)
#
# importlib.reload(logging)
#
# def tearDown(self):
# logging.shutdown()
# if os.path.isdir(self.wd_loc):
# shutil.rmtree(self.wd_loc)
# if os.path.isdir(self.test_dir):
# shutil.rmtree(self.test_dir)
#
# def run(self):
# # self.setUp()
# # self.test_all_vs_all_mash()
# # self.tearDown()
# #
# # self.setUp()
# # self.test_list_genome_load()
# # self.tearDown()
# #
# # self.setUp()
# # self.test_all_vs_all_mash()
# # self.tearDown()
# #
# # self.setUp()
# # self.test_cluster_mash_database()
# # self.tearDown()
# #
# # # self.setUp()
# # # self.time_compare_genomes()
# # # self.tearDown()
# #
# # self.setUp()
# # self.test_goANI()
# # self.tearDown()
# #
# # self.setUp()
# # self.test_goANI2()
# # self.tearDown()
# #
# # self.setUp()
# # self.test_fastANI()
# # self.tearDown()
# #
# # self.setUp()
# # self.test_compare_genomes()
# # self.tearDown()
#
# self.setUp()
# self.test_genome_hierarchical_clustering()
# self.tearDown()
#
# self.setUp()
# self.functional_test_4()
# self.tearDown()
#
# self.setUp()
# self.functional_test_3()
# self.tearDown()
#
# self.setUp()
# self.functional_test_2()
# self.tearDown()
#
# self.setUp()
# self.functional_test_1()
# self.tearDown()
#
# self.setUp()
# self.skipsecondary_test()
# self.tearDown()
def test_list_genome_load(self):
    '''
    Test inputting a list of genomes via a text file
    '''
    bdb = drep.d_cluster.utils.load_genomes(self.genomes)
    data_folder = self.test_dir

    # Make the list of genomes (one absolute path per line)
    if not os.path.exists(data_folder):
        os.mkdir(data_folder)
    genome_loc = os.path.join(data_folder, 'genomes.txt')
    with open(genome_loc, 'w') as o:
        for i, row in bdb.iterrows():
            o.write(row['location'] + '\n')

    # Test it out: pass the text file itself as the -g argument
    wd_loc = self.wd_loc
    s_wd_loc = self.s_wd_loc

    # args = argumentParser.parse_args(['cluster',wd_loc,'--S_algorithm',\
    #     'fastANI','-g',genome_loc])
    # controller = Controller()
    # controller.parseArguments(args)

    args = argumentParser.parse_args(['dereplicate', wd_loc, '--S_algorithm', 'fastANI', '-g', genome_loc])
    kwargs = vars(args)
    # del kwargs['genomes']
    # drep.d_cluster.d_cluster_wrapper(wd_loc, **kwargs)
    drep.d_cluster.controller.d_cluster_wrapper(wd_loc, **kwargs)

    # Verify against the solutions work directory
    Swd = WorkDirectory(s_wd_loc)
    wd = WorkDirectory(wd_loc)

    # Confirm Cdb.csv is correct
    db1 = Swd.get_db('Cdb')
    del db1['comparison_algorithm']
    db2 = wd.get_db('Cdb')
    del db2['comparison_algorithm']
    assert test_utils.compare_dfs(db1, db2), "{0} is not the same!".format('Cdb')

    # Sanity-check one cross-species fastANI value
    Ndb = drep.d_cluster.compare_utils.compare_genomes(bdb, 'fastANI', data_folder)
    db = Ndb[(Ndb['reference'] == 'Enterococcus_faecalis_T2.fna')\
        & (Ndb['querry'] == 'Enterococcus_casseliflavus_EC20.fasta')]
    assert (db['ani'].tolist()[0] > 0.7) & (db['ani'].tolist()[0] < 0.8)
def test_genome_hierarchical_clustering(self):
    '''
    Test d_cluster.test_genome_hierarchical_clustering
    '''
    # Use the pre-computed Ndb from the solutions work directory
    wdS = drep.WorkDirectory.WorkDirectory(self.s_wd_loc)
    Ndb = wdS.get_db('Ndb')

    # Run clustering on Ndb
    Cdb, c2ret = drep.d_cluster.utils._cluster_Ndb(Ndb, comp_method='ANImf')
    g2c = Cdb.set_index('genome')['secondary_cluster'].to_dict()
    # T2 and TX0104 should split; T2 and YI6-1 should co-cluster
    assert g2c['Enterococcus_faecalis_T2.fna'] != g2c['Enterococcus_faecalis_TX0104.fa']
    assert g2c['Enterococcus_faecalis_T2.fna'] == g2c['Enterococcus_faecalis_YI6-1.fna']

    # Make sure storage is correct
    wd = drep.WorkDirectory.WorkDirectory(self.wd_loc)
    wd.store_special('secondary_linkages', c2ret)
    wd.load_cached()
    got = wd.get_cluster('secondary_linkage_cluster_1')
    assert len(got) == 3
def test_compare_genomes(self):
    '''
    Test d_cluster.compare_genomes
    '''
    bdb = drep.d_cluster.utils.load_genomes(self.genomes)
    data_folder = self.test_dir

    # Try gANI (only when the external ANIcalculator binary is available)
    loc, works = drep.d_bonus.find_program('ANIcalculator')
    if works:
        p_folder = os.path.join(data_folder, 'prodigal')
        #print(p_folder)
        Ndb = drep.d_cluster.compare_utils.compare_genomes(bdb, 'gANI', data_folder, \
            prod_folder = p_folder)
        db = Ndb[(Ndb['reference'] == 'Enterococcus_faecalis_T2.fna')\
            & (Ndb['querry'] == 'Enterococcus_casseliflavus_EC20.fasta')]
        assert (db['ani'].tolist()[0] > 0.7) & (db['ani'].tolist()[0] < 0.75)

    # Try ANImf
    Ndb = drep.d_cluster.compare_utils.compare_genomes(bdb, 'ANImf', data_folder)
    db = Ndb[(Ndb['reference'] == 'Enterococcus_faecalis_T2.fna')\
        & (Ndb['querry'] == 'Enterococcus_casseliflavus_EC20.fasta')]
    assert (db['ani'].tolist()[0] > 0.85) & (db['ani'].tolist()[0] < 0.86)

    # Try ANIn
    Ndb = drep.d_cluster.compare_utils.compare_genomes(bdb, 'ANIn', data_folder)
    db = Ndb[(Ndb['reference'] == 'Enterococcus_faecalis_T2.fna')\
        & (Ndb['querry'] == 'Enterococcus_casseliflavus_EC20.fasta')]
    assert (db['ani'].tolist()[0] > 0.85) & (db['ani'].tolist()[0] < 0.86)
def test_goANI(self):
    '''
    Test goANI
    '''
    import time
    bdb = drep.d_cluster.utils.load_genomes(self.genomes)
    data_folder = self.test_dir

    # Copy over prodigal gene predictions from the solutions work directory
    self.s_wd_loc = test_utils.load_solutions_wd()
    p_folder = os.path.join(data_folder, 'data/prodigal/')
    shutil.copytree(os.path.join(self.s_wd_loc, 'data/prodigal'), \
        p_folder)

    # Try goANI
    p_folder = os.path.join(data_folder, 'data/prodigal/')
    Ndb = drep.d_cluster.compare_utils.compare_genomes(bdb, 'goANI', data_folder, \
        prod_folder = p_folder)
    db = Ndb[(Ndb['reference'] == 'Enterococcus_faecalis_T2.fna')\
        & (Ndb['querry'] == 'Enterococcus_casseliflavus_EC20.fasta')]
    assert (db['ani'].tolist()[0] > 0.7) & (db['ani'].tolist()[0] < 0.8)
def test_goANI2(self):
    '''
    Test goANI in the case where the genomes share no genes
    '''
    import time
    bdb = drep.d_cluster.utils.load_genomes(self.genomes)
    data_folder = self.test_dir

    # Copy over prodigal gene predictions from the solutions work directory
    self.s_wd_loc = test_utils.load_solutions_wd()
    p_folder = os.path.join(data_folder, 'data/prodigal/')
    shutil.copytree(os.path.join(self.s_wd_loc, 'data/prodigal'), \
        p_folder)

    # Remove all but one gene in one of the prodigal files
    p_folder = os.path.join(data_folder, 'data/prodigal/')
    for f in glob.glob(p_folder + '*'):
        if 'Escherichia_coli_Sakai.fna.fna' in f:
            new_file = open(f + '.2', 'w')
            old_file = open(f, 'r')
            j = 0
            # Copy lines until the second fasta header, i.e. keep only the
            # first gene record.
            for line in old_file.readlines():
                if ((line[0] == '>') & (j != 0)):
                    break
                j += 1
                # NOTE(review): joining stripped lines with '/' (not '\n')
                # deliberately(?) mangles the record — confirm whether this
                # was meant to be a newline.
                new_file.write(line.strip() + '/')
            new_file.close()
            old_file.close()
            os.remove(f)
            shutil.copy(f + '.2', f)

    # Try goANI
    p_folder = os.path.join(data_folder, 'data/prodigal/')
    Ndb = drep.d_cluster.compare_utils.compare_genomes(bdb, 'goANI', data_folder, \
        prod_folder = p_folder)
    db = Ndb[(Ndb['reference'] == 'Enterococcus_faecalis_T2.fna')\
        & (Ndb['querry'] == 'Enterococcus_casseliflavus_EC20.fasta')]
    assert (db['ani'].tolist()[0] > 0.7) & (db['ani'].tolist()[0] < 0.8)
def test_fastANI(self):
    '''
    Test fastANI
    '''
    bdb = drep.d_cluster.utils.load_genomes(self.genomes)
    data_folder = self.test_dir

    # Cross-species ANI should land in the expected window
    Ndb = drep.d_cluster.compare_utils.compare_genomes(bdb, 'fastANI', data_folder)
    db = Ndb[(Ndb['reference'] == 'Enterococcus_faecalis_T2.fna')\
        & (Ndb['querry'] == 'Enterococcus_casseliflavus_EC20.fasta')]
    assert (db['ani'].tolist()[0] > 0.7) & (db['ani'].tolist()[0] < 0.8)
@pytest.mark.skip(reason="You don't need to run this")
def test_time_compare_genomes(self):
    '''
    Time d_cluster.compare_genomes
    '''
    import time
    bdb = drep.d_cluster.utils.load_genomes(self.genomes)
    data_folder = self.test_dir

    # Benchmark each comparison algorithm on a single processor
    for method in ['fastANI', 'ANIn', 'ANImf']:
        # Try ANImf
        start = time.time()
        Ndb = drep.d_cluster.compare_utils.compare_genomes(bdb, method, data_folder, processors=1)
        db = Ndb[(Ndb['reference'] == 'Enterococcus_faecalis_T2.fna')\
            & (Ndb['querry'] == 'Enterococcus_casseliflavus_EC20.fasta')]
        assert (db['ani'].tolist()[0] > 0.7) & (db['ani'].tolist()[0] < 0.9)
        end = time.time()

        comps = len(bdb) * len(bdb)
        print("{1} time: {0:.2f} seconds for {2} comparisons ({3:.2f} seconds per comparison)".format(end-start, method, comps, (end-start)/comps))
def test_all_vs_all_mash(self):
    '''
    Test d_cluster.all_vs_all_MASH
    '''
    bdb = drep.d_cluster.utils.load_genomes(self.genomes)
    bdb = drep.d_filter._add_lengthN50(bdb, bdb)
    data_folder = self.test_dir

    # Run it under normal conditions: 5 genomes -> 25 pairwise distances
    Mdb, Cdb, cluster_ret = drep.d_cluster.compare_utils.all_vs_all_MASH(bdb, data_folder)
    assert len(Mdb) == 25

    db = Mdb[(Mdb['genome1'] == 'Enterococcus_faecalis_YI6-1.fna') & \
        (Mdb['genome2'] == 'Enterococcus_faecalis_TX0104.fa')]
    d = float(db['dist'].tolist()[0])
    assert (d > .01) & (d < .02)

    # One sketch folder holding all 6 sketch files
    assert len(glob.glob(data_folder + '/MASH_files/sketches/*')) == 1
    assert len(glob.glob(data_folder + '/MASH_files/sketches/*/*')) == 6

    # Start over
    shutil.rmtree(self.test_dir)
    os.mkdir(self.test_dir)

    # Run it under reduced chuck size (multi-round primary clustering)
    Mdb, Cdb, cluster_ret = drep.d_cluster.compare_utils.all_vs_all_MASH(bdb, data_folder, primary_chunksize=2, multiround_primary_clustering=True)
    assert len(Mdb) != 25

    db = Mdb[(Mdb['genome1'] == 'Enterococcus_faecalis_YI6-1.fna') & \
        (Mdb['genome2'] == 'Enterococcus_faecalis_TX0104.fa')]
    d = float(db['dist'].tolist()[0])
    assert (d > .01) & (d < .02)

    # Chunking produces multiple sketch folders / files
    assert len(glob.glob(data_folder + '/MASH_files/sketches/*')) == 3
    assert len(glob.glob(data_folder + '/MASH_files/sketches/*/*')) == 8
def test_cluster_mash_database(self):
    '''
    Test d_cluster.cluster_mash_database
    '''
    # Use the pre-computed Mdb from the solutions work directory
    wdS = drep.WorkDirectory.WorkDirectory(self.s_wd_loc)
    Mdb = wdS.get_db('Mdb')

    # Make sure clustering is correct
    Cdb, cluster_ret = drep.d_cluster.compare_utils.cluster_mash_database(Mdb)
    g2c = Cdb.set_index('genome')['primary_cluster'].to_dict()
    # Same-species genomes share a primary cluster; different species do not
    assert g2c['Enterococcus_faecalis_T2.fna'] == g2c['Enterococcus_faecalis_TX0104.fa']
    assert g2c['Enterococcus_faecalis_T2.fna'] != g2c['Enterococcus_casseliflavus_EC20.fasta']

    # Make sure storage is correct
    wd = drep.WorkDirectory.WorkDirectory(self.wd_loc)
    wd.store_special('primary_linkage', cluster_ret)
    wd.load_cached()
    got = wd.get_cluster('primary_linkage')
    assert len(got) == 3
def test_cluster_functional_1(self):
    '''
    Cluster the 5 genomes using default settings
    '''
    genomes = self.genomes
    wd_loc = self.wd_loc
    s_wd_loc = self.s_wd_loc

    args = argumentParser.parse_args(['dereplicate', wd_loc, '-g'] + genomes)
    kwargs = vars(args)
    drep.d_cluster.controller.d_cluster_wrapper(wd_loc, **kwargs)

    # args = argumentParser.parse_args(['cluster',wd_loc,'-g']+genomes)
    # controller = Controller()
    # controller.parseArguments(args)

    # Verify against the solutions work directory
    Swd = WorkDirectory(s_wd_loc)
    wd = WorkDirectory(wd_loc)

    # Confirm Cdb.csv is correct
    db1 = Swd.get_db('Cdb')
    db2 = wd.get_db('Cdb')
    assert test_utils.compare_dfs(db1, db2), "{0} is not the same!".format('Cdb')
def test_cluster_functional_2(self):
    '''
    Cluster the 5 genomes using gANI
    '''
    genomes = self.genomes
    wd_loc = self.wd_loc
    s_wd_loc = self.s_wd_loc

    # Make sure gANI is installed; bail out gracefully if not
    loc, works = find_program('ANIcalculator')
    if (loc == None or works == False):
        print('Cannot locate the program {0}- skipping related tests'\
            .format('ANIcalculator (for gANI)'))
        return

    args = argumentParser.parse_args(['cluster',wd_loc,'--S_algorithm',\
        'gANI','-g']+genomes)
    controller = Controller()
    controller.parseArguments(args)

    # Verify against the solutions work directory
    Swd = WorkDirectory(s_wd_loc)
    wd = WorkDirectory(wd_loc)

    # Confirm Cdb.csv is correct (ignoring the algorithm column, which differs)
    db1 = Swd.get_db('Cdb')
    del db1['comparison_algorithm']
    db2 = wd.get_db('Cdb')
    del db2['comparison_algorithm']
    assert test_utils.compare_dfs(db1, db2), "{0} is not the same!".format('Cdb')
def test_cluster_functional_3(self):
    '''
    Cluster the 5 genomes using ANImf
    '''
    genomes = self.genomes
    wd_loc = self.wd_loc
    s_wd_loc = self.s_wd_loc

    # args = argumentParser.parse_args(['cluster',wd_loc,'--S_algorithm',\
    #     'ANImf','-g']+genomes)
    # controller = Controller()
    # controller.parseArguments(args)
    args = argumentParser.parse_args(['dereplicate',wd_loc,'--S_algorithm', 'ANImf','-g']+genomes)
    kwargs = vars(args)
    drep.d_cluster.controller.d_cluster_wrapper(wd_loc, **kwargs)

    # Verify against the solutions work directory
    Swd = WorkDirectory(s_wd_loc)
    wd = WorkDirectory(wd_loc)

    # Confirm Cdb.csv is correct (ignoring the algorithm column, which differs)
    db1 = Swd.get_db('Cdb')
    del db1['comparison_algorithm']
    db2 = wd.get_db('Cdb')
    del db2['comparison_algorithm']
    assert test_utils.compare_dfs(db1, db2), "{0} is not the same!".format('Cdb')
def test_cluster_functional_4(self):
    '''
    Cluster the 5 genomes using fastANI
    '''
    genomes = self.genomes
    wd_loc = self.wd_loc
    s_wd_loc = self.s_wd_loc

    args = argumentParser.parse_args(['dereplicate',wd_loc,'--S_algorithm',\
        'fastANI','-g']+genomes)
    # controller = Controller()
    # controller.parseArguments(args)
    # args = argumentParser.parse_args(['dereplicate', wd_loc, '--S_algorithm', 'ANImf', '-g'] + genomes)
    kwargs = vars(args)
    drep.d_cluster.controller.d_cluster_wrapper(wd_loc, **kwargs)

    # Verify against the solutions work directory
    Swd = WorkDirectory(s_wd_loc)
    wd = WorkDirectory(wd_loc)

    # Confirm Cdb.csv is correct (ignoring the algorithm column, which differs)
    db1 = Swd.get_db('Cdb')
    del db1['comparison_algorithm']
    db2 = wd.get_db('Cdb')
    del db2['comparison_algorithm']
    assert test_utils.compare_dfs(db1, db2), "{0} is not the same!".format('Cdb')
def test_skipsecondary(self):
    '''
    Cluster with --SkipSecondary: Mdb is produced but Ndb stays empty.
    '''
    genomes = self.genomes
    wd_loc = self.wd_loc
    s_wd_loc = self.s_wd_loc

    args = argumentParser.parse_args(['dereplicate',wd_loc,'-g'] +genomes \
        + ['--SkipSecondary'])
    # controller = Controller()
    # controller.parseArguments(args)
    kwargs = vars(args)
    drep.d_cluster.controller.d_cluster_wrapper(wd_loc, **kwargs)

    # Verify against the solutions work directory
    Swd = WorkDirectory(s_wd_loc)
    wd = WorkDirectory(wd_loc)

    # Confirm Mdb.csv is correct
    db1 = Swd.get_db('Mdb')
    db2 = wd.get_db('Mdb')
    #assert compare_dfs(db1, db2), "{0} is not the same!".format('Mdb')

    # Confirm Ndb.csv doesn't exist (secondary comparisons were skipped)
    db2 = wd.get_db('Ndb')
    assert db2.empty, 'Ndb is not empty'
"mattolm@gmail.com"
] | mattolm@gmail.com |
53f3c314f83658c91de872c57b6b1dc85e6919c0 | 758f1f9b7464c9ebe40a9235a463ae5099512b20 | /wlm_test/dev_io.py | 6c4c667051da4ef2b23be5381ef9fda1062c9745 | [] | no_license | chenlianllik/python_study | 3ef074337d0390ad04db065701239c27ccf73cba | 46eb490e6f92d162006e15392e5e1bd39cdba3bd | refs/heads/master | 2021-10-23T08:34:25.755469 | 2019-03-15T20:06:57 | 2019-03-15T20:06:57 | 115,893,817 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,570 | py | import os
import string
import time
import datetime
import random
from offset_parser import wlm_offset_parser
class wlan_device(object):
def __init__(self, device_port):
if device_port == 'sim':
self.device_port = device_port
self.__pwr_state = 'off'
self.__wlm_stats_req_time = time.time()
return None
out = os.popen('adb devices')
cmd_out = out.read()
if device_port in cmd_out:
print "find adb device:"+device_port
os.popen('adb -s ' + device_port + ' root')
time.sleep(3)
os.popen('adb -s ' + device_port + ' wait-for-device root')
else:
print "can't find adb device:"+self.device_port
self.device_port = None
return
self.device_port = device_port
self.__pwr_state = 'off'
self.__wlm_offset_map = None
self.__wlm_stats_req_time = time.time()
self.__wlam_last_ac_stats_dict = dict()
def get_rssi(self):
if self.device_port == 'sim':
rssi_meta_list = [random.randint(-5, 5), random.randint(-40, -45), random.randint(-30, -35), random.randint(-30, -45)]
print rssi_meta_list
return rssi_meta_list
if self.device_port != None:
#os.popen('adb -s ' + self.device_port + ' wait-for-device root')
rssi_meta_list = list()
out = os.popen('adb -s ' + self.device_port + ' shell iw wlan0 station dump')
cmd_out = out.read()
print cmd_out
if cmd_out.find('dBm') == -1:
return rssi_meta_list
ch0_rssi = int(cmd_out[cmd_out.find('[')+1:cmd_out.find(',')], 10)
ch1_rssi = int(cmd_out[cmd_out.find(',')+2:cmd_out.find(']')], 10)
cmb_rssi = int(cmd_out[cmd_out.find('dBm')-4:cmd_out.find('dBm')-1], 10)
rssi_meta_list = [float(ch0_rssi - ch1_rssi), float(ch0_rssi), float(ch1_rssi), float(cmb_rssi)]
print rssi_meta_list
return rssi_meta_list
def set_power_state(self, pwr_state):
if self.device_port == 'sim':
if pwr_state != self.__pwr_state:
self.__pwr_state = pwr_state
return
if self.device_port != None:
if pwr_state == 'off' and self.__pwr_state == 'on':
os.popen('adb -s ' + self.device_port + ' shell iwpriv wlan0 setUnitTestCmd 19 3 1 0 1')
self.__pwr_state = pwr_state
elif pwr_state == 'on' and self.__pwr_state == 'off':
os.popen('adb -s ' + self.device_port + ' shell iwpriv wlan0 setUnitTestCmd 19 3 1 0 0')
self.__pwr_state = pwr_state
def get_link_info(self):
if self.device_port != None:
link_info_dict = dict()
out = os.popen('adb -s ' + self.device_port + ' shell iw wlan0 link')
cmd_list = out.read().split('\n')
for cmd in cmd_list:
tmp_list = cmd.split(': ')
if 'SSID' in cmd:
link_info_dict[tmp_list[0]] = tmp_list[1]
if 'freq' in cmd:
link_info_dict[tmp_list[0]] = tmp_list[1]
return link_info_dict
def get_ping_latency(self, ip_addr, count):
if self.device_port == 'sim':
if ip_addr == '':
return float(-1)
out = os.popen('ping -n 1 -w 2 '+ ip_addr)
cmd_out = out.read()
print cmd_out
if "timed out" in cmd_out:
return 1000
elif "time<" in cmd_out:
return 1
else:
cmd_out = cmd_out[cmd_out.find('Average =')+len('Average ='):]
cmd_out = cmd_out[:cmd_out.find('ms')]
return float(cmd_out)
else:
if ip_addr == '':
return float(-1)
ping_cmd = 'adb -s {} shell ping -i 0.08 -c {} -W 1 {}'.format(self.device_port, count, ip_addr)
out = os.popen(ping_cmd)
cmd_out = out.read()
print cmd_out
if '100% packet loss' in cmd_out:
return float(1000)
cmd_out = cmd_out[cmd_out.find('mdev = ')+len('mdev = '):]
cmd_out = cmd_out[:cmd_out.find(' ms')]
print float(cmd_out.split('/')[1])
return float(cmd_out.split('/')[1])
def get_wlm_link_stats(self, stats_value_list):
wlm_link_stats_dict = {}
if self.device_port == 'sim':
wlm_link_stats_dict['timestamp'] = "{0:.3f}".format(time.time() - self.__wlm_stats_req_time)
wlm_link_stats_dict['pwr_on_period'] = random.randint(0, 100)
wlm_link_stats_dict['congestion_level'] = random.randint(0, 50)
wlm_link_stats_dict['bcn_rssi'] = random.randint(-96, 0)
wlm_link_stats_dict['scan_period'] = random.randint(0, 50)
wlm_link_stats_dict['phy_err'] = random.randint(0, 100)
wlm_link_stats_dict['mpdu_err'] = random.randint(0, 100)
wlm_link_stats_dict['last_tx_rate'] = random.randint(0, 100)
#self.__last_wlm_stats_req_time = time.time()
else:
wlm_link_stats_dict['timestamp'] = "{0:.3f}".format(time.time() - self.__wlm_stats_req_time)
wlm_link_stats_dict['pwr_on_period'] = int(stats_value_list[self.__wlm_offset_map['pwr_on_period'][0]], 16)
wlm_link_stats_dict['congestion_level'] = int(stats_value_list[self.__wlm_offset_map['congestion_level'][0]], 16)
tmp_str = stats_value_list[self.__wlm_offset_map['bcn_rssi'][0]]
tmp_str = tmp_str[len(tmp_str)-2:]
wlm_link_stats_dict['bcn_rssi'] = int(tmp_str, 16) - 256
wlm_link_stats_dict['scan_period'] = int(stats_value_list[self.__wlm_offset_map['scan_period'][0]], 16)
wlm_link_stats_dict['phy_err'] = int(stats_value_list[self.__wlm_offset_map['phy_err'][0]], 16)
wlm_link_stats_dict['mpdu_err'] = int(stats_value_list[self.__wlm_offset_map['mpdu_err'][0]], 16)
wlm_link_stats_dict['last_tx_rate'] = int(stats_value_list[self.__wlm_offset_map['last_tx_rate'][0]], 16)
#print wlm_link_stats_dict
return wlm_link_stats_dict
def get_wlm_ac_stats(self, stats_value_list):
wlm_ac_stats_dict = {}
if self.device_port == 'sim':
wlm_ac_stats_dict['tx_mpdu'] = [random.randint(0, 20),random.randint(0, 40),random.randint(0, 60),random.randint(0, 20)]
wlm_ac_stats_dict['rx_mpdu'] = [random.randint(0, 20),random.randint(0, 40),random.randint(0, 60),random.randint(0, 20)]
wlm_ac_stats_dict['tx_ampdu'] = [random.randint(0, 10),random.randint(0, 30),random.randint(0, 50),random.randint(0, 10)]
wlm_ac_stats_dict['rx_ampdu'] = [random.randint(0, 10),random.randint(0, 30),random.randint(0, 50),random.randint(0, 10)]
wlm_ac_stats_dict['mpdu_lost'] = [random.randint(0, 10),random.randint(0, 30),random.randint(0, 50),random.randint(0, 10)]
wlm_ac_stats_dict['retries'] = [random.randint(0, 10),random.randint(0, 10),random.randint(0, 10),random.randint(0, 10)]
wlm_ac_stats_dict['contention_time_avg'] = [random.randint(0, 100),random.randint(0, 100),random.randint(0, 100),random.randint(0, 100)]
#self.__last_wlm_stats_req_time = time.time()
else:
def __calc_wlm_ac_stats(stats_value_list, ac_stats_name):
tmp_list = [int(stats_value_list[offset], 16) for offset in self.__wlm_offset_map[ac_stats_name]]
if not self.__wlam_last_ac_stats_dict.has_key(ac_stats_name):
self.__wlam_last_ac_stats_dict[ac_stats_name] = tmp_list
return tmp_list
else:
delta_list = [tmp_list[i] - self.__wlam_last_ac_stats_dict[ac_stats_name][i] for i in xrange(len(tmp_list))]
self.__wlam_last_ac_stats_dict[ac_stats_name] = tmp_list
return delta_list
wlm_ac_stats_dict['tx_mpdu'] = __calc_wlm_ac_stats(stats_value_list, 'tx_mpdu')
wlm_ac_stats_dict['rx_mpdu'] = __calc_wlm_ac_stats(stats_value_list, 'rx_mpdu')
wlm_ac_stats_dict['tx_ampdu'] = __calc_wlm_ac_stats(stats_value_list, 'tx_ampdu')
wlm_ac_stats_dict['rx_ampdu'] = __calc_wlm_ac_stats(stats_value_list, 'rx_ampdu')
wlm_ac_stats_dict['mpdu_lost'] = __calc_wlm_ac_stats(stats_value_list, 'mpdu_lost')
wlm_ac_stats_dict['retries'] = __calc_wlm_ac_stats(stats_value_list, 'retries')
wlm_ac_stats_dict['contention_time_avg'] = [int(stats_value_list[offset], 16) for offset in self.__wlm_offset_map['contention_time_avg']]
#print wlm_ac_stats_dict
return wlm_ac_stats_dict
def get_wlm_stats(self):
if self.device_port == 'sim':
return self.get_wlm_link_stats(None), self.get_wlm_ac_stats(None)
else:
stats_value_list = []
out = os.popen('adb -s ' + self.device_port + ' shell iwpriv wlan0 get_wlm_stats 3')
cmd_out = out.read()
cmd_out = cmd_out[cmd_out.find('data')+6:].rstrip()
stats_value_list = cmd_out.split(' ')
#print stats_value_list
return self.get_wlm_link_stats(stats_value_list), self.get_wlm_ac_stats(stats_value_list)
def prepare_wlm_stats(self):
if self.device_port == 'sim':
pass
else:
if self.__wlm_offset_map == None:
self.__wlm_offset_map = wlm_offset_parser('wlm_stats_offset_map.csv')
out = os.popen('adb -s ' + self.device_port + ' shell iwpriv wlan0 get_wlm_stats 3')
cmd_out = out.read()
cmd_out = cmd_out[cmd_out.find('data')+6:].rstrip()
stats_value_list = cmd_out.split(' ')
self.get_wlm_ac_stats(stats_value_list)
def set_wlm_latency_mode(self, mode):
if self.device_port == 'sim':
pass
else:
if mode == 'ultra-low':
os.popen('adb -s ' + self.device_port + ' shell iwpriv wlan0 setUnitTestCmd 0x2f 5 0 3 20 20 0xc83')
elif mode == 'Moderate':
os.popen('adb -s ' + self.device_port + ' shell iwpriv wlan0 setUnitTestCmd 0x2f 5 0 1 60 60 0x8')
elif mode == 'low':
os.popen('adb -s ' + self.device_port + ' shell iwpriv wlan0 setUnitTestCmd 0x2f 5 0 2 40 40 0x8a')
elif mode == 'normal':
os.popen('adb -s ' + self.device_port + ' shell iwpriv wlan0 setUnitTestCmd 0x2f 5 0 0 0 0 0x0 ')
else:
print "do not support this mode:{}".format(mode)
def get_wlan_device_list():
    """Return serial numbers of all adb-attached devices in 'device' state."""
    raw = os.popen('adb devices').read()
    return [entry[:entry.find('\tdevice')]
            for entry in raw.split('\n')
            if '\tdevice' in entry]
if __name__ == '__main__':
    # Ad-hoc smoke test: poll WLM stats from a hard-coded device,
    # 10 samples at 3-second intervals.
    dev = wlan_device('7e2cc7ce')
    dev.prepare_wlm_stats()
    for _ in range(10):
        dev.get_wlm_stats()
        time.sleep(3)
| [
"chenlianllik@gmail.com"
] | chenlianllik@gmail.com |
329b5265d642b1e6b1b3b062231b4a750e9e02c9 | d82158e8cf7423dcb1aaaa648406c64f7cf2871a | /src/opinion_politic/methods/bert_lstm_model.py | 3da41c8b643f9906e749b783073394304ac8bf06 | [] | no_license | arahmatiiii/opinion_politic | 9c1b0736263a305b5c12d7348f2901c6cad90b74 | 83b4b23aff3db06b1169d6cd1cd63570e29fd643 | refs/heads/main | 2023-03-10T10:11:18.785884 | 2021-02-23T15:22:44 | 2021-02-23T15:22:44 | 319,080,305 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,114 | py | """
bert_lstm_model.py is written for bert lstm model
"""
import torch
from torch import nn
class BertLstm(nn.Module):
    """
    Frozen BERT embeddings feeding an (optionally bidirectional) LSTM,
    whose final hidden state is classified by a linear head.
    """
    def __init__(self, **kwargs):
        super().__init__()

        # kwargs['bert'] is a pretrained transformer whose config carries the
        # embedding width (hidden_size).
        self.bert = kwargs['bert']
        embedding_dim = kwargs['bert'].config.to_dict()['hidden_size']

        # nn.LSTM only applies its internal dropout between stacked layers,
        # hence the n_layers > 1 guard.
        self.lstm = nn.LSTM(embedding_dim,
                            hidden_size=kwargs["hidden_dim"],
                            num_layers=kwargs["n_layers"],
                            bidirectional=kwargs["bidirectional"],
                            dropout=kwargs["middle_dropout"] if kwargs["n_layers"] > 1 else 0)

        # Bidirectional LSTMs concatenate forward/backward states, doubling
        # the feature width seen by the classifier.
        self.fc = nn.Linear(kwargs["hidden_dim"] * 2 if kwargs["bidirectional"] else kwargs["hidden_dim"],
                            kwargs["output_dim"])

        # NOTE(review): start_dropout and final_dropout are constructed but
        # never applied in forward() — confirm whether that is intentional.
        # (Removing them would change the saved state dict keys.)
        self.start_dropout = nn.Dropout(kwargs["start_dropout"])
        self.middle_dropout = nn.Dropout(kwargs["middle_dropout"])
        self.final_dropout = nn.Dropout(kwargs["final_dropout"])

    def forward(self, text):
        # text.size() = [batch size, sent len]
        with torch.no_grad():
            # BERT is frozen: no gradients flow into the encoder.
            embedded = self.bert(text)[0]
        # embedded.size() = [batch size, sent len, 768]

        # LSTM expects [sent len, batch size, emb dim]
        embedded = embedded.permute(1, 0, 2)

        # pass embeddings into LSTM
        outputs, (hidden, cell) = self.lstm(embedded)
        # output.size() = [sent len, batch size, hid dim * n directions]
        # hidden/cell = [n layers * n directions, batch size, hid dim]

        if self.lstm.bidirectional:
            # Concatenate the last forward and last backward hidden states.
            hidden_can = torch.cat((hidden[-2, :, :], hidden[-1, :, :]), dim=1)
            hidden_can = self.middle_dropout(hidden_can)
        else:
            hidden_can = hidden[-1, :, :]
            hidden_can = self.middle_dropout(hidden_can)

        return self.fc(hidden_can)
if __name__ == '__main__':
    # Smoke test: build the model and run one forward pass.
    # NOTE(review): `bert_model` is not defined anywhere in this module, so
    # running this file directly raises NameError — confirm where the BERT
    # instance is meant to come from before relying on this block.
    model = BertLstm(bert=bert_model, hidden_dim=256, n_layers=1, bidirectional=True, start_dropout=0.2,
                     middle_dropout=0.2, final_dropout=0.2, output_dim=2)
    x = torch.rand((150, 64))
    model.forward(x.long())
| [
"a.rahmati74@gmail.com"
] | a.rahmati74@gmail.com |
635fdeb7219a1c73f494564203158698ab93bbfc | cebcb859c851012bde92943a8cadef4d7cd0ae32 | /s11.py | 320079aa710c944820c31a2cf0697ad833c3e037 | [] | no_license | mskesselring/RFAntennaAutomation | 032d78bdad70ee14ff6511b114d805cac57e52bd | f1791ed026bad7aea029c7f3172624828b01f313 | refs/heads/master | 2020-06-23T00:58:42.219301 | 2019-07-23T15:18:50 | 2019-07-23T15:18:50 | 198,452,569 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,301 | py | ################################################################################
# Project: NCSU ECE PREAL 2.0 Senior Design Project
# File: s11.py
# Author(s): Matthew Kesselring
# Date: May 2019
################################################################################
# Local files
from networkAnalyzer import NetworkAnalyzer
from functions import *
from plotting import Plotting
# Standard libraries
import sys
import logging
from datetime import datetime
import numpy
# Installed libraries
# Module-level handles shared between s11() (which opens them) and
# sweep_s11() (which uses them). All are populated inside s11().
motorSet = []  # List of motor objects returned by motors_init()
mc = None  # Motor controller connection (motor_control_init())
db = None  # MySQL database connection (db_init())
mycursor = None  # Cursor on `db`
analyzer = None  # NetworkAnalyzer instrument handle
# ==============================================================================
# Test routine
#
def sweep_s11(log, f1, f2, nums, rstart, angle, rstop, tpolar, cpolar):
# --------------------------------------------------------------------------
# Initialize values
#
ant_no = int(
numpy.floor((rstop - rstart) / angle) + 1) # Number of degree steps
# If meas 0-360, don't take measurement at 360
if (rstop == 360) and (rstart == 0):
ant_no = ant_no - 1
#
# End initialize values
# --------------------------------------------------------------------------
# --------------------------------------------------------------------------
# Reset motor positions
#
motorSet[STAND_ROTATION].goto_zero()
set_polarization(log, motorSet, tpolar, cpolar, mycursor)
#
# End reset motor positions
# --------------------------------------------------------------------------
# --------------------------------------------------------------------------
# Move test antenna to start degree position
#
log.info("Start Position: " + str(rstart))
motorSet[M1].rot_deg(rstart)
log.info("Motor setup complete")
#
# End move test antenna to start position
# --------------------------------------------------------------------------
# --------------------------------------------------------------------------
# Load state
#
analyzer.load_state()
#
# End load state
# --------------------------------------------------------------------------
# --------------------------------------------------------------------------
# Set network analyzer parameters
#
channel = 1
trace = 1
analyzer.setup(channel, trace)
# analyzer.enable_display(False)
# Set start frequency
start = float(analyzer.set_start(channel, f1))
if f1 != start:
msg = "WARNING: Invalid start frequency, using " + str(start)
print(msg)
log.warning(msg)
# f1_old = f1
f1 = start
# Set stop frequency
stop = float(analyzer.set_stop(channel, f2))
if f2 != stop:
msg = "WARNING: Invalid stop frequency, using " + str(stop)
print(msg)
log.warning(msg)
# f2_old = f2
f2 = stop
# Set number of points
points = int(analyzer.set_points(channel, nums))
if nums != points:
msg = "WARNING: Invalid number of steps, using " + str(points)
print(msg)
log.warning(msg)
# nums_old = nums
nums = points
# Create csv files
# d = datetime.today()
# file_name = os.path.join(DATA_PATH, d.strftime("%Y%m%d%H%M%S"))
# s11_filename = file_name + "_s11.csv"
s11_filename = os.path.join(DATA_PATH, "S11.csv")
s11File = open(s11_filename, "w")
#
# End set network analyzer parameters
# --------------------------------------------------------------------------
# --------------------------------------------------------------------------
# Check for network analyzer errors
log.info("Checking network analyzer error queue")
err_nums, err_msgs = analyzer.get_errors()
if len(err_nums) > 0:
msg = "Error in setting network analyzer parameters"
print(msg)
log.warning(msg)
else:
# No errors
log.info("No network analyzer errors detected")
#
# --------------------------------------------------------------------------
# --------------------------------------------------------------------------
# Measure S11 (actually S22)
#
log.info("Measuring S11")
print("Starting S11 Measurement")
print("Start Frequency: " + str(f1 / 1e9) + " GHz")
print("Stop Frequency: " + str(f2 / 1e9) + " GHz")
print("Number of Points: " + str(nums))
analyzer.set_measurement(channel, trace, 2, 2)
analyzer.trigger()
analyzer.update_display()
analyzer.auto_scale(channel, trace)
s11Freq = analyzer.get_x(channel)
s11Data = analyzer.get_corr_data(channel)
# s11Data = analyzer.get_form_data(channel)
# Write to csv file
log.info("Writing s11 data to file")
s11File.write(s11Freq)
s11File.write(s11Data)
#
# --------------------------------------------------------------------------
# --------------------------------------------------------------------------
# Check for network analyzer errors
log.info("Checking network analyzer error queue")
err_nums, err_msgs = analyzer.get_errors()
if len(err_nums) > 0:
msg = "Error measuring S11"
print(msg)
log.warning(msg)
else:
# No errors
msg = "S11 Measurement Successful"
print(msg)
log.info(msg)
#
# --------------------------------------------------------------------------
# --------------------------------------------------------------------------
# Reset motor positions
#
motorSet[STAND_ROTATION].goto_zero()
#
# End reset motor positions
# --------------------------------------------------------------------------
# --------------------------------------------------------------------------
# Close csv files
#
s11File.close()
#
# --------------------------------------------------------------------------
# --------------------------------------------------------------------------
# Update database
#
if db.is_connected():
fstart = f1 / 1e9
fstop = f2 / 1e9
rowcount = mycursor.rowcount
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Antenna polarization
#
log.info("Updating tpolar and cpolar in sql database")
update_config_db(log, mycursor, 0, "'antenna_polarization'")
update_config_db(log, mycursor, 0, "'chamber_polarization'")
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Network analyzer parameters
#
log.info("Updating fstart, fstop, and nums in sql database")
update_config_db(log, mycursor, fstart, "'frequency_start'")
update_config_db(log, mycursor, fstop, "'frequency_stop'")
update_config_db(log, mycursor, nums, "'num_steps'")
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Commit changes
log.info("Committing changes")
db.commit()
if rowcount == mycursor.rowcount:
log.warning("Failed to store updated antenna polarization data")
#
# End update database
# --------------------------------------------------------------------------
# --------------------------------------------------------------------------
# Call plotting function and write zip file
#
Plotting(f1, f2, nums, rstart, angle, rstop, 0, 0, 0, 0, 0, 0, "s11")
# create_zip(file_name, [s11_filename])
#
# End normalization
# --------------------------------------------------------------------------
#
# End test routine
# ==============================================================================
# ==============================================================================
# Main function
#
def s11(args):
    """
    Entry point for the S11 measurement routine.

    Sets up logging, parses/validates the command-line parameters, opens the
    motor controller, network analyzer and database, runs sweep_s11(), and
    always closes the instruments on the way out.

    :param args: list of 8 strings:
        [f1_GHz, f2_GHz, nums, rstart_deg, angle_deg, rstop_deg,
         tpolar, cpolar].
    :return: 0 on success, 1 on any error.
    """
    rv = 0  # Initialize return value
    # --------------------------------------------------------------------------
    # Set up log file
    #
    logging.basicConfig(level=logging.DEBUG)
    log = logging.getLogger("S11")  # Get local logger
    # Create and format handler to write to file "log_[MONTH]_[YEAR].log"
    handler = logging.FileHandler(
        'log_' + datetime.today().strftime('%m_%Y') + '.log')
    handler.setLevel(LOG_LEVEL)
    formatter = logging.Formatter(
        fmt='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S')
    handler.setFormatter(formatter)
    # Add handler to logger and specify properties
    log.addHandler(handler)
    log.setLevel(LOG_LEVEL)
    # Get logger for pyvisa module and set level to log warnings and errors
    visa_log = logging.getLogger('pyvisa')
    visa_log.setLevel(IMPORT_LOG_LEVEL)
    visa_log.addHandler(handler)
    sql_log = logging.getLogger('mysql.connector')
    sql_log.setLevel(IMPORT_LOG_LEVEL)
    sql_log.addHandler(handler)
    log.info("")
    log.info("")
    #
    # End log setup
    # --------------------------------------------------------------------------
    # --------------------------------------------------------------------------
    # Parse CL arguments
    #
    try:
        f1 = float(args[0]) * 1e9
        f2 = float(args[1]) * 1e9
        nums = int(args[2])
        rstart = float(args[3])
        angle = float(args[4])
        rstop = float(args[5])
        tpolar = float(args[6])
        cpolar = float(args[7])
    except ValueError:
        msg = "Error: Could not parse command line arguments " + str(args)
        print(msg)
        log.exception(msg)
        return 1
    except IndexError:
        # BUG FIX: this routine consumes 8 arguments, but the old message
        # claimed "Expected 3".
        msg = "Error: Invalid number of command line arguments. " \
              + "Expected 8, received " + str(len(args))
        print(msg)
        log.exception(msg)
        return 1
    #
    # End parse CL arguments
    # --------------------------------------------------------------------------
    # --------------------------------------------------------------------------
    # Validate parameters
    #
    (f1, f2, nums, rstart, angle, rstop, tpolar, cpolar) = validate_parameters(
        log, f1, f2, nums, rstart, angle, rstop, tpolar, cpolar)
    #
    # End validate parameters
    # --------------------------------------------------------------------------
    # --------------------------------------------------------------------------
    # Attempt test
    #
    try:
        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        # Open motor controller
        global mc
        mc = motor_control_init(log)
        # If mc object is empty
        if not mc:
            raise IOError('Opening motor controller failed')
        else:
            log.info("Motor controller " + str(mc) + " opened")
        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        # Open network analyzer
        #
        global analyzer
        log.info("Attempting connection to network analyzer")
        analyzer = NetworkAnalyzer()
        log.info("Successfully connected to network analyzer")
        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        # Connect to database
        #
        global db, mycursor
        log.info("Attempting connection to database")
        db, mycursor = db_init()
        if db.is_connected():
            log.info("Successfully connected to database")
        else:
            raise Exception("Failed to connect to database")
        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        # Initialize motors
        #
        global motorSet
        log.info("Initializing motors")
        motorSet = motors_init(mc)
        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        # Run test routine
        #
        sweep_s11(log, f1, f2, nums, rstart, angle, rstop, tpolar, cpolar)
    #
    # End attempt alignment
    # --------------------------------------------------------------------------
    # --------------------------------------------------------------------------
    # Handle exceptions/errors
    #
    except BaseException as e:
        # Deliberately broad: any failure (including KeyboardInterrupt) must
        # still be logged and fall through to the hardware cleanup below.
        # print("Error Measuring S11. Check log file for details")
        print(e)
        log.exception('Error from runTest.main():')
        # Set return value to 1 (error)
        rv = 1
    #
    # End handle exceptions/errors
    # --------------------------------------------------------------------------
    # --------------------------------------------------------------------------
    # Close instruments and return (always executed with or without
    # exceptions/errors)
    #
    finally:
        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        # Close motor controller
        #
        if mc:
            log.info("Closing motor controller " + str(mc))
            # stop interactive mode (motor number does not matter)
            # BUG FIX: if an error occurred before motors_init() ran,
            # motorSet is still empty and indexing it here raised an
            # IndexError that masked the original exception.
            if motorSet:
                motorSet[M1].quit_online()
            mc.close()
        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        # Close network analyzer
        #
        if analyzer:
            analyzer.enable_display(True)
            log.info("Closing network analyzer")
            analyzer.close()
        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        # Close database connection
        #
        if db:
            if db.is_connected():
                log.info("Closing database connection")
            db.close()
        # NOTE: return inside finally is intentional here — the except
        # clause above already recorded any failure in rv.
        return rv  # Return 1 if error, 0 else
    #
    # End close instruments and return
    # --------------------------------------------------------------------------
# ==============================================================================
# Enter from command line
#
if __name__ == "__main__":
    argv = sys.argv  # Store command line arguments
    argv.pop(0)  # Drop the script name; the remaining 8 args go to s11()
    # Call main function and pass return status to system
    sys.exit(s11(argv))  # Exit status: 0 on success, 1 on error
#
# End enter from command line
# ==============================================================================
| [
"33323500+mskesselring@users.noreply.github.com"
] | 33323500+mskesselring@users.noreply.github.com |
6f6bbd7824afebb390fcad7b60006d07593eaeb0 | f445450ac693b466ca20b42f1ac82071d32dd991 | /generated_tempdir_2019_09_15_163300/generated_part005963.py | 536d9214289d3cb10209ba7b567a2e1a915c7dca | [] | no_license | Upabjojr/rubi_generated | 76e43cbafe70b4e1516fb761cabd9e5257691374 | cd35e9e51722b04fb159ada3d5811d62a423e429 | refs/heads/master | 2020-07-25T17:26:19.227918 | 2019-09-15T15:41:48 | 2019-09-15T15:41:48 | 208,357,412 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,302 | py | from sympy.abc import *
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy import *
from matchpy.utils import VariableWithCount
from collections import deque
from multiset import Multiset
from sympy.integrals.rubi.constraints import *
from sympy.integrals.rubi.utility_function import *
from sympy.integrals.rubi.rules.miscellaneous_integration import *
from sympy import *
class CommutativeMatcher141988(CommutativeMatcher):
_instance = None
patterns = {
0: (0, Multiset({}), [
(VariableWithCount('i2.3.3.1.0', 1, 1, None), Mul),
(VariableWithCount('i2.3.3.1.0_1', 1, 1, S(1)), Mul)
])
}
subjects = {}
subjects_by_id = {}
bipartite = BipartiteGraph()
associative = Mul
max_optional_count = 1
anonymous_patterns = set()
def __init__(self):
self.add_subject(None)
@staticmethod
def get():
if CommutativeMatcher141988._instance is None:
CommutativeMatcher141988._instance = CommutativeMatcher141988()
return CommutativeMatcher141988._instance
@staticmethod
def get_match_iter(subject):
subjects = deque([subject]) if subject is not None else deque()
subst0 = Substitution()
# State 141987
return
yield
from collections import deque | [
"franz.bonazzi@gmail.com"
] | franz.bonazzi@gmail.com |
45ed7bdd012349da002661ea64dd83789fb70d17 | 402fde0430220fc80f907fa78a68fb261e205f4d | /ciana/quadrado.py | 306524a1930bbf288aff4eed9da82a2cf322aecb | [] | no_license | cglima/python-learning | 2f697fc7d08cf40aa59fe7e4f0e0a456596444f5 | d293d4037d5e8ef21eb6d452d3cd2fcf9de3b0ed | refs/heads/master | 2020-09-11T11:48:22.108042 | 2020-02-29T04:44:44 | 2020-02-29T04:44:44 | 222,054,550 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 422 | py | """Faça um programa em Python que receba (entrada de dados) o valor correspondente ao lado de um quadrado,
calcule e imprima (saída de dados) seu perímetro e sua área
Observação: a saída deve estar no formato: "perímetro: x - área: y """
# Read the side length of a square, then print its perimeter and area in
# the required "perímetro: x - área: y" format.
side = int(input('Digite o valor correspondente ao lado de um quadrado:'))
perimeter = 4 * side
square_area = side * side
print('"perímetro:', perimeter, '-', 'área:', square_area,'"')
"lima.cglima@gmail.com"
] | lima.cglima@gmail.com |
da947df31b1862c93d63e99aaf8b086359dd6bf3 | 793c9844ebc3fb46b25fb5e6db421a074d7c7330 | /regression/controller/distributions.py | 2a2f698e70295ea1de26aa9e5a140919e78b7f03 | [] | no_license | pomonam/NoisyNaturalGradient | 7d904dc2612f955206ed786e51e0043c667b7485 | cd84820eb3abc3f6dad4fde691679b0af0aecdf3 | refs/heads/master | 2020-04-14T15:01:28.455766 | 2019-01-12T22:13:15 | 2019-01-12T22:13:15 | 163,914,052 | 63 | 12 | null | 2019-01-03T21:05:47 | 2019-01-03T02:36:48 | Python | UTF-8 | Python | false | false | 21,832 | py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from zhusuan.distributions import Distribution
from zhusuan.distributions.utils import assert_same_float_dtype
from zhusuan.model.stochastic import StochasticTensor
import tensorflow as tf
import numpy as np
class MatrixVariateNormal(StochasticTensor):
    """
    The class of MatrixVariateNormal `StochasticTensor`.
    See :class:`~zhusuan.model.base.StochasticTensor` for details.

    Thin wrapper that builds a :class:`DMatrixVariateNormal` distribution
    from either the covariance matrices (``u``, ``v``) or their square-root
    factors (``u_c``, ``v_c``) and registers it under ``name``.

    :param name: Name of the `StochasticTensor`.
    :param mean: Mean matrices, a (N+2)-D `float` Tensor of shape (..., n, p).
    :param u: Row covariance matrices, shape (..., n, n), positive definite.
    :param v: Column covariance matrices, shape (..., p, p), positive definite.
    :param u_c: Square-root factors with u_c u_c^T = u.
    :param v_c: Square-root factors with v_c v_c^T = v.
    :param u_c_logdet: Optional precomputed log-determinant of u_c.
    :param v_c_logdet: Optional precomputed log-determinant of v_c.
    :param n_samples: Number of samples to draw.
    :param group_event_ndims: Number of `batch_shape` dims (from the end)
        grouped into a single event.
    :param is_reparameterized: A Bool. If True, gradients on samples can
        propagate into inputs via the reparameterization trick.
    :param check_numerics: Bool. Whether to check numeric issues.
    """

    def __init__(self,
                 name,
                 mean,
                 u=None,
                 v=None,
                 u_c=None,
                 v_c=None,
                 u_c_logdet=None,
                 v_c_logdet=None,
                 n_samples=None,
                 group_event_ndims=0,
                 is_reparameterized=True,
                 check_numerics=False):
        # All distribution-specific options are forwarded unchanged.
        dist_kwargs = dict(
            u=u,
            v=v,
            u_c=u_c,
            v_c=v_c,
            u_c_logdet=u_c_logdet,
            v_c_logdet=v_c_logdet,
            group_event_ndims=group_event_ndims,
            is_reparameterized=is_reparameterized,
            check_numerics=check_numerics,
        )
        distribution = DMatrixVariateNormal(mean, **dist_kwargs)
        super(MatrixVariateNormal, self).__init__(name, distribution, n_samples)
class EigenMatrixVariateNormal(StochasticTensor):
    """
    The class of EigenMatrixVariateNormal `StochasticTensor`.
    See :class:`~zhusuan.model.base.StochasticTensor` for details.

    Wraps :class:`EigenMultivariateNormal`: a matrix normal parameterized by
    basis matrices ``u_b``/``v_b`` and per-entry variances ``r`` (samples
    are drawn as ``mean + u_b @ (eps * sqrt(r)) @ v_b^T``; see
    ``EigenMultivariateNormal._sample``). The previous docstring here was
    copy-pasted from the u/v/u_c/v_c parameterization and did not match
    this signature.

    :param name: Name of the `StochasticTensor`.
    :param mean: A (N+2)-D (N >= 0) `float` Tensor of shape (..., n, p); the
        mean matrix of the distribution.
    :param u_b: Row-side basis matrices, shape (..., n, n).
        (Presumably eigenvectors of the row covariance factor — confirm
        against the calling code.)
    :param v_b: Column-side basis matrices, shape (..., p, p).
    :param r: Per-entry variance tensor in the (u_b, v_b) basis; must be
        positive.
    :param n_samples: Number of samples to draw.
    :param group_event_ndims: A 0-D `int32` Tensor representing the number
        of dimensions in `batch_shape` (counted from the end) that are
        grouped into a single event.
    :param is_reparameterized: A Bool. If True, gradients on samples from
        this distribution are allowed to propagate into inputs, using the
        reparametrization trick from (Kingma, 2013).
    :param check_numerics: Bool. Whether to check numeric issues.
    """
    def __init__(self,
                 name,
                 mean,
                 u_b=None,
                 v_b=None,
                 r=None,
                 n_samples=None,
                 group_event_ndims=0,
                 is_reparameterized=True,
                 check_numerics=False):
        norm = EigenMultivariateNormal(
            mean,
            u_b=u_b,
            v_b=v_b,
            r=r,
            group_event_ndims=group_event_ndims,
            is_reparameterized=is_reparameterized,
            check_numerics=check_numerics
        )
        super(EigenMatrixVariateNormal, self).__init__(
            name, norm, n_samples)
class DMatrixVariateNormal(Distribution):
    """
    The class of Matrix variate Normal distribution.
    See :class:`~zhusuan.distributions.base.Distribution` for details.

    Exactly one of the parameterizations (``u``, ``v``) or
    (``u_c``, ``v_c``) must be supplied.

    :param mean: A (N+2)-D (N >= 0) `float` Tensor of shape (..., n, p). Each
        slice `[i, j, ..., k, :, :]` represents the mean matrix of the
        distribution.
    :param u: A (N+2)-D `float` Tensor of shape (..., n, n); row covariance
        matrices, positive definite.
    :param v: A (N+2)-D `float` Tensor of shape (..., p, p); column
        covariance matrices, positive definite.
    :param u_c: A (N+2)-D `float` Tensor of shape (..., n, n); each slice
        u_ci satisfies u_ci u_ci^T = u_i.
    :param v_c: A (N+2)-D `float` Tensor of shape (..., p, p); each slice
        v_ci satisfies v_ci v_ci^T = v_i.
    :param u_c_logdet: Optional precomputed log-determinant of `u_c`; if
        omitted it is recomputed via an eigendecomposition of `u`.
    :param v_c_logdet: Optional precomputed log-determinant of `v_c`.
    :param group_event_ndims: A 0-D `int32` Tensor: the number of dimensions
        in `batch_shape` (counted from the end) grouped into a single event.
    :param is_reparameterized: A Bool. If True, gradients on samples from
        this distribution are allowed to propagate into inputs, using the
        reparametrization trick from (Kingma, 2013).
    :param check_numerics: Bool. Whether to check numeric issues.
    """

    def __init__(self,
                 mean,
                 u=None,
                 v=None,
                 u_c=None,
                 v_c=None,
                 u_c_logdet=None,
                 v_c_logdet=None,
                 group_event_ndims=0,
                 is_reparameterized=True,
                 check_numerics=False):
        mean = tf.convert_to_tensor(mean)
        _assert_rank_op = tf.assert_greater_equal(
            tf.rank(mean), 2,
            message="mean should be at least a 2-D tensor.")
        with tf.control_dependencies([_assert_rank_op]):
            self._mean = mean

        def _eig_decomp(mat):
            # Symmetrize + jitter for numerical stability, then return the
            # square-root factor V diag(sqrt(e)) and log det of that factor.
            mat_t = transpose_last2dims(mat)
            e, v = tf.self_adjoint_eig((mat + mat_t) / 2 + tf.eye(tf.shape(mat)[-1]) * 1e-8)
            e = tf.maximum(e, 1e-10) ** 0.5
            return tf.matmul(v, tf.matrix_diag(e)), tf.reduce_sum(tf.log(e), -1)

        if u is not None and v is not None:
            u = tf.convert_to_tensor(u)
            _assert_shape_op_1 = tf.assert_equal(
                tf.shape(mean)[-2], tf.shape(u)[-1],
                message='second last dimension of mean should be the same \
                as the last dimension of U matrix')
            _assert_shape_op_2 = tf.assert_equal(
                tf.shape(u)[-1], tf.shape(u)[-2],
                message='second last dimension of U should be the same \
                as the last dimension of U matrix')
            with tf.control_dependencies([
                    _assert_shape_op_1, _assert_shape_op_2,
                    tf.check_numerics(u, 'U matrix')]):
                self._u = u
            v = tf.convert_to_tensor(v)
            _assert_shape_op_1 = tf.assert_equal(
                tf.shape(mean)[-1], tf.shape(v)[-1],
                message='last dimension of mean should be the same \
                as last dimension of V matrix')
            _assert_shape_op_2 = tf.assert_equal(
                tf.shape(v)[-1], tf.shape(v)[-2],
                message='second last dimension of V should be the same \
                as last dimension of V matrix')
            with tf.control_dependencies([
                    _assert_shape_op_1, _assert_shape_op_2,
                    tf.check_numerics(v, 'V matrix')]):
                self._v = v
            dtype = assert_same_float_dtype([(self._mean, 'MatrixVariateNormal.mean'),
                                             (self._u, 'MatrixVariateNormal.u'),
                                             (self._v, 'MatrixVariateNormal.v')])
            self._u_c, self._u_c_log_determinant = _eig_decomp(self._u)
            self._v_c, self._v_c_log_determinant = _eig_decomp(self._v)
        elif u_c is not None and v_c is not None:
            dtype = assert_same_float_dtype([(self._mean, 'MatrixVariateNormal.mean'),
                                             (u_c, 'MatrixVariateNormal.u_c'),
                                             (v_c, 'MatrixVariateNormal.v_c')])
            self._u_c = u_c
            self._v_c = v_c
            self._u = tf.matmul(self._u_c, transpose_last2dims(self._u_c))
            self._v = tf.matmul(self._v_c, transpose_last2dims(self._v_c))
            if u_c_logdet is not None:
                self._u_c_log_determinant = u_c_logdet
            else:
                # BUG FIX: previously assigned to `self.u_c_log_determinant`,
                # i.e. through the read-only property, which raises
                # AttributeError and never populates the backing field.
                _, self._u_c_log_determinant = _eig_decomp(self._u)
            if v_c_logdet is not None:
                self._v_c_log_determinant = v_c_logdet
            else:
                _, self._v_c_log_determinant = _eig_decomp(self._v)
        else:
            # Previously this fell through and crashed later with a NameError
            # on `dtype`; fail fast with a clear message instead.
            raise ValueError(
                "Either (u, v) or (u_c, v_c) must be provided to "
                "DMatrixVariateNormal.")
        super(DMatrixVariateNormal, self).__init__(
            dtype=dtype,
            param_dtype=dtype,
            is_continuous=True,
            is_reparameterized=is_reparameterized,
            group_ndims=group_event_ndims)

    @property
    def mean(self):
        """The mean of the MatrixVariateNormal distribution."""
        return self._mean

    @property
    def u(self):
        """The row variance matrix of the MatrixVariateNormal distribution."""
        return self._u

    @property
    def v(self):
        """The column variance matrix of the MatrixVariateNormal distribution."""
        return self._v

    @property
    def u_c(self):
        """
        The cholesky decomposition of row variance matrix of the
        MatrixVariateNormal distribution.
        """
        return self._u_c

    @property
    def u_c_log_determinant(self):
        """
        The log determinant of the cholesky decomposition matrix of the row
        variance matrix.
        """
        return self._u_c_log_determinant

    @property
    def v_c(self):
        """
        The cholesky decomposition of column variance matrix of the
        MatrixVariateNormal distribution.
        """
        return self._v_c

    @property
    def v_c_log_determinant(self):
        """
        The log determinant of the cholesky decomposition matrix of the column
        variance matrix.
        """
        return self._v_c_log_determinant

    def _value_shape(self):
        return tf.shape(self.mean)[-2:]

    def _get_value_shape(self):
        return self.mean.get_shape()[-2:]

    def _batch_shape(self):
        return tf.shape(self.mean)[:-2]

    def _get_batch_shape(self):
        return self.mean.get_shape()[:-2]

    def _sample(self, n_samples):
        """Draw samples as mean + u_c @ eps @ v_c^T with standard-normal eps."""
        mean, u_c, v_c = self.mean, self.u_c, self.v_c
        if not self.is_reparameterized:
            mean = tf.stop_gradient(mean)
            u_c = tf.stop_gradient(u_c)
            v_c = tf.stop_gradient(v_c)
        u_c = tile_ntimes(u_c, n_samples)
        v_c = tile_ntimes(v_c, n_samples)
        shape = tf.concat([[n_samples], self.batch_shape, self.value_shape], 0)
        epsilon = tf.random_normal(shape, dtype=self.dtype)
        v_c_t = transpose_last2dims(v_c)
        samples = mean + tf.matmul(tf.matmul(u_c, epsilon), v_c_t)
        static_n_samples = n_samples if isinstance(n_samples, int) else None
        samples.set_shape(
            tf.TensorShape([static_n_samples]).concatenate(
                self.get_batch_shape()).concatenate(self.get_value_shape()))
        return samples

    def _log_prob(self, given):
        """Log density of the matrix normal at `given` (leading sample dim)."""
        mean, u, v = self.mean, self.u, self.v
        if not self.is_reparameterized:
            mean = tf.stop_gradient(mean)
            u = tf.stop_gradient(u)
            v = tf.stop_gradient(v)
        u_inv = tile_ntimes(tf.matrix_inverse(u), tf.shape(given)[0])
        v_inv = tile_ntimes(tf.matrix_inverse(v), tf.shape(given)[0])
        E = given - mean
        Et = transpose_last2dims(given - mean)
        log_no = -0.5 * tf.trace(tf.matmul(tf.matmul(E, v_inv), tf.matmul(Et, u_inv)))
        p = tf.cast(tf.shape(mean)[-1], tf.float32)
        n = tf.cast(tf.shape(mean)[-2], tf.float32)
        log_de = 0.5 * n * p * np.log(2. * np.pi) \
            + n * self.v_c_log_determinant \
            + p * self.u_c_log_determinant
        log_prob = log_no - log_de
        return log_prob

    def _prob(self, given):
        """Probability density; exp of the log density."""
        # BUG FIX: previously called `self._log_prob(self, given)`, passing
        # `self` twice through the bound method (TypeError on any call).
        return tf.exp(self._log_prob(given))
class EigenMultivariateNormal(Distribution):
"""
The class of EigenMulti distribution.
See :class:`~zhusuan.distributions.base.Distribution` for details.
:param mean: A (N+2)-D (N >= 0) `float` Tensor of shape (..., n, p). Each
slice `[i, j, ..., k, :, :]` represents the mean matrix of the
distribution.
:param u: A (N+2)-D (N >= 0) `float` Tensor of shape (..., n, n). Each
slice `[i, j, ..., k, :, :]` represents the row variance matrix of the
distribution and should be positive definite.
:param v: A (N+2)-D (N >= 0) `float` Tensor of shape (..., p, p). Each
slice `[i, j, ..., k, :, :]` represents the column variance matrix of the
distribution and should be positive definite.
:param u_c: A (N+2)-D (N >= 0) `float` Tensor of shape (..., n, n). Each
slice `[i, j, ..., k, :, :]` uci has property uci uci^T = ui.
:param v_c: A (N+2)-D (N >= 0) `float` Tensor of shape (..., p, p). Each
slice `[i, j, ..., k, :, :]` vci has property vci vci^T = vi..
:param group_event_ndims: A 0-D `int32` Tensor representing the number of
dimensions in `batch_shape` (counted from the end) that are grouped
into a single event, so that their probabilities are calculated
together. Default is 0, which means a single value is an event.
See :class:`~zhusuan.distributions.base.Distribution` for more detailed
explanation.
:param is_reparameterized: A Bool. If True, gradients on samples from this
distribution are allowed to propagate into inputs, using the
reparametrization trick from (Kingma, 2013).
:param check_numerics: Bool. Whether to check numeric issues.
"""
def __init__(self,
mean,
u_b=None,
v_b=None,
r=None,
group_event_ndims=0,
is_reparameterized=True,
check_numerics=False):
mean = tf.convert_to_tensor(mean)
_assert_rank_op = tf.assert_greater_equal(
tf.rank(mean), 2,
message="mean should be at least a 2-D tensor.")
with tf.control_dependencies([_assert_rank_op]):
self._mean = mean
# assert_same_rank([(self._mean, 'EigenMatrixNormal.mean'),
# (u_b, 'EigenMatrixNormal.u_b'),
# (v_b, 'EigenMatrixNormal.v_b'),
# (r, 'EigenMatrixNormal.r')])
u_b = tf.convert_to_tensor(u_b)
self._u_b = u_b
# _assert_shape_op_1 = tf.assert_equal(
# tf.shape(mean)[-2], tf.shape(u)[-1],
# message='second last dimension of mean should be the same \
# as the last dimension of U matrix')
# _assert_shape_op_2 = tf.assert_equal(
# tf.shape(u)[-1], tf.shape(u)[-2],
# message='second last dimension of U should be the same \
# as the last dimension of U matrix')
# with tf.control_dependencies([
# _assert_shape_op_1, _assert_shape_op_2,
# tf.check_numerics(u, 'U matrix')]):
v_b = tf.convert_to_tensor(v_b)
self._v_b = v_b
# _assert_shape_op_1 = tf.assert_equal(
# tf.shape(mean)[-1], tf.shape(v)[-1],
# message='last dimension of mean should be the same \
# as last dimension of V matrix')
# _assert_shape_op_2 = tf.assert_equal(
# tf.shape(v)[-1], tf.shape(v)[-2],
# message='second last dimension of V should be the same \
# as last dimension of V matrix')
# with tf.control_dependencies([
# _assert_shape_op_1, _assert_shape_op_2,
# tf.check_numerics(v, 'V matrix')]):
r = tf.convert_to_tensor(r)
self._r = r
# _assert_shape_op_1 = tf.assert_equal(
# tf.shape(mean)[-1], tf.shape(r)[-1],
# message='second last dimension of mean should be the same \
# as the last dimension of U matrix')
# _assert_shape_op_2 = tf.assert_equal(
# tf.shape(mean)[-2], tf.shape(r)[-2],
# message='second last dimension of U should be the same \
# as the last dimension of U matrix')
# with tf.control_dependencies([
# _assert_shape_op_1, _assert_shape_op_2,
# tf.check_numerics(r, 'R matrix')]):
# self._r = r
dtype = assert_same_float_dtype([(self._mean, 'MatrixVariateNormal.mean'),
(self._u_b, 'MatrixVariateNormal.u_b'),
(self._v_b, 'MatrixVariateNormal.v_b'),
(self._r, 'MatrixVariateNormal.r')])
# R should have been damped before. Sqrt for sampling.
# self._r_c = tf.sqrt(self._r)
self.log_std = 0.5 * tf.log(self._r)
self.std = tf.exp(self.log_std)
super(EigenMultivariateNormal, self).__init__(
dtype=dtype,
param_dtype=dtype,
is_continuous=True,
is_reparameterized=is_reparameterized,
group_ndims=group_event_ndims)
    @property
    def mean(self):
        """The mean of the MatrixVariateNormal distribution.

        NOTE(review): docstring says "MatrixVariateNormal" but the class
        initializes via EigenMultivariateNormal (see __init__) -- confirm
        the intended class name.
        """
        return self._mean
    @property
    def r(self):
        # R matrix tensor set in __init__; its element-wise log/exp is used
        # as the sampling std (see `self.log_std = 0.5 * tf.log(self._r)`).
        return self._r
    @property
    def u_b(self):
        # U_b factor tensor, applied on the left in _sample's matmul.
        return self._u_b
    @property
    def v_b(self):
        # V_b factor tensor, applied (transposed) on the right in _sample.
        return self._v_b
    @property
    def r_c(self):
        # NOTE(review): self._r_c is never assigned -- the
        # `self._r_c = tf.sqrt(self._r)` line in __init__ is commented out,
        # so reading this property raises AttributeError.
        return self._r_c
    def _value_shape(self):
        # Dynamic event shape: the last two dims of `mean` (the matrix dims).
        return tf.shape(self.mean)[-2:]
    def _get_value_shape(self):
        # Static counterpart of _value_shape.
        return self.mean.get_shape()[-2:]
    def _batch_shape(self):
        # Dynamic batch shape: every dim of `mean` except the last two.
        return tf.shape(self.mean)[:-2]
    def _get_batch_shape(self):
        # Static counterpart of _batch_shape.
        return self.mean.get_shape()[:-2]
    def _sample(self, n_samples):
        """Draw `n_samples` matrices: mean + U_b @ (epsilon * std) @ V_b^T,
        where epsilon is standard normal noise scaled element-wise by std.
        """
        mean, u_b, v_b, std = self.mean, self.u_b, self.v_b, self.std
        if not self.is_reparameterized:
            # Block gradients through the parameters when not using the
            # reparameterization trick.
            mean = tf.stop_gradient(mean)
            u_b = tf.stop_gradient(u_b)
            v_b = tf.stop_gradient(v_b)
            std = tf.stop_gradient(std)
        # Replicate the factors along a new leading sample axis.
        u_b = tile_ntimes(u_b, n_samples)
        v_b = tile_ntimes(v_b, n_samples)
        std = tile_ntimes(std, n_samples)
        shape = tf.concat([[n_samples], self.batch_shape, self.value_shape], 0)
        epsilon = tf.random_normal(shape, dtype=self.dtype)
        # Scale the noise element-wise by the per-entry std.
        epsilon = tf.multiply(epsilon, std)
        v_b_t = transpose_last2dims(v_b)
        samples = mean + tf.matmul(u_b, tf.matmul(epsilon, v_b_t))
        # Attach static shape info when n_samples is known at graph build time.
        static_n_samples = n_samples if isinstance(n_samples, int) else None
        samples.set_shape(
            tf.TensorShape([static_n_samples]).concatenate(
                self.get_batch_shape()).concatenate(self.get_value_shape()))
        return samples
    def _log_prob(self, given):
        # Density evaluation is intentionally unsupported for this class.
        raise NotImplementedError()
    def _prob(self, given):
        # Density evaluation is intentionally unsupported for this class.
        raise NotImplementedError()
def transpose_last2dims(mat):
    """Return `mat` with its final two dimensions swapped (batch dims kept)."""
    ndims = len(mat.get_shape())
    perm = list(range(ndims))
    # Swap only the trailing pair of axes.
    perm[-2], perm[-1] = perm[-1], perm[-2]
    return tf.transpose(mat, perm)
def tile_ntimes(mat, n_particles):
    """Stack `n_particles` copies of `mat` along a new leading axis."""
    expanded = tf.expand_dims(mat, 0)
    # Repeat along the new axis only; all original axes keep multiplicity 1.
    multiples = [n_particles] + [1] * len(mat.get_shape())
    return tf.tile(expanded, multiples)
| [
"pomonam15@gmail.com"
] | pomonam15@gmail.com |
d1348a15bf03228bf313efb82a3a2b7757051401 | 12990ac50a674812c6c997d5ecd4e979cf1a2e6a | /venv/bin/django-admin | 67cb8118c1a4d82130e3680c9eb2dd7c2c9c8184 | [] | no_license | lc8681/home_web | 0aaa1208aea815027d7a923c2e31d63d209ffa3f | aa5b8c8a633c1e9a0d034a2f9dd234c662468c8c | refs/heads/master | 2020-03-25T20:49:16.749574 | 2018-08-17T16:37:39 | 2018-08-17T16:37:39 | 144,147,756 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 315 | #!/Users/lichen/Documents/PycharmProjects/home_web/venv/bin/python
# -*- coding: utf-8 -*-
# Console-script entry point that dispatches to Django's management CLI.
import re
import sys
from django.core.management import execute_from_command_line
if __name__ == '__main__':
    # Strip setuptools wrapper suffixes ("-script.py"/".exe") from argv[0]
    # so command-line parsing sees a clean program name (Windows wrappers).
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(execute_from_command_line())
| [
"lichen@redefine.global"
] | lichen@redefine.global | |
097a48bc0ec87840edcb99d338dd936647e75d11 | 53acd601a196a43e33df52ddab532a49989f52a9 | /Item/migrations/0001_initial.py | 35f0cbea418a230d0633db4487d1b3ce723e880a | [] | no_license | Songchip/point_mall_api | 0f8298f6d68ed3f5940e91981c145c4f3a02f9c8 | 60e26cb3d54d24d6262e16c1ae86d523b886f93d | refs/heads/master | 2022-12-12T07:24:26.849322 | 2019-08-29T08:02:13 | 2019-08-29T08:02:13 | 199,996,668 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,168 | py | # Generated by Django 2.2.3 on 2019-07-25 02:43
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema migration: creates the Item and UserItem tables."""

    # First migration of this app.
    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Item',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=100)),
                ('price', models.IntegerField(default=0)),
                ('description', models.TextField()),
                # Set once on insert.
                ('created', models.DateTimeField(auto_now_add=True)),
                ('image', models.ImageField(upload_to='uploads/item_images/')),
            ],
        ),
        migrations.CreateModel(
            name='UserItem',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('count', models.IntegerField(default=0)),
                # Rows are removed when the referenced Item is deleted.
                ('item', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='Item.Item')),
            ],
        ),
    ]
| [
"scv487@naver.com"
] | scv487@naver.com |
315db68787f26dc6726dae929582108069d5348d | 37c8d45c5b7fca7104d80fe1e25df8f22afa223a | /binarychop.py | 56390fb5d7b0839c2ed5d13e6f939020e3638b68 | [] | no_license | oxford-code-kats/binary-chop | d486a54ab1b4e2b5c99bb27d8c82285cff547d6d | 93a609e7dca0792f891e9093d0316b12e527a35f | refs/heads/master | 2021-01-10T05:22:00.723536 | 2015-09-27T15:38:34 | 2015-09-27T15:38:34 | 43,252,306 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 638 | py | from __future__ import division
# Sentinel returned when the target is not present.
MISSING = -1


def chop(target, search_list):
    """Binary-search `search_list` (sorted ascending) for `target`.

    Arguments:
    target -- value to locate.
    search_list -- ascending-sorted sequence to search.

    Returns the index of `target`, or MISSING (-1) when it is absent or
    the list is empty.

    Fixes over the previous version: the recursive calls discarded their
    return values (so matches deeper than one level returned None), the
    right-half recursion sliced the list without re-offsetting indices,
    and a single-element list returned -1 even when it held the target.
    """
    lo = 0
    hi = len(search_list) - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        value = search_list[mid]
        if value == target:
            return mid
        elif value < target:
            lo = mid + 1
        else:
            hi = mid - 1
    return MISSING
def find_middle_idx(search_list):
    """Return the middle index of the list.

    The exact centre for odd lengths, the lower of the two central indices
    for even lengths, and 0 for an empty list.
    """
    last_idx = len(search_list) - 1
    return last_idx // 2 if last_idx >= 0 else 0
"lmagosi@Leratos-MacBook-Pro.local"
] | lmagosi@Leratos-MacBook-Pro.local |
bf4938d9e73a26fe0757893e1a32b04c141a9cdb | d1845a132213f2239fb0fea3502982dcfbdaca08 | /youtube-favourites-export.py | 965eff77359c96636c638fe0f16b20fabf00c131 | [
"MIT"
] | permissive | dw/scratch | 361e9dac7693061b66ccd064633f4ed09875e1b2 | c22c84d4d2d0347283e70192458ea50e08efcadb | refs/heads/master | 2021-01-17T12:21:52.423557 | 2019-06-11T00:09:30 | 2019-06-11T00:09:30 | 3,239,854 | 30 | 10 | null | null | null | null | UTF-8 | Python | false | false | 982 | py |
# Export a YouTube account's favorites and subscriptions to local XML files
# via the (legacy) GData API, following pagination links until exhausted.
import gdata.youtube.client
client = gdata.youtube.client.YouTubeClient()
# NOTE(review): credentials are hard-coded placeholders -- replace before use.
client.client_login('email@gmail.com', 'password', 'exporter')
# Accumulate favorites across pages (50 entries per request).
entries = []
uri = 'https://gdata.youtube.com/feeds/api/users/default/favorites'
while True:
    print 'Fetch', uri
    feed = client.get_videos(uri=uri, **{'max-results': 50})
    entries += feed.entry
    if not feed.get_next_link():
        break
    uri = feed.get_next_link().href
# Reuse the last page's feed object as a container for all entries.
feed.entry = entries
print 'total', len(entries)
with open('youtube-favorites.xml', 'w') as fp:
    fp.write(feed.to_string())
# get subs
#
# Same pagination loop for subscriptions (generic feed endpoint).
entries = []
uri = 'https://gdata.youtube.com/feeds/api/users/default/subscriptions'
while True:
    print 'Fetch', uri
    feed = client.get_feed(uri=uri, **{'max-results': 50})
    entries += feed.entry
    if not feed.get_next_link():
        break
    uri = feed.get_next_link().href
feed.entry = entries
print 'total', len(entries)
with open('youtube-subs.xml', 'w') as fp:
    fp.write(feed.to_string())
| [
"dw@botanicus.net"
] | dw@botanicus.net |
7591aeb499a8aa9e7b3f60a415b9028c84aa17c2 | 22e69109a5bdeb9e9cdbafb87383fcf2d6bd4cc9 | /src/count_indels_mutations.py | 756ad90681ecfdfba51605dde421ed3bdb11f0f6 | [] | no_license | smacra2/EvoSeqGAN | ad1080472ae82eaa09563a877c86944dffaebc7a | c055e008ec7457204c3431b3d7da38ea38be3501 | refs/heads/master | 2022-09-22T13:57:54.357074 | 2022-08-24T09:02:43 | 2022-08-24T09:02:43 | 161,592,395 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,168 | py | import math
import pickle
from itertools import product
import numpy as np
def count_mutations(file_name, mutation_val=5, deletion_val=5, insertion_val=5):
    """Tally per-column differences between ancestor/descendant alignments.

    `file_name` holds alternating ancestor/descendant alignment lines
    (blank lines and '--------------------' separators are skipped).
    Columns where descendant is '-' count as deletions, where ancestor is
    '-' as insertions, and other mismatches as substitutions.

    Returns (mutation_probabilities, conditional_mutation_probabilities):
    per-pair frequencies over all columns, and frequencies conditioned on
    the ancestor character.

    NOTE(review): mutation_val/deletion_val/insertion_val are unused --
    their only uses are in the commented-out diagnostics below.
    """
    f = open(file_name)
    data = []
    for row in f:
        if row[0] != '\n' and row != '--------------------\n':
            data.append(row.rstrip())
    print(len(data))
    # for i in range(len(data)):
    #     print(data[i])
    mutations_list = []
    insertions_list = []
    deletions_list = []
    conservation_types = {}
    mutation_types = {}
    # Lines come in (ancestor, descendant) pairs.
    for j in range(0, len(data), 2):
        anc = data[j]
        des = data[j + 1]
        num_insertions = 0
        num_deletions = 0
        num_mutations = 0
        for i in range(min(len(anc), len(des))):
            if anc[i] != des[i]:
                if des[i] == '-':
                    num_deletions += 1
                elif anc[i] == '-':
                    num_insertions += 1
                else:
                    num_mutations += 1
                    temp = anc[i] + "->" + des[i]
                    if temp in mutation_types:
                        mutation_types[temp] += 1
                    else:
                        mutation_types[temp] = 1
            else:
                # Identical column: a conservation event (e.g. 'A->A').
                temp = anc[i] + "->" + des[i]
                if temp in conservation_types:
                    conservation_types[temp] += 1
                else:
                    conservation_types[temp] = 1
        # if num_mutations > mutation_val:
        #     print("MUTATION")
        # if num_deletions > deletion_val:
        #     print("DELETION")
        # if num_insertions > insertion_val:
        #     print("INSERTION")
        # if num_mutations > 0 or num_insertions > 0 or num_deletions > 0:
        #     print(anc + '\n' + des)
        mutations_list.append(num_mutations)
        insertions_list.append(num_insertions)
        deletions_list.append(num_deletions)
    print(mutations_list)
    print(insertions_list)
    print(deletions_list)
    # Per-alignment summary statistics.
    print("Number of sequences compared: ", len(mutations_list))
    print("Max number of mutations in alignment: ", np.max(mutations_list))
    print("Mean number of mutations in alignment: ", np.mean(mutations_list))
    print("Median number of mutations in alignment: ", np.median(mutations_list))
    print("Max number of insertions in alignment: ", np.max(insertions_list))
    print("Mean number of insertions in alignment: ", np.mean(insertions_list))
    print("Median number of insertions in alignment: ", np.median(insertions_list))
    print("Max number of deletions in alignment: ", np.max(deletions_list))
    print("Mean number of deletions in alignment: ", np.mean(deletions_list))
    print("Median number of deletions in alignment: ", np.median(deletions_list))
    print(conservation_types)
    print(mutation_types)
    # Grand total of all events, plus per-ancestor-character totals
    # (used as denominators for the conditional probabilities).
    total = 0
    total_A = 0
    total_C = 0
    total_G = 0
    total_T = 0
    total_gap = 0
    for c in conservation_types:
        total += conservation_types[c]
        if c.startswith('A'):
            total_A += conservation_types[c]
        elif c.startswith('C'):
            total_C += conservation_types[c]
        elif c.startswith('G'):
            total_G += conservation_types[c]
        elif c.startswith('T'):
            total_T += conservation_types[c]
        elif c.startswith('-'):
            total_gap += conservation_types[c]
    print(total)
    print(total_A)
    print(total_C)
    print(total_G)
    print(total_T)
    print(total_gap)
    for m in mutation_types:
        total += mutation_types[m]
        if m.startswith('A'):
            total_A += mutation_types[m]
        elif m.startswith('C'):
            total_C += mutation_types[m]
        elif m.startswith('G'):
            total_G += mutation_types[m]
        elif m.startswith('T'):
            total_T += mutation_types[m]
        elif m.startswith('-'):
            total_gap += mutation_types[m]
    print('Final total:', total)
    print(total_A)
    print(total_C)
    print(total_G)
    print(total_T)
    print(total_gap)
    # Unconditional frequencies: count / grand total.
    mutation_probabilities = {}
    for c in conservation_types:
        mutation_probabilities[c] = conservation_types[c] / total
    for m in mutation_types:
        mutation_probabilities[m] = mutation_types[m] / total
    # print(mutation_probabilities)
    # Frequencies conditioned on the ancestor character: count / per-char total.
    conditional_mutation_probabilities = {}
    for c in conservation_types:
        if c.startswith('A'):
            conditional_mutation_probabilities[c] = conservation_types[c] / total_A
        elif c.startswith('C'):
            conditional_mutation_probabilities[c] = conservation_types[c] / total_C
        elif c.startswith('G'):
            conditional_mutation_probabilities[c] = conservation_types[c] / total_G
        elif c.startswith('T'):
            conditional_mutation_probabilities[c] = conservation_types[c] / total_T
        elif c.startswith('-'):
            conditional_mutation_probabilities[c] = conservation_types[c] / total_gap
    for m in mutation_types:
        if m.startswith('A'):
            conditional_mutation_probabilities[m] = mutation_types[m] / total_A
        elif m.startswith('C'):
            conditional_mutation_probabilities[m] = mutation_types[m] / total_C
        elif m.startswith('G'):
            conditional_mutation_probabilities[m] = mutation_types[m] / total_G
        elif m.startswith('T'):
            conditional_mutation_probabilities[m] = mutation_types[m] / total_T
        elif m.startswith('-'):
            conditional_mutation_probabilities[m] = mutation_types[m] / total_gap
    # print(conditional_mutation_probabilities)
    return mutation_probabilities, conditional_mutation_probabilities
def main():
    """Compare generated mutation statistics against hard-coded references.

    Counts pair frequencies in 'generator_indel_output_test_des.txt' and
    reports Euclidean distance and K-L divergence to reference indel
    probabilities, for both unconditional and ancestor-conditional
    distributions.
    """
    probabilities, conditional_probabilities = count_mutations('generator_indel_output_test_des.txt', mutation_val=40,
                                                               deletion_val=12, insertion_val=6)
    # Reference unconditional probabilities (presumably from real alignment
    # data -- source not recorded here; see the pickled file used by
    # extended_main).
    indel_reference = {'G->G': 0.2054911822523275, 'T->T': 0.27636391545576244, 'C->C': 0.20439245909242262,
                       'A->A': 0.2742263718361839, 'G->A': 0.003981774603454654, 'A->G': 0.005505198548201901,
                       'A->T': 0.0008561874224453078, 'C->G': 0.0010380014774439424, 'T->A': 0.0008802888994914589,
                       'T->C': 0.0055226311699488745, 'G->T': 0.0010803545584667903, 'T->G': 0.001058827025571199,
                       'A->C': 0.0010411604089014476, 'C->T': 0.003862554190299179, '-->C': 0.0008436686940766759,
                       'C->A': 0.0010912353223759752, '-->T': 0.00125514876578208, '-->A': 0.001252340826708742,
                       'T->-': 0.0026550233913024556, 'A->-': 0.002630804916794915, '-->G': 0.0007079516388653399,
                       'C->-': 0.0015933884266579185, 'G->-': 0.0016135119900168406, 'G->C': 0.0010560190864978612}
    # Reference probabilities conditioned on the ancestor character.
    indel_condition_reference = {'G->G': 0.963739062156199, 'T->T': 0.96468602952047, 'C->C': 0.9642170774704246,
                                 'A->A': 0.9647035774686031, 'G->A': 0.018674240325011783, 'A->G': 0.019366790650236682,
                                 'A->T': 0.0030119899260090967, 'C->G': 0.004896749887129694,
                                 'T->A': 0.003072768750872946,
                                 'T->C': 0.01927749911582345, 'G->T': 0.00506678621223985, 'T->G': 0.003695980488490186,
                                 'A->C': 0.0036627081650116087, 'C->T': 0.01822151721975876,
                                 '-->C': 0.20784573701504583,
                                 'C->A': 0.005147879418085963, '-->T': 0.30921773217271, '-->A': 0.3085259699083415,
                                 'T->-': 0.009267722124343403, 'A->-': 0.009254933790139457, '-->G': 0.1744105609039027,
                                 'C->-': 0.007516776004600914, 'G->-': 0.0075672567308858315,
                                 'G->C': 0.004952654575663513}
    # For convenience
    # -->A: 0.001252340826708742
    # -->C: 0.0008436686940766759
    # -->G: 0.0007079516388653399
    # -->T: 0.00125514876578208
    # A->-: 0.002630804916794915
    # A->A: 0.2742263718361839
    # A->C: 0.0010411604089014476
    # A->G: 0.005505198548201901
    # A->T: 0.0008561874224453078
    # C->-: 0.0015933884266579185
    # C->A: 0.0010912353223759752
    # C->C: 0.20439245909242262
    # C->G: 0.0010380014774439424
    # C->T: 0.003862554190299179
    # G->-: 0.0016135119900168406
    # G->A: 0.003981774603454654
    # G->C: 0.0010560190864978612
    # G->G: 0.2054911822523275
    # G->T: 0.0010803545584667903
    # T->-: 0.0026550233913024556
    # T->A: 0.0008802888994914589
    # T->C: 0.0055226311699488745
    # T->G: 0.001058827025571199
    # T->T: 0.27636391545576244
    # Conditional probabilities on ancestor character
    # -->A: 0.3085259699083415
    # -->C: 0.20784573701504583
    # -->G: 0.1744105609039027
    # -->T: 0.30921773217271
    # A->-: 0.009254933790139457
    # A->A: 0.9647035774686031
    # A->C: 0.0036627081650116087
    # A->G: 0.019366790650236682
    # A->T: 0.0030119899260090967
    # C->-: 0.007516776004600914
    # C->A: 0.005147879418085963
    # C->C: 0.9642170774704246
    # C->G: 0.004896749887129694
    # C->T: 0.01822151721975876
    # G->-: 0.0075672567308858315
    # G->A: 0.018674240325011783
    # G->C: 0.004952654575663513
    # G->G: 0.963739062156199
    # G->T: 0.00506678621223985
    # T->-: 0.009267722124343403
    # T->A: 0.003072768750872946
    # T->C: 0.01927749911582345
    # T->G: 0.003695980488490186
    # T->T: 0.96468602952047
    for key in sorted(probabilities.keys()):
        print(key, " : ", probabilities[key])
    # Make sure every substitution in reference dictionary is in generated dictionary, even with probability 0
    for key in indel_reference:
        if key not in probabilities:
            probabilities[key] = 0
            print("Missing key: ", key)
    euclidean_distance = 0
    # Pairwise key matching is O(n^2) but the key space is tiny (25 pairs).
    for key in probabilities:
        for key2 in indel_reference:
            if key == key2:
                euclidean_distance += (indel_reference[key] - probabilities[key]) ** 2
    print("Squared Euclidean Distance: ", euclidean_distance)
    euclidean_distance = math.sqrt(euclidean_distance)
    print("Euclidean Distance: ", euclidean_distance)
    kl_divergence = 0
    for key in probabilities:
        for key2 in indel_reference:
            if key == key2:
                # 1e-8 guards log(0) when a generated probability is zero.
                temp = math.log((probabilities[key] / indel_reference[key]) + 1e-8) * probabilities[key]
                kl_divergence += temp
    print("K-L Divergence: ", kl_divergence)
    print("\nNow repeating for conditional probabilities....")
    for key in sorted(conditional_probabilities.keys()):
        print(key, " : ", conditional_probabilities[key])
    for key in indel_condition_reference:
        if key not in conditional_probabilities:
            conditional_probabilities[key] = 0
            print("Missing key: ", key)
    euclidean_distance = 0
    for key in conditional_probabilities:
        for key2 in indel_condition_reference:
            if key == key2:
                euclidean_distance += (indel_condition_reference[key] - conditional_probabilities[key]) ** 2
    print("Squared Euclidean Distance: ", euclidean_distance)
    euclidean_distance = math.sqrt(euclidean_distance)
    print("Euclidean Distance: ", euclidean_distance)
    kl_divergence = 0
    for key in conditional_probabilities:
        for key2 in indel_condition_reference:
            if key == key2:
                temp = math.log((conditional_probabilities[key] / indel_condition_reference[key]) + 1e-8) * conditional_probabilities[key]
                kl_divergence += temp
    print("K-L Divergence: ", kl_divergence)
def count_extended_mutations(file_name):
    """Count tri-/di-/mono-nucleotide transition frequencies in alignments.

    Reads alternating ancestor/descendant lines from `file_name`, counts
    every sliding window (length 3, 2, and 1) transition over the alphabet
    {A, C, G, T, -}, applies add-one smoothing, and returns the three
    frequency dictionaries keyed like 'ACG->AC-'.
    """
    f = open(file_name)
    data = []
    for row in f:
        if row[0] != '\n' and row != '--------------------\n':
            data.append(row.rstrip())
    print(len(data))
    alphabet = ['A', 'C', 'G', 'T', '-']
    tri_mutation_types = {}
    di_mutation_types = {}
    single_mutation_types = {}
    # Pre-enumerate every possible k-mer so all transitions exist with count 0.
    tri = list(product(alphabet, repeat=3))  # trinucleotides
    di = list(product(alphabet, repeat=2))  # dinucleotides
    single = list(product(alphabet, repeat=1))  # single nucleotides
    for i in range(len(tri)):
        tri[i] = ''.join(tri[i])
    for i in range(len(di)):
        di[i] = ''.join(di[i])
    for i in range(len(single)):
        single[i] = ''.join(single[i])
    for i in range(len(tri)):
        for j in range(len(tri)):
            temp = tri[i] + '->' + tri[j]
            tri_mutation_types[temp] = 0
    print(len(tri_mutation_types))
    for i in range(len(di)):
        for j in range(len(di)):
            temp = di[i] + '->' + di[j]
            di_mutation_types[temp] = 0
    print(len(di_mutation_types))
    for i in range(len(single)):
        for j in range(len(single)):
            temp = single[i] + '->' + single[j]
            single_mutation_types[temp] = 0
    print(len(single_mutation_types))
    tri_count = 0
    di_count = 0
    single_count = 0
    # Lines come in (ancestor, descendant) pairs; slide each window width.
    for j in range(0, len(data), 2):
        anc = data[j]
        des = data[j + 1]
        for i in range(0, min(len(anc), len(des)) - 2, 1):
            temp_anc = anc[i] + anc[i + 1] + anc[i + 2]
            des_anc = des[i] + des[i + 1] + des[i + 2]
            temp = temp_anc + '->' + des_anc
            if temp in tri_mutation_types:
                tri_mutation_types[temp] += 1
            else:
                # Should not happen: every alphabet k-mer was pre-registered.
                print("Unexpected mutation not found in dictionary")
                tri_mutation_types[temp] = 1
            tri_count += 1
        for i in range(0, min(len(anc), len(des)) - 1, 1):
            temp_anc = anc[i] + anc[i + 1]
            des_anc = des[i] + des[i + 1]
            temp = temp_anc + '->' + des_anc
            if temp in di_mutation_types:
                di_mutation_types[temp] += 1
            else:
                print("Unexpected mutation not found in dictionary")
                di_mutation_types[temp] = 1
            di_count += 1
        for i in range(0, min(len(anc), len(des)), 1):
            temp_anc = anc[i]
            des_anc = des[i]
            temp = temp_anc + '->' + des_anc
            if temp in single_mutation_types:
                single_mutation_types[temp] += 1
            else:
                print("Unexpected mutation not found in dictionary")
                single_mutation_types[temp] = 1
            single_count += 1
    print(tri_mutation_types)
    print(di_mutation_types)
    print(single_mutation_types)
    print()
    print(tri_count)
    print(di_count)
    print(single_count)
    print()
    # Increment by 1 for smoothing when later computing divergence values
    for tri in tri_mutation_types:
        tri_mutation_types[tri] = (tri_mutation_types[tri] + 1) / tri_count
    for di in di_mutation_types:
        di_mutation_types[di] = (di_mutation_types[di] + 1) / di_count
    for single in single_mutation_types:
        single_mutation_types[single] = (single_mutation_types[single] + 1) / single_count
    print(tri_mutation_types)
    print(di_mutation_types)
    print(single_mutation_types)
    return tri_mutation_types, di_mutation_types, single_mutation_types
def extended_main():
    """Compare generated k-mer transition frequencies to pickled references.

    Loads tri-/di-/single-nucleotide reference distributions from
    'realData_indels_1500_extended_probabilities', recomputes them for the
    generated file, and prints Euclidean distance and K-L divergence per
    k-mer size.
    """
    pickle_in = open('realData_indels_1500_extended_probabilities', "rb")
    # Load saved mutation probabilities from reference data
    tri_ref = pickle.load(pickle_in)
    di_ref = pickle.load(pickle_in)
    single_ref = pickle.load(pickle_in)
    pickle_in.close()
    tri_cnd, di_cnd, single_cnd = count_extended_mutations('generator_indel_output_test_des.txt')
    # Make sure every substitution in reference dictionary is in generated dictionary, even with probability 0
    # NOTE(review): a key zero-filled here would later hit math.log(0) in the
    # K-L loop (ValueError); the add-one smoothing in count_extended_mutations
    # normally prevents missing keys, so this path is effectively dead.
    for key in tri_ref:
        if key not in tri_cnd:
            tri_cnd[key] = 0
            print("Missing key: ", key)
    for key in di_ref:
        if key not in di_cnd:
            di_cnd[key] = 0
            print("Missing key: ", key)
    for key in single_ref:
        if key not in single_cnd:
            single_cnd[key] = 0
            print("Missing key: ", key)
    print("\nFor trinucleotides...")
    euclidean_distance = 0
    # Pairwise key matching is O(n^2); fine at this dictionary size.
    for key in tri_cnd:
        for key2 in tri_ref:
            if key == key2:
                euclidean_distance += (tri_ref[key] - tri_cnd[key]) ** 2
    print("Squared Euclidean Distance: ", euclidean_distance)
    euclidean_distance = math.sqrt(euclidean_distance)
    print("Euclidean Distance: ", euclidean_distance)
    kl_divergence = 0
    for key in tri_cnd:
        for key2 in tri_ref:
            if key == key2:
                temp = math.log(((tri_cnd[key]) / (tri_ref[key]))) * tri_cnd[key]
                kl_divergence += temp
    print("K-L Divergence: ", kl_divergence)
    print("\nFor dinucleotides...")
    euclidean_distance = 0
    for key in di_cnd:
        for key2 in di_ref:
            if key == key2:
                euclidean_distance += (di_ref[key] - di_cnd[key]) ** 2
    print("Squared Euclidean Distance: ", euclidean_distance)
    euclidean_distance = math.sqrt(euclidean_distance)
    print("Euclidean Distance: ", euclidean_distance)
    kl_divergence = 0
    for key in di_cnd:
        for key2 in di_ref:
            if key == key2:
                temp = math.log(((di_cnd[key]) / (di_ref[key]))) * di_cnd[key]
                kl_divergence += temp
    print("K-L Divergence: ", kl_divergence)
    print("\nFor single nucleotides...")
    euclidean_distance = 0
    for key in single_cnd:
        for key2 in single_ref:
            if key == key2:
                euclidean_distance += (single_ref[key] - single_cnd[key]) ** 2
    print("Squared Euclidean Distance: ", euclidean_distance)
    euclidean_distance = math.sqrt(euclidean_distance)
    print("Euclidean Distance: ", euclidean_distance)
    kl_divergence = 0
    for key in single_cnd:
        for key2 in single_ref:
            if key == key2:
                temp = math.log(((single_cnd[key]) / (single_ref[key]))) * single_cnd[key]
                kl_divergence += temp
    print("K-L Divergence: ", kl_divergence)
# Run both the basic and the extended (k-mer) comparisons as a script.
if __name__ == "__main__":
    main()
    extended_main()
| [
"sean.macrae@mail.mcgill.ca"
] | sean.macrae@mail.mcgill.ca |
ba87a0ef4833a6b0a51aaf46629dcd17a1a706f2 | 968eb47b8b5aadeefd1f3ff52738b27eb66bc862 | /src/db/test_kommentti.py | e6daf2a2bcc312662f7b37fffd9876402ed4f330 | [] | no_license | jgsavola/rohmotti | 96787d13a8dde504fdf11b24f2ac00e956302f65 | 247beffc85a10bd7f5c1e4724a1eb638a306ee3b | refs/heads/master | 2021-01-10T20:28:31.573442 | 2012-09-09T20:37:28 | 2012-09-09T20:37:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 502 | py | #!/usr/bin/python
# Smoke-test script: connect to a local Postgres database (named after the
# current Unix user) and exercise Kommentti loading.
import os
import pwd
import psycopg2
from DatabaseObject import DatabaseObject
from Kommentti import Kommentti
# Database name and user both default to the current login name.
dbuser = pwd.getpwuid(os.getuid()).pw_name
dbname = dbuser
conn = psycopg2.connect("dbname=%s user=%s" % (dbname, dbuser))
DatabaseObject.setDatabaseConnection(conn)
# Load a single comment by id and print it.
# NOTE(review): the variable name `kuva` ("picture") is misleading -- it
# holds a Kommentti ("comment"); consider renaming.
kuva = Kommentti.load_from_database(3)
print "kommentti: %d -- %s" % (kuva.kommentti_id, kuva.teksti)
# List comment ids attached to a fixed target (kohde_id=10).
ids = Kommentti.load_ids(kohde_id=10)
for i in ids:
    print "    kommentti(kohde_id=10): %d" % (i)
| [
"jonne.savolainen@helsinki.fi"
] | jonne.savolainen@helsinki.fi |
255e390a2a1c7707fedd1dbac54e53f428b10003 | 9a1e3c22f19d0124276a19c414ed8555b7cabe11 | /echolect/core/subsectime.py | a192cf157d4a635060b2381e4631979a11bdbe9b | [] | no_license | ryanvolz/echolect | 05e6373f3566f4cc6e1ee9f449ec3e931389e784 | ec2594925f34fdaea69b64e725fccb0c99665a55 | refs/heads/master | 2021-01-17T09:15:57.382042 | 2016-04-25T20:40:44 | 2016-04-25T20:40:44 | 9,969,893 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,794 | py | #-----------------------------------------------------------------------------
# Copyright (c) 2014, Ryan Volz
# All rights reserved.
#
# Distributed under the terms of the BSD 3-Clause ("BSD New") license.
#
# The full license is in the LICENSE file, distributed with this software.
#-----------------------------------------------------------------------------
"""A module for working with dates/times that have precise sub-second resolution.
Exported Classes:
FixedTimezone -- Fixed timezone defined by timezone offset and DST flag.
SubSecTimeDelta -- Class representing relative times with sub-second resolution.
SubSecTime -- Class representing absolute times with sub-second resolution.
@author: Ryan Volz
"""
from __future__ import division as _division
import datetime as _datetime
import calendar as _calendar
import warnings as _warnings
import math as _math
import re as _re
##TODO
# __slots__ to save space per object?
class FixedTimezone(_datetime.tzinfo):
    """Timezone with a constant UTC offset and an optional DST adjustment."""

    def __init__(self, tz_offset=0, DST=False, name=None):
        """Set up the fixed-offset timezone.

        Arguments:
        tz_offset -- Hours east of UTC (negative for west).
        DST -- Whether a one-hour Daylight Savings Time shift applies.
        name -- Optional timezone name string.

        """
        self._DST = DST
        self._name = name
        # Total wall-clock offset: the base offset plus one hour when DST
        # is active (True adds 1 in the arithmetic, False adds 0).
        self._offset = _datetime.timedelta(hours=tz_offset + DST)

    def utcoffset(self, dt):
        # Already includes the DST hour, per the tzinfo contract.
        return self._offset

    def dst(self, dt):
        return _datetime.timedelta(hours=self._DST)

    def tzname(self, dt):
        return self._name
class SubSecTimeDelta(object):
    """A class for representing relative times with sub-second resolution.

    The value represented is (seconds + subseconds/factor) seconds.
    Internally, seconds and subseconds always share a sign: subseconds is
    in [0, factor) when the value is non-negative and in (-factor, 0] when
    it is negative (normalized by __new__).

    NOTE(review): this class predates Python 3 -- it relies on `long`, the
    `cmp` builtin, and `__cmp__`, none of which exist in Python 3.

    Public Attributes:
    seconds -- Integer giving the number of seconds.
    subseconds -- Integer giving the number of subseconds.

    Public Methods:
    total_seconds -- Return the time as a float in seconds.
    total_subseconds -- Return the total time as an integer in subseconds.

    """
    def __new__(cls, secs=0, ssecs=0, factor=1000000):
        """Initialize SubSecTimeDelta from seconds and subseconds integers.

        Arguments:
        secs -- Integer giving the number of seconds.
        ssecs -- Integer giving the number of subseconds.
        factor -- Integer giving conversion factor from subseconds to seconds.

        The total time is given by (secs + ssecs/factor) seconds.

        """
        # Build via nofix, then normalize ssecs into range.
        self = cls.nofix(int(secs), int(ssecs), int(factor))
        # make sure ssecs is valid (absolute value less than factor)
        secs, ssecs = divmod(self._ssecs, self._factor)
        secs = secs + self._secs
        # make nsecs lie between 0 and factor-1 (inclusive) if secs > 0
        # and between -(factor-1) and 0 (inclusive) if secs < 0
        # (nsecs is already >= 0 from the divmod above)
        if secs < 0 and ssecs > 0:
            secs += 1
            ssecs -= self._factor
        self._secs = secs
        self._ssecs = ssecs
        return self

    @classmethod
    def nofix(cls, secs=0, ssecs=0, factor=1000000):
        # Raw constructor: stores the fields without range normalization.
        # Callers must guarantee secs/ssecs already satisfy the invariant.
        self = object.__new__(cls)
        self._secs = secs
        self._ssecs = ssecs
        self._factor = factor
        return self

    @classmethod
    def from_seconds(cls, seconds, factor=1000000):
        # Build from a (possibly fractional, possibly negative) number of
        # seconds, rounding the fractional part to the nearest subsecond.
        factor = int(factor)
        if seconds >= 0:
            secs = int(seconds)
            ssecs = int(round((seconds % 1)*factor))
            return cls.nofix(secs=secs, ssecs=ssecs, factor=factor)
        else:
            # Mirror the positive case so secs/ssecs share a sign.
            secs = -int(-seconds)
            ssecs = -int(round((-seconds % 1)*factor))
            return cls.nofix(secs=secs, ssecs=ssecs, factor=factor)

    @property
    def seconds(self):
        """Get seconds."""
        return self._secs

    @property
    def subseconds(self):
        """Get subseconds."""
        return self._ssecs

    @property
    def factor(self):
        """Get subsecond factor."""
        return self._factor

    def total_seconds(self):
        """Return the time as a float in seconds."""
        return self._secs + self._ssecs/self._factor

    def total_subseconds(self):
        """Return the total time as an integer in subseconds."""
        return self._ssecs + self._secs*self._factor

    def make_special(self):
        # Downcast to a resolution-specific subclass for common factors.
        # NOTE(review): MilliTimeDelta/MicroTimeDelta/NanoTimeDelta/
        # PicoTimeDelta are presumably defined later in this module -- not
        # visible here; confirm they exist before use.
        factor = self._factor
        if factor == 1000:
            return MilliTimeDelta.nofix(self._secs, self._ssecs, factor)
        elif factor == 1000000:
            return MicroTimeDelta.nofix(self._secs, self._ssecs, factor)
        elif factor == 1000000000:
            return NanoTimeDelta.nofix(self._secs, self._ssecs, factor)
        elif factor == 1000000000000:
            return PicoTimeDelta.nofix(self._secs, self._ssecs, factor)
        return self

    def change_factor(self, factor):
        """Change the subsecond factor to the one provided."""
        if factor == self._factor:
            return self
        # Rescale subseconds with floor division; precision may be lost
        # when converting to a coarser factor.
        changed = SubSecTimeDelta(self._secs, self._ssecs*factor//self._factor,
                                  factor)
        return changed

    def equalize_factors(self, other):
        """Equalize factors for two SubSecTimeDelta objects to their maximum."""
        if self._factor == other._factor:
            return self, other
        # Converting both to the finer (larger) factor is lossless.
        max_factor = max(self._factor, other._factor)
        return self.change_factor(max_factor), other.change_factor(max_factor)

    def fractional_digits(self, precision=12):
        # Render the fractional part as a string without trailing zeros.
        dec_exp = _math.log10(self._factor)
        if dec_exp.is_integer():
            # Power-of-ten factor: zero-pad ssecs to the full digit count.
            fstr = '{0:0' + str(int(dec_exp)) + '}'
            return fstr.format(self._ssecs).rstrip('0')
        else:
            # Non-decimal factor: fall back to float formatting at the
            # requested precision.
            fstr = '{0:.' + str(precision) + 'f}'
            fracstring = fstr.format(float(self._ssecs)/self._factor
                                     ).split('.')[1].rstrip('0')
            if fracstring == '':
                fracstring = '0'
            return fracstring

    def __repr__(self):
        return '{0}({1}, {2}, {3})'.format(self.__class__.__name__, self._secs,
                                           self._ssecs, self._factor)

    def __str__(self):
        s = str(self._secs) + '.' + self.fractional_digits()
        # remove decimal point if no fractional digits
        s = s.rstrip('0').rstrip('.')
        return s

    def __float__(self):
        return self.total_seconds()

    def __add__(self, other):
        if isinstance(other, SubSecTimeDelta):
            s, o = self.equalize_factors(other)
            secs = s._secs + o._secs
            ssecs = s._ssecs + o._ssecs
            # delta + absolute time yields an absolute time of other's type.
            if isinstance(other, SubSecTime):
                return type(other)(secs, ssecs, o._factor)
            return type(self)(secs, ssecs, s._factor)
        elif isinstance(other, (int, long, float)):
            # assume other represents seconds
            # NOTE(review): `long` is Python 2 only.
            return self + SubSecTimeDelta.from_seconds(other, factor=self._factor)
        return NotImplemented
    __radd__ = __add__

    def __sub__(self, other):
        return SubSecTimeDelta.__add__(self, -other)

    def __rsub__(self, other):
        return SubSecTimeDelta.__add__(-self, other)

    def __pos__(self):
        return self

    def __neg__(self):
        # Negating both fields preserves the shared-sign invariant, so the
        # normalizing constructor can be skipped.
        return type(self).nofix(-self._secs, -self._ssecs, self._factor)

    def __abs__(self):
        return type(self).nofix(abs(self._secs), abs(self._ssecs), self._factor)

    def __mul__(self, other):
        if isinstance(other, (int, long)):
            return type(self)(self._secs*other, self._ssecs*other, self._factor)
        if isinstance(other, float):
            # Multiply by the float's exact rational representation.
            a, b = other.as_integer_ratio()
            return self * a / b
        return NotImplemented
    __rmul__ = __mul__

    def __divmod__(self, other):
        if isinstance(other, SubSecTimeDelta):
            s, o = self.equalize_factors(other)
            # Work in integer subseconds to avoid float error.
            div, rem = divmod(s.total_subseconds(),
                              o.total_subseconds())
            mod = type(other).nofix(0, rem, o.factor)
            return div, mod
        return NotImplemented

    def __floordiv__(self, other):
        if isinstance(other, SubSecTimeDelta):
            s, o = self.equalize_factors(other)
            return s.total_subseconds() // o.total_subseconds()
        elif isinstance(other, (int, long)):
            return type(self)(0, self.total_subseconds() // other,
                              self._factor)
        return NotImplemented

    def __mod__(self, other):
        if isinstance(other, SubSecTimeDelta):
            s, o = self.equalize_factors(other)
            return type(other)(0, (s.total_subseconds()
                                   % o.total_subseconds()), o._factor)
        return NotImplemented

    def __truediv__(self, other):
        if isinstance(other, SubSecTimeDelta):
            s, o = self.equalize_factors(other)
            return s.total_subseconds() / o.total_subseconds()
        elif isinstance(other, (int, long, float)):
            return type(self)(0, self.total_subseconds() / other,
                              self._factor)
        return NotImplemented
    __div__ = __truediv__

    def __cmp__(self, other):
        # NOTE(review): __cmp__ and the `cmp` builtin are Python 2 only;
        # Python 3 requires rich comparison methods instead.
        if isinstance(other, SubSecTimeDelta):
            s, o = self.equalize_factors(other)
            return cmp((s._secs, s._ssecs), (o._secs, o._ssecs))
        elif isinstance(other, (int, long, float)):
            # assume other represents seconds
            return cmp(self, type(self).from_seconds(other, self._factor))
        return NotImplemented

    def __hash__(self):
        return hash((self._secs, self._ssecs, self._factor))
class SubSecTime(SubSecTimeDelta):
"""A class for representing absolute times with subsecond resolution.
The time represents the seconds and subseconds in UTC from epoch.
Derived from SubSecTimeDelta, so provides all of its functionality.
Additional methods:
from_datetime -- Create a SubSecTime object from a datetime object.
to_datetime -- Convert SubSecTime object to a datetime object.
Addition or subtraction with a SubSecTimeDelta produces a new SubSecTime
object. Otherwise, operations are defined as with SubSecTimeDelta.
"""
_fromstring_compiled_re = None
    @classmethod
    def from_string(cls, timestr):
        """Create SubSecTime from str in strftime format '%Y-%m-%d %H:%M:%S.%f'.

        Converting a SubSecTime to and from a string is possible so that
        sst == SubSecTime.from_string(str(sst)).

        """
        # Compile the parsing regex once and cache it on the class.
        # NOTE(review): the pattern uses non-raw strings with `\d`/`\s` --
        # a DeprecationWarning on modern Python; consider raw strings.
        if cls._fromstring_compiled_re is None:
            pat = '(?P<Y>\d\d\d\d)' \
                  + '-(?P<m>1[0-2]|0[1-9]|[1-9])' \
                  + '-(?P<d>3[0-1]|[1-2]\d|0[1-9]|[1-9]| [1-9])' \
                  + '\s+(?P<H>2[0-3]|[0-1]\d|\d)' \
                  + ':(?P<M>[0-5]\d|\d)' \
                  + ':(?P<S>6[0-1]|[0-5]\d|\d)' \
                  + '\.(?P<f>[0-9]+)'
            cls._fromstring_compiled_re = _re.compile(pat, _re.IGNORECASE)
        v = cls._fromstring_compiled_re.match(timestr).groupdict()
        # Whole seconds: interpret the date/time fields as UTC.
        secsdt = _datetime.datetime(int(v['Y']), int(v['m']), int(v['d']),
                                    int(v['H']), int(v['M']), int(v['S']))
        secs = _calendar.timegm(secsdt.utctimetuple())
        ssecs = int(v['f'])
        # The subsecond factor is inferred from the number of fractional
        # digits present in the string.
        digits = len(v['f'])
        factor = 10**digits
        return cls(secs, ssecs, factor)
@classmethod
def from_datetime(cls, dt):
"""Create a SubSecTime object from a datetime object.
Arguments:
dt -- Datetime object to be converted to a SubSecTime object.
Returns:
A SubSecTime object giving the time from epoch in UTC.
"""
secs = _calendar.timegm(dt.utctimetuple())
ssecs = dt.microsecond
factor = 1000000
return cls(secs, ssecs, factor)
def to_datetime(self, tz=None):
"""Convert SubSecTime object to a datetime object.
Precision may be lost in the conversion since datetimes are only
accurate to microseconds.
Arguments:
tz -- Timezone given by a _datetime.tzinfo object within which the
datetime will be given.
If None, timezone representing UTC will be used.
Returns:
A datetime object representing the same time as the SubSecTime object.
"""
if tz is None:
tz = FixedTimezone(tz_offset=0, DST=0, name='UTC')
return _datetime.datetime.fromtimestamp(self.total_seconds(), tz)
def make_special(self):
factor = self._factor
if factor == 1000:
return MilliTime.nofix(self._secs, self._ssecs, factor)
elif factor == 1000000:
return MicroTime.nofix(self._secs, self._ssecs, factor)
elif factor == 1000000000:
return NanoTime.nofix(self._secs, self._ssecs, factor)
elif factor == 1000000000000:
return PicoTime.nofix(self._secs, self._ssecs, factor)
return self
def change_factor(self, factor):
"""Change the subsecond factor to the one provided."""
if factor == self._factor:
return self
changed = SubSecTime(self._secs, self._ssecs*factor//self._factor,
factor)
return changed
def strftime(self, fstr, precision=12):
"""Like datetime strftime, but '%f' is replaced with fractional
subsecond digits.
"""
dtfstr = fstr.replace('%f', self.fractional_digits(precision))
return self.to_datetime().strftime(dtfstr)
def __str__(self):
return self.strftime('%Y-%m-%d %H:%M:%S.%f').rstrip('0').rstrip('.')
def __add__(self, other):
if isinstance(other, SubSecTime):
return NotImplemented
return SubSecTimeDelta.__add__(self, other)
__radd__ = __add__
def __sub__(self, other):
if isinstance(other, SubSecTime):
# need to handle this specially to get SubSecTimeDelta as output
s, o = self.equalize_factors(other)
secs = s._secs - o._secs
ssecs = s._ssecs - o._ssecs
deltaclass = o.__class__.__bases__[0] # to keep specialized type
return deltaclass(secs, ssecs, s._factor)
return SubSecTimeDelta.__sub__(self, other)
class MilliTimeDelta(SubSecTimeDelta):
    """A SubSecTimeDelta fixed to millisecond resolution (factor 1000)."""
    # needed to set default factor
    def __new__(cls, secs=0, ssecs=0, factor=1000):
        return SubSecTimeDelta.__new__(cls, secs, ssecs, factor)
    @classmethod
    def nofix(cls, secs=0, ssecs=0, factor=1000):
        """Create an instance without normalization, converting `ssecs`
        from `factor` to milliseconds.

        Warns when any conversion happens, and additionally when the
        original subsecond value cannot be represented exactly in
        milliseconds.
        """
        if factor != 1000:
            _warnings.warn('Converting subseconds to new factor', RuntimeWarning)
            # BUGFIX: test precision loss on the ORIGINAL value before the
            # truncating floor division.  The old code tested the already-
            # truncated result and only for factor > 1000, producing both
            # false positives (e.g. ssecs=1000, factor=10**6 -- exact, but
            # warned) and false negatives (e.g. ssecs=1, factor=3000 --
            # truncated to 0 without a warning).
            if ((ssecs*1000) % factor) != 0:
                _warnings.warn('Precision lost in conversion to new subsecond factor',
                               RuntimeWarning)
            ssecs = (ssecs*1000) // factor
        return super(MilliTimeDelta, cls).nofix(secs, ssecs, 1000)
    @classmethod
    def from_seconds(cls, seconds):
        """Create a MilliTimeDelta from a (possibly fractional) second count."""
        factor = 1000
        return super(MilliTimeDelta, cls).from_seconds(seconds, factor)
    @property
    def milliseconds(self):
        """Get milliseconds."""
        return self.subseconds
    def total_milliseconds(self):
        """Return the total time as an integer in milliseconds."""
        return self.total_subseconds()
    def __repr__(self):
        # The factor is implied by the class, so only secs/ssecs are shown.
        return '{0}({1}, {2})'.format(self.__class__.__name__, self._secs,
                                      self._ssecs)
class MilliTime(MilliTimeDelta, SubSecTime):
    """Absolute UTC epoch time at millisecond resolution.

    Combines MilliTimeDelta's fixed factor with SubSecTime's absolute-time
    operator semantics (delta behaviour wins in the MRO).
    """
    pass
class MicroTimeDelta(SubSecTimeDelta):
    """A SubSecTimeDelta fixed to microsecond resolution (factor 10**6)."""
    # needed to set default factor
    def __new__(cls, secs=0, ssecs=0, factor=1000000):
        return SubSecTimeDelta.__new__(cls, secs, ssecs, factor)
    @classmethod
    def nofix(cls, secs=0, ssecs=0, factor=1000000):
        """Create an instance without normalization, converting `ssecs`
        from `factor` to microseconds.

        Warns when any conversion happens, and additionally when the
        original subsecond value cannot be represented exactly in
        microseconds.
        """
        if factor != 1000000:
            _warnings.warn('Converting subseconds to new factor', RuntimeWarning)
            # BUGFIX: test precision loss on the ORIGINAL value before the
            # truncating floor division (the old code tested the truncated
            # result and only for factor > 10**6, yielding false positives
            # and false negatives).
            if ((ssecs*1000000) % factor) != 0:
                _warnings.warn('Precision lost in conversion to new subsecond factor',
                               RuntimeWarning)
            ssecs = (ssecs*1000000) // factor
        return super(MicroTimeDelta, cls).nofix(secs, ssecs, 1000000)
    @classmethod
    def from_seconds(cls, seconds):
        """Create a MicroTimeDelta from a (possibly fractional) second count."""
        factor = 1000000
        return super(MicroTimeDelta, cls).from_seconds(seconds, factor)
    @property
    def microseconds(self):
        """Get microseconds."""
        return self.subseconds
    def total_microseconds(self):
        """Return the total time as an integer in microseconds."""
        return self.total_subseconds()
    def __repr__(self):
        # The factor is implied by the class, so only secs/ssecs are shown.
        return '{0}({1}, {2})'.format(self.__class__.__name__, self._secs,
                                      self._ssecs)
class MicroTime(MicroTimeDelta, SubSecTime):
    """Absolute UTC epoch time at microsecond resolution.

    Combines MicroTimeDelta's fixed factor with SubSecTime's absolute-time
    operator semantics (delta behaviour wins in the MRO).
    """
    pass
class NanoTimeDelta(SubSecTimeDelta):
    """A SubSecTimeDelta fixed to nanosecond resolution (factor 10**9)."""
    # needed to set default factor
    def __new__(cls, secs=0, ssecs=0, factor=1000000000):
        return SubSecTimeDelta.__new__(cls, secs, ssecs, factor)
    @classmethod
    def nofix(cls, secs=0, ssecs=0, factor=1000000000):
        """Create an instance without normalization, converting `ssecs`
        from `factor` to nanoseconds.

        Warns when any conversion happens, and additionally when the
        original subsecond value cannot be represented exactly in
        nanoseconds.
        """
        if factor != 1000000000:
            _warnings.warn('Converting subseconds to new factor', RuntimeWarning)
            # BUGFIX: test precision loss on the ORIGINAL value before the
            # truncating floor division (the old code tested the truncated
            # result and only for factor > 10**9, yielding false positives
            # and false negatives).
            if ((ssecs*1000000000) % factor) != 0:
                _warnings.warn('Precision lost in conversion to new subsecond factor',
                               RuntimeWarning)
            ssecs = (ssecs*1000000000) // factor
        return super(NanoTimeDelta, cls).nofix(secs,
                                               ssecs,
                                               1000000000)
    @classmethod
    def from_seconds(cls, seconds):
        """Create a NanoTimeDelta from a (possibly fractional) second count."""
        factor = 1000000000
        return super(NanoTimeDelta, cls).from_seconds(seconds, factor)
    @property
    def nanoseconds(self):
        """Get nanoseconds."""
        return self.subseconds
    def total_nanoseconds(self):
        """Return the total time as an integer in nanoseconds."""
        return self.total_subseconds()
    def __repr__(self):
        # The factor is implied by the class, so only secs/ssecs are shown.
        return '{0}({1}, {2})'.format(self.__class__.__name__, self._secs,
                                      self._ssecs)
class NanoTime(NanoTimeDelta, SubSecTime):
    """Absolute UTC epoch time at nanosecond resolution.

    Combines NanoTimeDelta's fixed factor with SubSecTime's absolute-time
    operator semantics (delta behaviour wins in the MRO).
    """
    pass
class PicoTimeDelta(SubSecTimeDelta):
    """A SubSecTimeDelta fixed to picosecond resolution (factor 10**12)."""
    # needed to set default factor
    def __new__(cls, secs=0, ssecs=0, factor=1000000000000):
        return SubSecTimeDelta.__new__(cls, secs, ssecs, factor)
    @classmethod
    def nofix(cls, secs=0, ssecs=0, factor=1000000000000):
        """Create an instance without normalization, converting `ssecs`
        from `factor` to picoseconds.

        Warns when any conversion happens, and additionally when the
        original subsecond value cannot be represented exactly in
        picoseconds.
        """
        if factor != 1000000000000:
            _warnings.warn('Converting subseconds to new factor', RuntimeWarning)
            # BUGFIX: test precision loss on the ORIGINAL value before the
            # truncating floor division (the old code tested the truncated
            # result and only for factor > 10**12, yielding false positives
            # and false negatives).
            if ((ssecs*1000000000000) % factor) != 0:
                _warnings.warn('Precision lost in conversion to new subsecond factor',
                               RuntimeWarning)
            ssecs = (ssecs*1000000000000) // factor
        return super(PicoTimeDelta, cls).nofix(secs,
                                               ssecs,
                                               1000000000000)
    @classmethod
    def from_seconds(cls, seconds):
        """Create a PicoTimeDelta from a (possibly fractional) second count."""
        factor = 1000000000000
        return super(PicoTimeDelta, cls).from_seconds(seconds, factor)
    @property
    def picoseconds(self):
        """Get picoseconds."""
        return self.subseconds
    def total_picoseconds(self):
        """Return the total time as an integer in picoseconds."""
        return self.total_subseconds()
    def __repr__(self):
        # The factor is implied by the class, so only secs/ssecs are shown.
        return '{0}({1}, {2})'.format(self.__class__.__name__, self._secs,
                                      self._ssecs)
class PicoTime(PicoTimeDelta, SubSecTime):
    """Absolute UTC epoch time at picosecond resolution.

    Combines PicoTimeDelta's fixed factor with SubSecTime's absolute-time
    operator semantics (delta behaviour wins in the MRO).
    """
    pass
| [
"ryan.volz@gmail.com"
] | ryan.volz@gmail.com |
686006acd784aeb64f48aa38eeb51d5c566319c7 | 1d11ff770c5530de4c18e83d9474d4c09c4376d2 | /igor/std-plugins/philips/scripts/philips.py | 0a6d1b43a4de640d5a4642c054379da4b21d6527 | [
"MIT"
] | permissive | bobandrey37/igor | 6660508639d90e7f44ea85146581685513b99ca2 | 41e163c8fa3da8ef13a337e1fe4268cf6fd7d07a | refs/heads/master | 2020-05-01T06:27:36.954089 | 2019-03-04T14:45:26 | 2019-03-04T14:45:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,929 | py | #!/usr/bin/python
from __future__ import print_function
from __future__ import unicode_literals
from future import standard_library
standard_library.install_aliases()
from builtins import object
import socket
import struct
import select
import json
import urllib.request, urllib.parse, urllib.error
import sys
DEBUG=False
# Candidate network prefixes; currently unused by the discovery code below.
ORDER = [
    ('192', '168', '1'),
    ('10', '0', '1'),
    ('10', '0', '2')
]
JOINTSPACE_PORT=1925
VOODOO_PORT=2323
VOODOO_VERSION=0x03010401
VPMT_DISCOVER=1
# Pre-built Voodoo DISCOVER datagram broadcast by JointSpaceRemote.findTV().
# NOTE(review): struct.pack with str (not bytes) arguments only works on
# Python 2 -- this line raises on Python 3 despite the __future__ imports.
VOODOO_DISCOVER = struct.pack('<l28xll16s96s96s96s', VOODOO_VERSION, VPMT_DISCOVER, 0, '1234567890123456', 'Python Control', 'Jack', 'Philips.py')
class JointSpaceRemote(object):
    """Minimal JointSpace (Philips TV) remote-control client.

    Discovers the TV on the LAN via the Voodoo UDP protocol, then talks to
    its JointSpace HTTP API on port 1925.  `cmd_*` methods are exposed as
    CLI sub-commands by main().
    """
    def __init__(self, ipaddr=None):
        # NOTE(review): `ipaddr` is accepted but ignored; discovery always
        # goes through findTV().
        self.tv = None
    def connect(self):
        """Locate the TV; return True on success, False when not found.

        NOTE(review): the unconditional `return False` makes the while loop
        run at most one iteration.
        """
        while not self.tv:
            self.tv = self.findTV()
            if self.tv:
                break
            if DEBUG: print("TV not found, is it turned on?'")
            return False
        return True
    def findTV(self, ipaddr=None):
        """Broadcast a Voodoo DISCOVER datagram and return the TV's IP.

        Returns None when nothing other than our own echo answers within
        the 5-second select() timeout.
        """
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
        sock.bind(('', VOODOO_PORT))
        if ipaddr:
            sock.sendto(VOODOO_DISCOVER, (ipaddr, VOODOO_PORT))
        else:
            sock.sendto(VOODOO_DISCOVER, ('<broadcast>', VOODOO_PORT))
        while True:
            result = select.select([sock], [], [], 5)
            if sock in result[0]:
                msg, sender = sock.recvfrom(2000)
                if DEBUG: print('Got message from', sender[0])
                # Ignore our own broadcast echo by comparing against every
                # address this host resolves to.
                myHostName = socket.gethostname()
                if not '.' in myHostName:
                    myHostName = myHostName + '.local'
                if not sender[0] in socket.gethostbyname_ex(myHostName)[2]:
                    # It is not our own message. It must be the Philips TV.
                    return sender[0]
            else:
                break
        return None
    def getData(self, path):
        """GET a JointSpace resource and return the decoded JSON payload."""
        assert self.tv
        url = 'http://%s:1925/1/%s' % (self.tv, path)
        if DEBUG: print('GET', url)
        data = urllib.request.urlopen(url).read()
        ##print 'RAW', data
        data = json.loads(data)
        ##print 'DECODED', data
        return data
    def putData(self, path, data):
        """POST a JSON payload to a JointSpace resource."""
        assert self.tv
        url = 'http://%s:1925/1/%s' % (self.tv, path)
        data = json.dumps(data)
        if DEBUG: print('POST %s DATA %s' % (url, data))
        data = urllib.request.urlopen(url, data).read()
        if data:
            if DEBUG: print('PUTDATA RETURNED', data)
    def curWatching(self):
        """Return (source-id, human-readable name) of what is being shown."""
        assert self.tv
        data = self.getData('sources/current')
        source = data['id']
        if source == 'tv':
            chanID = self.getData('channels/current')['id']
            chanInfo = self.getData('channels/%s' % chanID)
            name = chanInfo['name']
        else:
            names = self.getData('sources')
            name = names[source]['name']
        return source, name
    def cmd_sources(self):
        """List available input sources"""
        assert self.tv
        data = self.getData('sources')
        for source, descr in list(data.items()):
            print('%s\t%s' % (source, descr['name']))
    def cmd_channels(self):
        """List available TV channels"""
        assert self.tv
        data = self.getData('channels')
        all = []
        for fingerprint, descr in list(data.items()):
            all.append((int(descr['preset']), descr['name']))
        all.sort()
        for preset, name in all:
            print('%s\t%s' % (preset, name))
    def cmd_source(self, source=None):
        """Set to the given input source (or print current source)"""
        assert self.tv
        if source:
            self.putData('sources/current', {'id' : source })
        else:
            data = self.getData('sources/current')
            print(data['id'])
    def cmd_channel(self, channel=None):
        """Set to the given TV channel, by name, number or ID (or list current channel)"""
        assert self.tv
        if channel:
            data = self.getData('channels')
            # A channel can be addressed by internal ID, preset number or name.
            for chID, chDescr in list(data.items()):
                if chID == channel or chDescr['preset'] == channel or chDescr['name'] == channel:
                    self.putData('channels/current', { 'id' : chID })
                    self.putData('sources/current', {'id' : 'tv' })
                    return
            print('No such channel: %s' % channel, file=sys.stderr)
        else:
            data = self.getData('channels/current')
            chID = data['id']
            data = self.getData('channels')
            print('%s\t%s' % (data[chID]['preset'], data[chID]['name']))
    def cmd_volume(self, volume=None):
        """Change volume on the TV"""
        assert self.tv
        if volume is None:
            data = self.getData('audio/volume')
            muted = ' (muted)' if data['muted'] else ''
            print('%d%s' % (data['current'], muted))
        else:
            volume = int(volume)
            self.putData('audio/volume', { 'muted' : False, 'current' : volume })
    def cmd_json(self, data=None):
        """Return all data as a JSON object"""
        if data is None:
            data = {}
            volumeData = self.getData('audio/volume')
            data['volume'] = volumeData['current']
            data['muted'] = volumeData['muted']
            data['source'] = self.getData('sources/current')['id']
            data['power'] = True
            data['ip-address'] = self.tv
            data['url'] = 'http://%s:1925/1/' % (self.tv)
        else:
            # NOTE(review): setting state from a JSON argument is
            # intentionally unimplemented (assert 0).
            jData = json.loads(data)
            assert 0
        print(json.dumps(data))
    def cmd_help(self):
        """List available commands"""
        for name in dir(self):
            if name[:4] == 'cmd_':
                method = getattr(self, name)
                doc = method.__doc__
                print('%s\t%s' % (name[4:], doc))
def main():
    """CLI entry point: dispatch `philips.py [-d] <command> [args...]`."""
    if len(sys.argv) > 1 and sys.argv[1] == '-d':
        # -d enables debug tracing module-wide.
        global DEBUG
        DEBUG=True
        del sys.argv[1]
    tv = JointSpaceRemote()
    if not tv.connect():
        # The `json` sub-command must emit machine-readable output even when
        # the TV is off.
        if len(sys.argv) == 2 and sys.argv[1] == 'json':
            print('{"power":false}')
            sys.exit(0)
        print("TV not found, is it turned on?", file=sys.stderr)
        sys.exit(1)
    if len(sys.argv) <= 1:
        # No command: just report what the TV is currently showing.
        print(tv.curWatching())
    else:
        # Commands map onto cmd_<name> methods of JointSpaceRemote.
        cmdName = 'cmd_' + sys.argv[1]
        if not hasattr(tv, cmdName):
            print('Unknown command: %s. Use help for help' % sys.argv[1], file=sys.stderr)
            sys.exit(2)
        cmd = getattr(tv, cmdName)
        cmd(*sys.argv[2:])
| [
"Jack.Jansen@cwi.nl"
] | Jack.Jansen@cwi.nl |
1a5af30a4278d2716a06a58addc4f5fc79dd4118 | 60869f03b1f9d2ba0c1cd5012b53ce4fcb5b5c65 | /app/decorators.py | ac6930a0c1f1f68dc381831d9a2f04727d88f3e9 | [] | no_license | mjyplusone/FlaskyWeb | 5c9cd3aee7f6ce19685797c24ef2f7fc67d1dba0 | 875372e6f4a3f88cf08dfa0066beaf3578274f60 | refs/heads/master | 2020-06-12T03:34:34.369441 | 2017-02-28T05:42:48 | 2017-02-28T05:42:48 | 75,609,391 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 500 | py | from functools import wraps
from flask import abort
from flask_login import current_user
from .models import Permission
def permission_required(permission):
    """Decorator factory: abort with HTTP 403 unless the current user
    holds `permission`."""
    def decorator(view_func):
        @wraps(view_func)
        def guarded(*args, **kwargs):
            if current_user.can(permission):
                return view_func(*args, **kwargs)
            abort(403)
        return guarded
    return decorator
def admin_required(f):
    """Shortcut for permission_required(Permission.ADMINISTER)."""
    admin_only = permission_required(Permission.ADMINISTER)
    return admin_only(f)
"mjyplusone@qq.com"
] | mjyplusone@qq.com |
fc9754b4705724150760f3d0800f70772f6af4cc | 7b4aa8237342d3adf7e7187d286c178ead6e6c3e | /backend/customer/admin.py | 0de8fdf7d2662c0965ddfcf0fd66d40df4483452 | [] | no_license | vgk77/LSD | 3d6352a4cdb08c8382671628913343dd7bea9b0c | 7df7882a738c6eeab7d865729e37f3a2aa75738d | refs/heads/main | 2023-01-01T20:55:20.007924 | 2020-10-28T15:44:24 | 2020-10-28T15:44:24 | 303,938,225 | 0 | 0 | null | 2020-10-28T15:44:25 | 2020-10-14T07:35:59 | Python | UTF-8 | Python | false | false | 717 | py | from django.contrib import admin
from .models import Customer, Ticket
class TicketsInline(admin.TabularInline):
    """Inline ticket rows shown on the Customer admin page."""
    fields = ['message', 'status']
    model = Ticket
    extra = 0  # do not offer blank extra rows
    readonly_fields = ['message']  # message is immutable once filed
@admin.register(Customer)
class CustomerAdmin(admin.ModelAdmin):
    """Admin configuration for Customer, with their tickets inlined."""
    list_display = ('name', 'telegram_id', 'id')
    fields = ('id', 'telegram_id', 'name')
    readonly_fields = ('id', )
    search_fields = ('name', 'id')
    inlines = [TicketsInline]
@admin.register(Ticket)
class TicketAdmin(admin.ModelAdmin):
    """Admin configuration for Ticket; number and timestamps are generated."""
    list_display = ('number', 'topic', 'status', 'created_at', 'updated_at', )
    readonly_fields = ('number', 'created_at', 'updated_at')
    search_fields = ('topic', 'number', )
| [
"worka.zip@gmail.com"
] | worka.zip@gmail.com |
b0841733366ddb350f8674ffaadb4afba535b375 | d5504c795e16228dd2b8e21aa2d37b1e17f04b12 | /sari/Ventas/migrations/0005_auto_20210408_1636.py | 9c237e62b6f3149a8d69b8a6910057018d87d1f2 | [] | no_license | VianneyDeLosAngeles/locust | 37a003a50b11fad72d3af43d568b26249a19ed2f | d6fe614151f53aa5dcba431297a832ceb3ec8439 | refs/heads/master | 2023-08-29T15:11:06.112518 | 2021-04-16T05:51:15 | 2021-04-16T05:51:15 | 417,546,703 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 761 | py | # Generated by Django 3.0.5 on 2021-04-08 21:36
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: drops VentasSAP.canal, adds `texto`, and alters the
    default of `fechaInsercion`.

    NOTE(review): the datetime default was frozen by makemigrations at
    generation time (2021-04-08); it is a fixed value, not "now".
    """
    dependencies = [
        ('Ventas', '0004_auto_20210408_1405'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='ventassap',
            name='canal',
        ),
        migrations.AddField(
            model_name='ventassap',
            name='texto',
            field=models.CharField(help_text='Texto', max_length=200, null=True),
        ),
        migrations.AlterField(
            model_name='ventassap',
            name='fechaInsercion',
            field=models.DateTimeField(blank=True, default=datetime.datetime(2021, 4, 8, 16, 36, 26, 401651)),
        ),
    ]
| [
"vianneyapg@gmail.com"
] | vianneyapg@gmail.com |
64fefe69374a945cd3cee114ee12edf794271c0a | 3f2aceeccfa71a71998519721fe1480fd5cab1aa | /Python_cdoe/OldVer1/LED.py | 53233a640ab3f58a42ce01bb676c3670d93c5678 | [] | no_license | FullofQ/Raspberry-Pi | ab8b605ee7c5a74e7cecbc86121ea2c8af49791c | a67af5e57a07e866242464bfe89ef245be8d8477 | refs/heads/master | 2021-07-24T02:18:07.473980 | 2017-11-03T10:58:59 | 2017-11-03T10:58:59 | 107,524,187 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 416 | py | import RPi.GPIO as GPIO
import time
# BOARD Number Method,Based Pin Number
#GPIO.setmode(GPIO.BOARD)
GPIO.setmode(GPIO.BCM) #Set the mode to represent the numbering scheme you prefer
GPIO.cleanup() #Return all channels you have used back to inputs
GPIO.setwarnings(False)
#Output mode
GPIO.setup(17,GPIO.OUT)
while True:
GPIO.output(17,GPIO.HIGH)
time.sleep(1)
GPIO.output(17,GPIO.LOW)
time.sleep(1) | [
"s15115148@stu.edu.tw"
] | s15115148@stu.edu.tw |
b4f7bf104cbf6c49a715b1e44d45cd0c93367dd8 | fde59b1713268b3a885d23f515ad097d237b1f62 | /handwritten-digits-interface/controllers/handwrittenInputProcessing.py | 92d850fe4dd30cb1c280fd993ebddb26b91001e0 | [] | no_license | cathalhughes/eyelearn | 7e3df96ba4eccf6ab97804f519212bd9187adf09 | 20d85e7d8848712e71b058b23cbf7e6b08fd4321 | refs/heads/master | 2022-12-06T02:03:04.113795 | 2019-07-18T15:45:37 | 2019-07-18T15:45:37 | 197,613,504 | 0 | 0 | null | 2022-11-22T03:15:24 | 2019-07-18T15:30:41 | Python | UTF-8 | Python | false | false | 696 | py | import re
import numpy as np
from scipy.misc import imread, imresize
import base64
def convertHandwrittenImage(imgData):
    """Decode a base64 image string (optionally wrapped in a data URL) and
    write the raw bytes to 'output.png'."""
    match = re.search(r'base64,(.*)', imgData)
    payload = match.group(1) if match is not None else imgData
    with open('output.png', 'wb') as output:
        output.write(base64.b64decode(payload))
def processImage(filename):
    """Load an image file and shape it for the MNIST/EMNIST model:
    inverted grayscale, 28x28, float32 scaled to [0, 1], NHWC batch of one.

    NOTE(review): scipy.misc.imread/imresize were removed in SciPy >= 1.2;
    this function requires an old SciPy (with Pillow installed) to run.
    """
    img = imread(filename, mode='L')
    img = np.invert(img)  # white-on-black, matching (E)MNIST convention
    img = imresize(img, (28,28))
    img = img.astype('float32')  # model expects float input
    img = img.reshape(1,28,28,1)  # (batch, height, width, channels)
    img /= 255  # scale pixel values into [0, 1]
    return img
"cathalnhughes@gmail.com"
] | cathalnhughes@gmail.com |
1c0fdc97f402c72233bfa491f46d329a4bd03232 | c454f3741d84c658126978cc1ec7fc10265df531 | /test_piskvorky.py | 40879cbc0a560a368d4e549113046d3695c4b64a | [] | no_license | BarbPeters/piskvorky | 0d468e8feb4d83ca1d6689144369ec0cc219f987 | 544951c197bf9406028a05498a7d075da44797d8 | refs/heads/master | 2020-05-20T16:50:26.396221 | 2019-05-08T21:14:10 | 2019-05-08T21:14:10 | 185,673,769 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,446 | py | import pytest
from piskvorky import vyhodnot
from util import tah
from ai import tah_pocitace
def test_vyhodnot_vyhra_x():
    "Three 'x' in a row means x wins"
    assert vyhodnot('---xxx--------------') == 'x'
    assert vyhodnot('-----------------xxx') == 'x'
def test_vyhodnot_vyhra_o():
    "Three 'o' in a row means o wins"
    assert vyhodnot('---ooo--------------') == 'o'
    assert vyhodnot('-----------------ooo') == 'o'
def test_vyhodnot_remiza():
    "A full board (no '-' left) with no winner is a draw ('!')"
    assert vyhodnot('xoxoxoxoxoxoxoxoxoxx') == '!'
    assert vyhodnot('xxooxxooxxooxxooxxoo') == '!'
def test_tah_x():
    "tah places an 'x' at the given index"
    assert tah("--------------------", 2, "x") == "--x-----------------"
    assert tah("--------------------", 19, "x") == "-------------------x"
def test_tah_o():
    "tah places an 'o' at the given index"
    assert tah("--------------------", 2, "o") == "--o-----------------"
    assert tah("--------------------", 19, "o") == "-------------------o"
def test_tah_pocitace_na_prazdne_pole():
    "Computer move on an empty board adds exactly one 'o'"
    pole = "--------------------"
    result = tah_pocitace(pole)
    assert result.count("-") == 19
    assert result.count("o") == 1
def test_tah_pocitace_na_pole_3_kolo():
    "Computer move after round three adds one 'o' and leaves 'x' count unchanged"
    pole = "--oxxo---x----------"
    result = tah_pocitace(pole)
    assert result.count("x") == 3
    assert result.count("o") == 3
    assert result.count("-") == 14
| [
"noreply@github.com"
] | noreply@github.com |
cd4b3ca4bc12da0a8c58e0280049e0e7f7db7603 | 1f9daaf63e0c170b39d444537a2e333c5650756c | /myvenv/bin/easy_install-3.7 | ee109a1c69929489c49b8bb5cf124153a6560115 | [] | no_license | nimuseel/django-study-project | 8795cdc207c14b2f68e9cea8b602f836c801431b | 5b25ea702991eb1140e1baccbfdb41d4fd8d6692 | refs/heads/master | 2022-04-11T11:25:02.897456 | 2020-03-27T08:44:43 | 2020-03-27T08:44:43 | 250,214,659 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 264 | 7 | #!/Users/tommy/dev/djangogirls/myvenv/bin/python3
# -*- coding: utf-8 -*-
# Auto-generated setuptools console script: normalizes argv[0] (strips the
# "-script.py"/".exe" wrapper suffix) and delegates to easy_install's main().
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"l08c0739@gmail.com"
] | l08c0739@gmail.com |
94d029748f12974d0eeda80e4a4d9eba57e89393 | c60e9a7b98862c3325cf74c60d7972faf5060872 | /leetcode_2.py | 8e3b892328d7c0eed5bf668fbc7734b696a6104f | [] | no_license | CharleXu/Leetcode | dd4bea4f96c486f85dd4efb846e769ebd05a84ed | 3f8f954dce580119a741f638d59bdaa17f552223 | refs/heads/master | 2022-10-15T20:33:11.766045 | 2020-06-18T03:18:10 | 2020-06-18T03:18:10 | 266,402,592 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 911 | py | # coding: utf-8
class ListNode:
    """Singly linked-list node holding a value and a reference to the next node."""
    def __init__(self, val=0, next=None):
        self.val, self.next = val, next
def add_two_number(l1, l2):
    """Add two non-negative integers stored as linked lists with the least
    significant digit first, returning the sum in the same representation.

    Arguments:
    l1, l2 -- heads of the two digit lists (each node.val in 0..9).

    Returns:
    Head of a new list holding the digits of the sum.

    Fixes: removed the unused locals `p, q`; carry/digit split now uses
    divmod instead of separate // and % operations.
    """
    dummy = ListNode(0)  # sentinel so the loop never special-cases the head
    curr = dummy
    carry = 0
    while l1 or l2 or carry:
        x = l1.val if l1 else 0
        y = l2.val if l2 else 0
        carry, digit = divmod(x + y + carry, 10)
        curr.next = ListNode(digit)
        curr = curr.next
        l1 = l1.next if l1 else None
        l2 = l2.next if l2 else None
    return dummy.next
if __name__ == "__main__":
    # Demo: 10 (0 -> 1) plus 210 (0 -> 1 -> 2) should print "0 -> 2 -> 2".
    l1 = ListNode(0)
    l2 = ListNode(0)
    l1.next = ListNode(1)
    # l1.next.next = ListNode(3)
    l2.next = ListNode(1)
    l2.next.next = ListNode(2)
    ret = add_two_number(l1, l2)
    while ret.next:
        print(ret.val, end=" -> ")
        ret = ret.next
    print(ret.val)
| [
"superxc0102@gmail.com"
] | superxc0102@gmail.com |
ab901473dffa5b0193e527d92cfcb1858fbdeb1e | d2fb9c37c966c2f17f2d2a16bf260855ec7a6abd | /categorical_variables.py | 558b20bcadeaadb0af2cf73f33de1979163beda2 | [] | no_license | DimitrOskol/Kaggle-Iowa-Housing-Prices | a6b1cc1c0cf58f72a61c6db7716918382e2615e3 | dda067854a71b3e819fd6a721314c986ac563c48 | refs/heads/main | 2023-03-18T19:22:27.672634 | 2021-02-19T22:06:24 | 2021-02-19T22:06:24 | 340,499,470 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,953 | py | import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import LabelEncoder
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error
X = pd.read_csv('iowa_train.csv', index_col='Id')
X_test = pd.read_csv('iowa_test.csv', index_col='Id')
y = X.SalePrice
X.drop(['SalePrice'], axis=1, inplace=True)
cols_with_missing = [col for col in X.columns if X[col].isnull().any()]
test_cols_with_missing = [col for col in X_test.columns if X_test[col].isnull().any()]
object_cols = [col for col in X.columns if X[col].dtype == 'object']
object_cols_with_missing_nd = pd.merge(pd.DataFrame(object_cols), pd.DataFrame(cols_with_missing), how='inner').values.tolist()
object_cols_with_missing = []
for col in object_cols_with_missing_nd:
object_cols_with_missing.append(''.join(col))
test_object_cols_with_missing_nd = pd.merge(pd.DataFrame(object_cols), pd.DataFrame(test_cols_with_missing), how='inner').values.tolist()
test_object_cols_with_missing = []
for col in test_object_cols_with_missing_nd:
test_object_cols_with_missing.append(''.join(col))
low_cardinality_cols = [col for col in object_cols if X[col].nunique()<10]
high_cardinality_cols = [col for col in object_cols if X[col].nunique()>=10]
missing_count_X = X[object_cols_with_missing].isnull().sum()
cols_alot_missing = missing_count_X[missing_count_X>700].index
cols_some_missing = missing_count_X[missing_count_X<700]
cols_some_missing = cols_some_missing[cols_some_missing>0].index
X.drop(cols_alot_missing, axis=1, inplace=True)
X_test.drop(cols_alot_missing, axis=1, inplace=True)
for col in cols_alot_missing:
test_object_cols_with_missing.remove(col)
low_cardinality_cols.remove(col)
mfreq_imputer = SimpleImputer(strategy = 'most_frequent')
X_object_imputed = pd.DataFrame(mfreq_imputer.fit_transform(X[cols_some_missing]))
X_object_imputed.columns = cols_some_missing
X_object_imputed.index = X.index
X_without_imputed_cols = X.drop(cols_some_missing, axis=1)
X_all = pd.concat([X_object_imputed, X_without_imputed_cols], axis=1)
X_test_object_imputed = pd.DataFrame(mfreq_imputer.transform(X_test[cols_some_missing]))
X_test_object_imputed.columns = cols_some_missing
X_test_object_imputed.index = X_test.index
X_test_without_imputed_cols = X_test.drop(cols_some_missing, axis=1)
#X_test_without_imputed_cols = X_test_without_imputed_cols.drop(cols_alot_missing)
X_test_all = pd.concat([X_test_object_imputed, X_test_without_imputed_cols], axis=1)
for col in cols_some_missing:
if col in test_object_cols_with_missing:
test_object_cols_with_missing.remove(col)
X_test_object_imputed = pd.DataFrame(mfreq_imputer.fit_transform(X_test_all[test_object_cols_with_missing]))
X_test_object_imputed.columns = test_object_cols_with_missing
X_test_object_imputed.index = X_test.index
X_test_without_imputed_cols = X_test_all.drop(test_object_cols_with_missing, axis=1)
X_test_all = pd.concat([X_test_object_imputed, X_test_without_imputed_cols], axis=1)
aligned_X_all, aligned_X_test_all = X_all.align(X_test_all, join='left', axis=1)
OH_encoder = OneHotEncoder(handle_unknown='ignore', sparse=False)
OH_X_all = pd.DataFrame(OH_encoder.fit_transform(aligned_X_all[low_cardinality_cols]))
OH_X_test_all = pd.DataFrame(OH_encoder.transform(aligned_X_test_all[low_cardinality_cols]))
OH_X_all.index = X.index
OH_X_test_all.index = X_test.index
num_X_all = X_all.drop(low_cardinality_cols, axis=1)
num_X_test_all = X_test_all.drop(low_cardinality_cols, axis=1)
OH_X = pd.concat([OH_X_all, num_X_all], axis=1)
OH_X_test = pd.concat([OH_X_test_all, num_X_test_all], axis=1)
aligned_OH_X, aligned_OH_X_test = OH_X.align(OH_X_test, join='left', axis=1)
label_X = aligned_OH_X.copy()
label_X_test = aligned_OH_X_test.copy()
label_encoder = LabelEncoder()
for col in high_cardinality_cols:
label_X[col] = label_encoder.fit_transform(label_X[col])
label_X_test[col] = label_encoder.transform(label_X_test[col])
mean_imputer = SimpleImputer(strategy = 'mean')
X_processed = pd.DataFrame(mean_imputer.fit_transform(label_X))
X_processed.columns = label_X.columns
X_test_processed = pd.DataFrame(mean_imputer.transform(label_X_test))
X_test_processed.columns = label_X_test.columns
X_train, X_val, y_train, y_val = train_test_split(X_processed, y, train_size=0.8,
test_size=0.2)
RF_model = RandomForestRegressor(n_estimators=100)
RF_model.fit(X_train, y_train)
preds = RF_model.predict(X_val)
mae = mean_absolute_error(preds, y_val)
print('mae = ', mae)
preds_test = RF_model.predict(X_test_processed)
output = pd.DataFrame({'Id': X_test.index,
'SalePrice': preds_test})
output.to_csv('submission.csv', index=False)
| [
"noreply@github.com"
] | noreply@github.com |
4ecdac42392ce61a90b3474bfac3c7a16bf8a8ab | 2648f6dd6d3bd107d5587070664d05f339229d22 | /app/controllers/Produto_controller.py | cd6ffbfe469511e93cb8396e04239797d868592c | [] | no_license | mbbotelho/FastFango | 4e1dbf754df150fc567e8d8b1a6c434b7b5826b4 | c67df1b8131c98247ebb8e4974b0f1474b355a2b | refs/heads/master | 2021-01-23T13:16:20.818836 | 2017-06-03T09:05:41 | 2017-06-03T09:05:41 | 93,236,232 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 825 | py | from app import app
from app.dao.Produto_dao import Produto_dao
from app.service.Produto_service import Produto_service
from flask import jsonify
import json
service = Produto_service()
@app.route("/produto")
def teste():
    """Smoke-test route: persists a hard-coded Produto and returns the
    service result as JSON."""
    i=Produto_dao("batata", 1, 4, 2, "sim")
    #self, nome, unidade_medida, quantidade, qtd_minima, item_estoque_vld
    return jsonify(service.salvar(i))
@app.route("/produto/list")
def lista_todos():
    """Return all Produto records as JSON.

    Fix: findAll() was invoked three times (one service/DB hit per call);
    the result is now fetched once and reused.
    """
    produtos = service.findAll()
    print("dumps", json.dumps(produtos))
    return jsonify(produtos)
@app.route("/produto/<id>")
def findById_produto(id):
    """Look up a Produto by id.

    NOTE(review): the looked-up record is discarded and a plain 'ok' is
    returned -- the route never exposes the data it fetches.
    """
    service.findById(id)
    return 'ok'
@app.route("/produto/update/<id>")
def update_produto(id):
    """Re-save the Produto with the given id.

    NOTE(review): a GET route performing a write is unconventional; the
    record is updated with its own unchanged data.
    """
    produto = service.findById(id)
    print(produto.nome)
    service.update(produto)
    return 'ok'
"mbbotelho23@gmail.com"
] | mbbotelho23@gmail.com |
9f7513aceb03d3a629148eb93790f2bd922608ca | 6c2ecefb12be6b04f597e3fb887d9389050aa7e1 | /DjangoCourse/第三周/djangologin/djangologin/settings.py | ca207ee45368d5f381d99a9266ac9e571e9357b6 | [] | no_license | GmyLsh/learngit | 99d3c75843d2b0b873f26e098025832985c635b3 | 3e7993c7119b79216fea24e5e35035336e4f5f5b | refs/heads/master | 2020-04-12T09:11:55.068312 | 2018-12-19T07:19:42 | 2018-12-19T07:19:42 | 162,395,001 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,616 | py | """
Django settings for djangologin project.
Generated by 'django-admin startproject' using Django 2.1.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control -- rotate it and
# load it from an environment variable before any production deployment.
SECRET_KEY = '5cwed$8ury*r$q%)b-vm$(x@z_sqrja($d)nxu#of#&+(3zwg1'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'login.apps.LoginConfig',
'hashlogin.apps.HashloginConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'djangologin.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')]
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'djangologin.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
# NOTE(review): MySQL credentials are hard-coded (root/123456); move them to
# environment variables or a secrets store before sharing or deploying.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'NAME': 'login',
        'USER':'root',
        'PASSWORD':'123456',
        'HOST':'localhost'
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
#app下
STATIC_URL = '/static/'
#根目录下
STATICFILES_DIRS=[os.path.join(BASE_DIR,'static')]
#覆盖默认的用户模型,使用自定义的模型
#语法:'app的名称.自定义用户模型的名称'
AUTH_USER_MODEL='login.UserModel'
#使用@login_required这个装饰器必须设置LOGIN_URL,这个LOGIN_URL就是django用于自动跳转的地址
LOGIN_URL='/login/' | [
"469192981@qq.com"
] | 469192981@qq.com |
090677df534132aebe2bf73fe11a9e41e20fc9b5 | 45ac272d01596731861d92551c0a113df7feb634 | /python/nn_test_multilayer.py | ac3dd9e6862337117770748832dc21e20518650d | [] | no_license | mikst/A.I.F.L. | 4a4b8bc443ab70079ff1e923a356a7dd4aa79488 | e8296c79a6d9e4c67f4978ea01a4ff20fb1378e3 | refs/heads/master | 2020-08-08T21:26:02.768111 | 2019-11-21T18:23:39 | 2019-11-21T18:23:39 | 213,923,425 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,349 | py | import numpy
import pyttsx3
data_1 = [3,2,4,2.5,3.5,2,5.5,1,4.5]
data_2 = [1.5,1,1.5,1,0.5,0.5,1,1,1]
target = [1,0,1,0,1,0,1,0,1]
engine = pyttsx3.init()
def sigmoid(x):
return 1/(1+numpy.exp(-x))
# def predict(x,y, w1_1, w1_2, b1,w2_1, w2_2, b2, w3_1, w3_2, b3 ):
def predict(x, y):
print("x: ", x, " y:", y)
# calculate the prediction starting with the random weight and bias
z1 = x * w1_1 + y * w1_2 + b1
# make a prediction using sigmoid
prediction1 = sigmoid(z1)
# calculate the prediction starting with the random weight and bias
z2 = (x * w2_1 ) + (y * w2_2) + b2
# make a prediction using sigmoid
prediction2 = sigmoid(z2)
# calculate the prediction starting with the random weight and bias
z3 = prediction1 * w3_1 + prediction2 * w3_2 + b3
# make a prediction using sigmoid
prediction3 = sigmoid(z3)
print("result: ",prediction3)
if prediction3>0.5:
engine.say('red flower')
engine.runAndWait()
else:
engine.say('blue flower')
engine.runAndWait()
w1_1 = numpy.random.randn()
w1_2 = numpy.random.randn()
b1 = numpy.random.randn()
w2_1 = numpy.random.randn()
w2_2 = numpy.random.randn()
b2 = numpy.random.randn()
w3_1 = numpy.random.randn()
w3_2 = numpy.random.randn()
b3 = numpy.random.randn()
learningRate = 0.2
#training loop, number represents the training number, usually big
for i in range(100000):
# pick random data point
num=numpy.random.randint(low=0,high=8)
# calculate the prediction starting with the random weight and bias
z1 = data_1[num] * w1_1 + data_2[num] * w1_2 + b1
# make a prediction using sigmoid
prediction1 = sigmoid(z1)
# calculate the prediction starting with the random weight and bias
z2 = data_1[num] * w2_1 + data_2[num] * w2_2 + b2
# make a prediction using sigmoid
prediction2 = sigmoid(z2)
# calculate the prediction starting with the random weight and bias
z3 = prediction1 * w3_1 + prediction2 * w3_2 + b3
# make a prediction using sigmoid
prediction3 = sigmoid(z3)
#----------------------------------
#compare the model prediction with the actual target value
cost3= (prediction3 - target[num])**2
#find the slope of the cost w, r, t each parameter (w1 w2 b)
#bring derivative through square function
dcost_dpred3= 2* (prediction3 - target[num])
#bring derivative through sigmoid (prediction is sigmoid)
#dpred_dz = sigmoid(z) * (1-sigmoid(z))
dpred_dz3 = prediction3 * (1-prediction3)
dz_dw3_1=prediction1
dz_dw3_2=prediction2
dz_db3=1
#pertial derivatives using the chain rule
dcost_dw3_1=dcost_dpred3*dpred_dz3*dz_dw3_1
dcost_dw3_2=dcost_dpred3*dpred_dz3*dz_dw3_2
dcost_db3=dcost_dpred3*dpred_dz3*dz_db3
#adjust the parameters
w3_1_orig = w3_1
w3_2_orig = w3_2
w3_1-=learningRate*dcost_dw3_1
w3_2-=learningRate*dcost_dw3_2
b3-=learningRate*dcost_db3
#---------------------------------
dE_dNet = dcost_dpred3 * dpred_dz3
dNet_dHout1 = w3_1_orig
dE_dHout1 = dE_dNet * dNet_dHout1
dHout1_dHnet1 = prediction1 * (1-prediction1)
# w1_1
dHnet1_dW1_1 = data_1[num]
dE_dW1_1 = dE_dHout1 * dHout1_dHnet1 * dHnet1_dW1_1
w1_1 -= learningRate * dE_dW1_1
# w1_2
dHnet1_dW1_2 = data_2[num]
dE_dW1_2 = dE_dHout1 * dHout1_dHnet1 * dHnet1_dW1_2
w1_2 -= learningRate * dE_dW1_2
# b
b1 -= learningRate * dE_dHout1 * dHout1_dHnet1
#----------------------------
dNet_dHout2 = w3_2_orig
dE_dHout2 = dE_dNet * dNet_dHout2
dHout2_dHnet2 = prediction2 * (1-prediction2)
# w2_1
dHnet2_dW2_1 = data_1[num]
dE_dW2_1 = dE_dHout2 * dHout2_dHnet2 * dHnet2_dW2_1
w2_1 -= learningRate * dE_dW2_1
# w2_2
dHnet2_dW2_2 = data_2[num]
dE_dW2_2 = dE_dHout2 * dHout2_dHnet2 * dHnet2_dW2_2
w2_2 -= learningRate * dE_dW2_2
# b
b2 -= learningRate * dE_dHout2 * dHout2_dHnet2
print ("w1_1: " , w1_1 ," w1_2: ", w1_2 ," b1: " , b1, "w2_1: " , w2_1 ," w2_2: ", w2_2 ," b2: " , b2, "w3_1: " , w3_1 ," w3_2: ", w3_2 ," b3: " , b3)
while True:
petal1=float(input("petal width?" + "\n"))
petal2=float(input("petal height?" + "\n"))
# predict(petal1, petal2, w1_1, w1_2, b1, w2_1, w2_2, b2, w3_1, w3_2, b3 )
predict(petal1, petal2)
| [
"mika@kobakant.at"
] | mika@kobakant.at |
deca0390c459e8571885bdca335edf175dec7b97 | 216ef3802b5d08704458bfc5c15a520147993357 | /change_desktop_background.pyw | 4f5128649f5bb275ff22f280308bdc00a8f11667 | [] | no_license | softode-code/Change-Desktop-Background | fed6b40a04e9467baaa2bf105878fe52061433a3 | b227d371ee586fca4d4663e9c365a9a902eeaf76 | refs/heads/master | 2022-11-21T14:01:17.351210 | 2020-07-19T12:27:15 | 2020-07-19T12:27:15 | 262,391,789 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 419 | pyw | import os
import random
import ctypes
import schedule
import time
from datetime import datetime
Path = "E:\\Wallpapers\\"
SPI_SETDESKWALLPAPER = 20
def changeBG():
random.seed(datetime.now())
path = Path+random.choice(os.listdir(Path))
ctypes.windll.user32.SystemParametersInfoW(SPI_SETDESKWALLPAPER,0,path,3)
schedule.every(1).hours.do(changeBG)
while True:
schedule.run_pending()
time.sleep(1) | [
"softode-code@users.noreply.github.com"
] | softode-code@users.noreply.github.com |
924612ceeb29b831cae1895f7c121375eb4ec93c | b3bb5e8c09527bda5bedd12cfcc782d3d752bf9c | /romanNumerals.py | c24b4adbe8dfb330168369db996003d489419ec6 | [] | no_license | anton2mihail/Fun-Programs | 70c3d97d76d123d0826ca36adaee4d68cf003a69 | 1ad00f63cc313fc4f3b63193690f3e60d92033b7 | refs/heads/master | 2021-01-22T08:19:07.630117 | 2018-06-07T19:02:09 | 2018-06-07T19:02:09 | 81,893,049 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 409 | py |
def is_valid_parenthese(str1):
stack, pchar = [], {"(": ")", "{": "}", "[": "]"}
for parenthese in str1:
if parenthese in pchar:
stack.append(parenthese)
elif len(stack) == 0 or pchar[stack.pop()] != parenthese:
return False
return len(stack) == 0
print(is_valid_parenthese("(){}[]"))
print(is_valid_parenthese("()[{)}"))
print(is_valid_parenthese("()"))
| [
"anton2mihail@users.noreply.github.com"
] | anton2mihail@users.noreply.github.com |
5d7616919ca0cc04cfb36d6ff45bdd5136ce1478 | 355949c034fb8cc220800bcc956d317b3e92d343 | /src2/functions.py | 248df755e92cb47635278117038c9450f22e8c35 | [] | no_license | icanevaro/lecture2 | ab28e62a2ee909a95137dac786b5034662df386e | 747a238565480e2cdec93a3a45b19a3e5bc4933a | refs/heads/master | 2021-05-22T17:15:04.860840 | 2020-04-05T16:12:10 | 2020-04-05T16:12:10 | 253,016,723 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,070 | py | #FUNCIONES#
#Defino una funcion propia, con un argumento que va a ser "x"
def square(x):
#Si vos me pedis "x" te voy a devolver x*x
return x * x
#Voy definir la funcion para cuando lo quiera importar en otro archivo no me traiga esto tambien
def main():
#Y aca voy a utilizar la funcion que arme, para hacer un loop y decir: el numero que ponga al cuadrado es igual a ...
#Uso un .format para definir que va entre {}.
for i in range(10):
print("{} squared is {}".format(i, square(i)))
#lo que me va a mostrar es: de 0 a 9 ese texto que estoy definiendo, con el resultado automatico
#Pregunta:Tengo que definir la funcion antes de usarla?
#Si, tengo que definirla antes, excepto que ya exista. Tiene que estar antes del for la funcion.
#Python lee de arriba para abajo.
#Hay un work around.
#Podemos usar funciones que escribiste en otro archivo.
#Y tengo que agregar algo mas para que no se ejecute cuando lo importo
#agrego, si name igual main, si estoy corriendo este archivo, corre la main function
if __name__ == "__main__":
main() | [
"62725762+icanevaro@users.noreply.github.com"
] | 62725762+icanevaro@users.noreply.github.com |
676b57edf2543587624cb7fb53630425c91c775f | 7c1892d60f07848756cefe0dea0cce7292c7c572 | /database/add.py | a4e5e1046441fc9f119b92398f0c094ccabc923e | [] | no_license | cherryMonth/BWC | 31d92a583b0ff35a18368a2c2ccfdb8d549dd7e1 | 187430bbc9e81d1cbc8721fd423f9b0488e0e78d | refs/heads/master | 2021-01-01T17:29:17.655717 | 2017-07-28T04:40:20 | 2017-07-28T04:40:20 | 98,082,540 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,605 | py | # coding=utf-8
import csv
import os
import collections
class Add(object):
@staticmethod
def add(filename, key_list=None):
if not os.path.exists(filename):
index = 1
else:
index = 0
try:
with open(filename, 'ab') as csv_file:
if not key_list:
csv_file.close()
return True
def num(string):
count = 0
for n in string:
count = count + ord(n)
return count
error = []
for key in key_list:
d = collections.OrderedDict()
key = sorted(key.items(), key=lambda x: num(x[0]))
for k in key:
d[k[0]] = k[1]
error.append(d)
key_list = error
row_name = key_list[0].keys() # 类变量记录列名
writer = csv.DictWriter(csv_file, fieldnames=row_name)
if index == 1:
writer.writerow(dict(zip(row_name, row_name))) # 写表头
for key in key_list:
writer.writerow(key) # 写数据
csv_file.close()
return True
except IOError:
print "File open error : " + filename + "\nplease check the filename"
return False
if __name__ == '__main__':
Add().add('b.csv',[{'WeChatID': 'wonka80', 'TeacherName': '王珂'}])
| [
"1115064450@qq.com"
] | 1115064450@qq.com |
971d7dd8a6043373601442e66cf7e7273aa6e600 | 255ba93bca8ef858239d4beb67c20f342eeb1155 | /Day 3/prims.py | 3f47e6c2396b71efc0718089df5adb7164fb3e40 | [] | no_license | axayjha/cs16v3 | 2b2596cf63ca59e078b495e60176e8f104021e63 | 080675e34d8cbe1cb7a21a43d27bd14f3b5cff96 | refs/heads/master | 2021-08-24T12:18:53.987370 | 2017-12-09T20:01:23 | 2017-12-09T20:01:23 | 98,687,228 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,046 | py | class pqueue(object):
def __init__(self, List=None):
self.list = List
def extract_min(self):
a= sorted(self.list, key= lambda l: l.key)[0]
self.list.remove(a)
return a
class vertex(object):
def __init__(self, name):
self.name=name
self.key=99999
self.p=None
class graph(object):
def __init__(self):
self.V = []
self.Adj = {}
def w(u,v):
W = { a: {b:1 ,c:10, d:3}, b:{a:1, e:2}, c:{a:10, d:4}, d:{c:4, e:15, a:3}, e:{b:2, d:15} }
return W[u][v]
a=vertex('a')
b=vertex('b')
c=vertex('c')
d=vertex('d')
e=vertex('e')
a.key=0
G = graph()
G.V = [a,b,c,d,e]
G.Adj = {a:[b,c,d], b:[a,e], c:[a,d], d:[e,c], e:[b,d]}
Q=pqueue(G.V[:])
weight=0
while(Q.list!=[]):
u=Q.extract_min()
for v in G.Adj[u]:
if v in Q.list and w(u,v) < v.key:
v.p = u
v.key = w(u,v)
for i in G.V[1:]:
weight+=i.key
print(i.name + " " + i.p.name)
print("Weight of the mst: " + str(weight))
| [
"noreply@github.com"
] | noreply@github.com |
7a8b58333685500dc2a271d37474d768546b5291 | 62705726424a3dc4dc8316fff696dd04d632a33b | /tests/profiling/collector/test_stack.py | f791d5417cca2f3dbd11b31f374fd950feadfe14 | [
"Apache-2.0",
"BSD-3-Clause"
] | permissive | YasuoSasaki/dd-trace-py | f6e051545ff1c5741cb416ab6edded67b9eb5a13 | aa3214671d7843447aa11ba1aea1202eae4aff5a | refs/heads/master | 2022-08-23T21:34:05.984284 | 2020-05-06T21:32:55 | 2020-05-06T21:32:55 | 262,933,315 | 0 | 0 | NOASSERTION | 2020-05-11T03:55:18 | 2020-05-11T03:55:17 | null | UTF-8 | Python | false | false | 5,870 | py | import os
import threading
import time
import timeit
import pytest
from ddtrace.vendor import six
from ddtrace.vendor.six.moves import _thread
from ddtrace.profiling import recorder
from ddtrace.profiling.collector import stack
from . import test_collector
TESTING_GEVENT = os.getenv("DD_PROFILE_TEST_GEVENT", False)
try:
from gevent import monkey
except ImportError:
sleep = time.sleep
else:
sleep = monkey.get_original("time", "sleep")
def func1():
return func2()
def func2():
return func3()
def func3():
return func4()
def func4():
return func5()
def func5():
return sleep(1)
def test_collect_truncate():
r = recorder.Recorder()
c = stack.StackCollector(r, nframes=5)
c.start()
func1()
while not r.events[stack.StackSampleEvent]:
pass
c.stop()
e = r.events[stack.StackSampleEvent][0]
assert e.nframes > c.nframes
assert len(e.frames) == c.nframes
def test_collect_once():
r = recorder.Recorder()
s = stack.StackCollector(r)
# Start the collector as we need to have a start time set
with s:
all_events = s.collect()
assert len(all_events) == 2
e = all_events[0][0]
assert e.thread_id > 0
# Thread name is None with gevent
assert isinstance(e.thread_name, (str, type(None)))
assert len(e.frames) >= 1
assert e.frames[0][0].endswith(".py")
assert e.frames[0][1] > 0
assert isinstance(e.frames[0][2], str)
def test_max_time_usage():
r = recorder.Recorder()
with pytest.raises(ValueError):
stack.StackCollector(r, max_time_usage_pct=0)
def test_max_time_usage_over():
r = recorder.Recorder()
with pytest.raises(ValueError):
stack.StackCollector(r, max_time_usage_pct=200)
def test_ignore_profiler():
r, c, thread_id = test_collector._test_collector_collect(stack.StackCollector, stack.StackSampleEvent)
events = r.events[stack.StackSampleEvent]
assert thread_id not in {e.thread_id for e in events}
def test_no_ignore_profiler():
r, c, thread_id = test_collector._test_collector_collect(
stack.StackCollector, stack.StackSampleEvent, ignore_profiler=False
)
events = r.events[stack.StackSampleEvent]
assert thread_id in {e.thread_id for e in events}
def test_collect():
test_collector._test_collector_collect(stack.StackCollector, stack.StackSampleEvent)
def test_restart():
test_collector._test_restart(stack.StackCollector)
def test_repr():
test_collector._test_repr(
stack.StackCollector,
"StackCollector(status=<ServiceStatus.STOPPED: 'stopped'>, "
"recorder=Recorder(max_size=49152), max_time_usage_pct=2.0, "
"nframes=64, ignore_profiler=True)",
)
def test_new_interval():
r = recorder.Recorder()
c = stack.StackCollector(r)
new_interval = c._compute_new_interval(1000000)
assert new_interval == 0.049
new_interval = c._compute_new_interval(2000000)
assert new_interval == 0.098
c = stack.StackCollector(r, max_time_usage_pct=10)
new_interval = c._compute_new_interval(200000)
assert new_interval == 0.01
new_interval = c._compute_new_interval(1)
assert new_interval == c.MIN_INTERVAL_TIME
# Function to use for stress-test of polling
MAX_FN_NUM = 30
FN_TEMPLATE = """def _f{num}():
return _f{nump1}()"""
for num in range(MAX_FN_NUM):
if six.PY3:
exec(FN_TEMPLATE.format(num=num, nump1=num + 1))
else:
exec(FN_TEMPLATE.format(num=num, nump1=num + 1))
exec(
"""def _f{MAX_FN_NUM}():
try:
raise ValueError('test')
except Exception:
sleep(2)""".format(
MAX_FN_NUM=MAX_FN_NUM
)
)
@pytest.mark.skipif(TESTING_GEVENT, reason="Test not compatible with gevent")
def test_stress_threads():
NB_THREADS = 20
threads = []
for i in range(NB_THREADS):
t = threading.Thread(target=_f0) # noqa: E149,F821
t.start()
threads.append(t)
s = stack.StackCollector(recorder=recorder.Recorder())
number = 10000
with s:
exectime = timeit.timeit(s.collect, number=number)
print("%.3f ms per call" % (1000.0 * exectime / number))
for t in threads:
t.join()
@pytest.mark.skipif(not stack.FEATURES["stack-exceptions"], reason="Stack exceptions not supported")
@pytest.mark.skipif(TESTING_GEVENT, reason="Test not compatible with gevent")
def test_exception_collection_threads():
NB_THREADS = 5
threads = []
for i in range(NB_THREADS):
t = threading.Thread(target=_f0) # noqa: E149,F821
t.start()
threads.append(t)
r, c, thread_id = test_collector._test_collector_collect(stack.StackCollector, stack.StackExceptionSampleEvent)
exception_events = r.events[stack.StackExceptionSampleEvent]
e = exception_events[0]
assert e.timestamp > 0
assert e.sampling_period > 0
assert e.thread_id in {t.ident for t in threads}
assert isinstance(e.thread_name, str)
assert e.frames == [("<string>", 5, "_f30")]
assert e.nframes == 1
assert e.exc_type == ValueError
for t in threads:
t.join()
@pytest.mark.skipif(not stack.FEATURES["stack-exceptions"], reason="Stack exceptions not supported")
def test_exception_collection():
r = recorder.Recorder()
c = stack.StackCollector(r)
c.start()
try:
raise ValueError("hello")
except Exception:
sleep(1)
c.stop()
exception_events = r.events[stack.StackExceptionSampleEvent]
assert len(exception_events) >= 1
e = exception_events[0]
assert e.timestamp > 0
assert e.sampling_period > 0
if not TESTING_GEVENT:
assert e.thread_id == _thread.get_ident()
assert e.thread_name == "MainThread"
assert e.frames == [(__file__, 207, "test_exception_collection")]
assert e.nframes == 1
assert e.exc_type == ValueError
| [
"noreply@github.com"
] | noreply@github.com |
0bb04d6daa2c716c04fbde77ab879c77528ea093 | db4b094ee9a6d53c3279acabcf96a9580212c064 | /drawing.py | 89ff0e7dd2108e774865830394b1e097ab52c569 | [] | no_license | mayankagg9722/OpenCV-Python | 6b470e03c20ab5ea0a10c639ea4d1870ac252eeb | 08d76e767ea0a71d991ee590334c3c04f41e8cc4 | refs/heads/master | 2021-06-17T19:39:47.581157 | 2017-04-23T23:50:38 | 2017-04-23T23:50:38 | 88,671,364 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 981 | py | import numpy as np
import cv2
# Add two images in weighted manner
'''
img=cv2.imread('./Black.jpg')
img2=cv2.imread('./captcha.bmp')
img3=cv2.addWeighted(img2,0.3,img2,0.1,0)
cv2.imshow('weighted',img3)
cv2.waitKey(0)
cv2.destroyAllWindows()
'''
# DRAWING Write N using cv2
img=np.zeros((512,512,4),np.uint8)
cv2.line(img, (50, 50), (50, 100), (255,0,0), 5)
cv2.line(img, (50, 50), (100, 100), (255,0,0), 5)
cv2.line(img, (100, 100), (100, 50), (255,0,0), 5)
cv2.rectangle(img, (0,0), (200,200), (0,255,0), 5)
font = cv2.FONT_HERSHEY_DUPLEX
cv2.putText(img,'N using OpenCV',(10,500), font, 1,(255,255,255),2)
cv2.imshow("myimage",img)
cv2.waitKey(0)
cv2.destroyAllWindows()
# img = cv2.imread('./captcha.bmp')
# # cv2.imshow('sift_keypoints',img)
# gray= cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
# sift = cv2.FastFeatureDetector()
# kp = sift.detect(gray,None)
# img=cv2.drawKeypoints(gray,kp)
# cv2.imshow('sift_keypoints',img)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
| [
"mayankagg9722@gmail.com"
] | mayankagg9722@gmail.com |
94d7c0551cdba960a411011611df392954869805 | 4388ab8e3bef0048e37cfface6294707243ce350 | /pyHue_Testing.py | 378fee264e1b52816acab547e99da121fa0f62f8 | [] | no_license | EvillerBobUK/pyHue-BridgeLink-Example | 619705e19f06fe6850f25729cc847dc5ba16c7f4 | 7273e38fe303ae34f8f85c8ccc199e8ad5423410 | refs/heads/main | 2023-04-19T08:14:12.529041 | 2021-05-07T14:08:40 | 2021-05-07T14:08:40 | 365,217,855 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,448 | py | from time import sleep, time
from classes.pyHue_BridgeLink import pyHue_BridgeLink
bl = pyHue_BridgeLink("BridgeOne") # CHANGE THIS NAME TO MATCH YOUR CONFIG FILE
run_put_test = False
run_streaming_test_RGB = False
run_streaming_test_XYB = False
# UNCOMMENT THE OPTIONS BELOW, DEPENDING ON WHICH TEST YOU WANT
# TO RUN
run_put_test = True
#run_streaming_test_RGB = True
#run_streaming_test_XYB = True
if run_put_test == True:
r = bl.put(bl.url,'lights/1/state',{"on":True,"xy":[0.6915,0.3083],"bri":254})
print(r)
r = bl.put(bl.url,'lights/2/state',{"on":True,"xy":[0.6915,0.3083],"bri":254})
print(r)
if run_streaming_test_XYB == True:
loop = 5 # Number of times to broadcast
delay = 0.5 # How long to sleep between each broadcast
bl.enable_streaming()
while loop > 0:
bl.prepare_and_send_broadcast(
[
(1, 0.6915,0.3083, 0.1),
(2, 0.6915,0.3083, 0.1)
],'XYB'
)
delay(0.5)
loop -= 1
bl.disable_streaming()
if run_streaming_test_RGB == True:
loop = 5 # Number of times to broadcast
delay = 0.5 # How long to sleep between each broadcast
bl.enable_streaming()
while loop > 0:
bl.prepare_and_send_broadcast(
[
(1, 255, 0, 0),
(2, 255, 0, 0)
],'RGB'
)
sleep(delay)
loop -= 1
bl.disable_streaming() | [
"jpbishopltd@gmail.com"
] | jpbishopltd@gmail.com |
b7ea7c196a657c03362e5a72b8dc3b5a15f15f9c | eb9c3dac0dca0ecd184df14b1fda62e61cc8c7d7 | /google/ads/googleads/v6/googleads-py/google/ads/googleads/v6/services/services/ad_group_criterion_simulation_service/transports/base.py | ecc835952d95a794ce1333feaca5eb673c52f842 | [
"Apache-2.0"
] | permissive | Tryweirder/googleapis-gen | 2e5daf46574c3af3d448f1177eaebe809100c346 | 45d8e9377379f9d1d4e166e80415a8c1737f284d | refs/heads/master | 2023-04-05T06:30:04.726589 | 2021-04-13T23:35:20 | 2021-04-13T23:35:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,855 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
import typing
import pkg_resources
from google import auth
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials # type: ignore
from google.ads.googleads.v6.resources.types import ad_group_criterion_simulation
from google.ads.googleads.v6.services.types import ad_group_criterion_simulation_service
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
'google-ads-googleads',
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class AdGroupCriterionSimulationServiceTransport(metaclass=abc.ABCMeta):
"""Abstract transport class for AdGroupCriterionSimulationService."""
AUTH_SCOPES = (
'https://www.googleapis.com/auth/adwords',
)
def __init__(
self, *,
host: str = 'googleads.googleapis.com',
credentials: credentials.Credentials = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]): The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ':' not in host:
host += ':443'
self._host = host
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials is None:
credentials, _ = auth.default(scopes=self.AUTH_SCOPES)
# Save the credentials.
self._credentials = credentials
# Lifted into its own function so it can be stubbed out during tests.
self._prep_wrapped_messages(client_info)
def _prep_wrapped_messages(self, client_info):
# Precomputed wrapped methods
self._wrapped_methods = {
self.get_ad_group_criterion_simulation: gapic_v1.method.wrap_method(
self.get_ad_group_criterion_simulation,
default_timeout=None,
client_info=client_info,
),
}
@property
def get_ad_group_criterion_simulation(self) -> typing.Callable[
[ad_group_criterion_simulation_service.GetAdGroupCriterionSimulationRequest],
ad_group_criterion_simulation.AdGroupCriterionSimulation]:
raise NotImplementedError
__all__ = (
'AdGroupCriterionSimulationServiceTransport',
)
| [
"bazel-bot-development[bot]@users.noreply.github.com"
] | bazel-bot-development[bot]@users.noreply.github.com |
f2b070ea149d220513bc4c6f264ba0f735450f4a | e4553dce064f69c0c29a637a763c4a260b0bab62 | /gen_prediction.py | 81403ea1c2a951eed0713f33214ec37c1f64bc6f | [
"MIT"
] | permissive | Tamal-Mondal/Hi-DST | c673320b403c9f8b9b42ec473779491414d4a6b8 | 3b8e8ace25d3beb5fa43311cc54a87226c73689f | refs/heads/main | 2023-05-28T08:01:58.797706 | 2021-06-15T15:42:43 | 2021-06-15T15:42:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 32,969 | py | """
Description :
Generate DST prediction using Hi-DST.
Run Command:
python gen_prediction.py -in=<path of multiWOZ data> -out=<output dir> -key=<any unique key to identify result>
"""
#--------------------------------------------
import math
import time
import datetime
import random
import argparse
import os
import six, re
import json
import shutil
import pandas as pd
import numpy as np
import torch
import torchtext.vocab as vocab
import transformers
from transformers import BertTokenizer, BertModel, BertForQuestionAnswering
import torch.nn as nn
from model_class import SwitchModel, DomainModel, SlotActionModel
# Global flag toggling extra analysis output; read elsewhere in this script.
analyze = True
#--------------------------------------------
# Command-line interface: paths to the MultiWOZ data, the output directory,
# and the four pre-trained model checkpoint directories used by Hi-DST.
default_path = os.path.join('data', 'mwz2.1')
parser = argparse.ArgumentParser()
parser.add_argument('-in','--in', help='Name of the input directory containing the input files.', required=False, default=default_path)
parser.add_argument('-out','--out', help='path of the output directiory', required=True)
parser.add_argument('-key','--key', help='model key', required=True)
parser.add_argument('-switch_path','--switch_path', help='path of domain change prediction model', required=True)
parser.add_argument('-domain_path','--domain_path', help='path of domain prediction model', required=True)
parser.add_argument('-slot_act_path','--slot_act_path', help='path of slot action prediction model', required=True)
parser.add_argument('-slot_val_path','--slot_val_path', help='path of slot value prediction model', required=True)
# vars() turns the Namespace into a plain dict so the '-in' option (which
# would shadow the Python keyword 'in' as an attribute) can be read by key.
args = vars(parser.parse_args())
in_dir = args['in']
out_dir = args['out']
model_key = args['key']
switch_path = args['switch_path']
domain_path = args['domain_path']
slot_act_path = args['slot_act_path']
slot_val_path = args['slot_val_path']
# Echo the resolved configuration for the run log.
print("Path of input directory : {}".format(in_dir))
print("Path of output directory : {}".format(out_dir))
print("Path of domain change model : {}".format(switch_path))
print("Path of domain model : {}".format(domain_path))
print("Path of slot action model : {}".format(slot_act_path))
print("Path of slot value model : {}".format(slot_val_path))
print("Model key : {}".format(model_key))
# The input directory must already exist; bail out early if it does not.
if(not os.path.isdir(in_dir)):
    print("Input directory {} does not exist.".format(in_dir))
    exit(0)
# Create the output directory on demand (single level only, via mkdir).
if(not os.path.isdir(out_dir)):
    print("Creating output directiory : {}".format(out_dir))
    os.mkdir(out_dir)
# Per-run output file, keyed so multiple runs can coexist in out_dir.
f_str = "log_test_{}.json".format(model_key)
filename = os.path.join(out_dir, f_str)
print("Output filename : {}".format(filename))
#--------------------------------------------
# The seven MultiWOZ domains; index order defines the domain id used below.
domain_list = ['police', 'restaurant', 'hotel', 'taxi', 'attraction', 'train', 'hospital']
# Map from the abbreviated slot names used in MultiWOZ dialogue-act
# annotations to the canonical lower-case slot names used by this script.
slot_detail = {'Type': 'type', 'Price': 'price', 'Parking': 'parking', 'Stay': 'stay', 'Day': 'day',
               'People': 'people', 'Post': 'post', 'Addr': 'address', 'Dest': 'destination', 'Arrive': 'arrive',
               'Depart': 'departure', 'Internet': 'internet', 'Stars': 'stars', 'Phone': 'phone', 'Area': 'area',
               'Leave': 'leave', 'Time': 'time', 'Ticket': 'ticket', 'Ref': 'reference', 'Food': 'food',
               'Name': 'name', 'Department': 'department', 'Fee': 'fee', 'Id': 'id', 'Car': 'car'}
# Informable slots tracked per domain (police/hospital have none here).
meta = {'attraction': {'name', 'type', 'area'},
        'hotel': {'name', 'type', 'parking', 'area', 'day', 'stay', 'internet', 'people', 'stars', 'price'},
        'restaurant': {'name', 'food', 'area', 'day', 'time', 'people', 'price'},
        'taxi': {'arrive', 'departure', 'leave', 'destination'},
        'train': {'arrive', 'day', 'leave', 'destination', 'departure', 'people'}
       }
# Natural-language question per slot, fed to the extractive QA slot-value
# model; the word 'domain' in each question is presumably substituted with
# the concrete domain name at query time — confirm against the caller.
question_dict = {}
question_dict['type'] = 'What is the type of domain?'
question_dict['price'] = 'What is the price range of the domain?'
question_dict['stay'] = 'How many days to stay in the domain?'
question_dict['day'] = 'What day of the week to book the domain?'
question_dict['people'] = 'A domain booking for how many people?'
question_dict['destination'] = 'What is the destination of the domain?'
question_dict['arrive'] = 'What is the arrival time of the domain?'
question_dict['departure'] = 'What is the departure location of the domain?'
question_dict['stars'] = 'What is the star rating of the domain?'
question_dict['area'] = 'What is the area or location of the domain?'
question_dict['leave'] = 'What is the leaving time of the domain?'
question_dict['food'] = 'What is the food type of the domain?'
question_dict['name'] = 'What is the name of the domain?'
question_dict['time'] = 'What is the booking time of the domain?'
# Keyword lists used to recognise 'type' slot values for hotel/attraction.
hotel_type = ["hotel", "guesthouse", "guest house", "lodge"]
attraction_type = ['sport', 'entertainment', 'cinema', 'museum', 'theatre', 'church', 'boat', 'architecture', 'college', 'park', 'theater', 'camboats', 'concert', 'park', 'concert', 'hiking', 'historical', 'gallery', 'nightclub', 'special', 'swimming', 'gastropub', 'outdoor', 'pool', 'pub', 'club', 'swim', 'hall', 'movie']
# TripPy-style dataset config: class types, slot list and label variant maps.
dataset_config = os.path.join('trippy_label_variant', 'multiwoz21.json')
with open(dataset_config, "r", encoding='utf-8') as f:
    raw_config = json.load(f)
class_types = raw_config['class_types']
slot_list = raw_config['slots']
label_maps = raw_config['label_maps']
# Tokenizer shared by the BERT-based components.
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
#--------------------------------------------
# domain name -> integer id (order follows domain_list).
domain_dict = {}
for i, k in enumerate(domain_list):
    domain_dict[k] = i
print("domain_dict : {}".format(domain_dict))
# canonical slot name -> integer id, and the reverse mapping.
# NOTE(review): iteration order of slot_detail fixes the ids — this relies
# on dict insertion order (Python 3.7+).
slot_dict = {}
slot_rev_dict = {}
for i, k in enumerate(slot_detail):
    slot_dict[slot_detail[k]] = i
    slot_rev_dict[i] = slot_detail[k]
print("slot_dict : {}".format(slot_dict))
print("slot_rev_dict : {}".format(slot_rev_dict))
#Loading Glove embeddings
# 300-dimensional GloVe 42B vectors, cached locally in .vector_cache.
glove = vocab.GloVe(name='42B', dim=300, cache='.vector_cache')
print('Loaded {} words from Glove'.format(len(glove.itos)))
def get_word(word):
    """Return the GloVe embedding vector for *word*.

    Raises KeyError (from glove.stoi) when the word is out of vocabulary.
    """
    index = glove.stoi[word]
    return glove.vectors[index]
#Loading Glove embeddings for slot
# Build a (num_slots, 300) matrix of GloVe vectors, one row per slot id.
# Slots missing from GloVe get a random normal vector instead.
matrix_len = len(slot_dict)
weights_matrix = np.zeros((matrix_len, 300))
words_not_found = 0
for i in slot_rev_dict:
    try:
        weights_matrix[i] = get_word(slot_rev_dict[i])
    except KeyError:
        # Out-of-vocabulary slot name: fall back to a random embedding.
        words_not_found += 1
        print("{} not found".format(slot_rev_dict[i]))
        weights_matrix[i] = np.random.normal(scale=0.6, size=(300, ))
print("#Words not found : {}".format(words_not_found))
#Loading Glove embeddings for domain
# Same construction for domains: one (num_domains, 300) matrix.
matrix_len = len(domain_list)
domain_matrix = np.zeros((matrix_len, 300))
domain_not_found = 0
for i in range(len(domain_list)):
    try:
        domain_matrix[i] = get_word(domain_list[i])
    except KeyError:
        domain_not_found += 1
        print("{} not found".format(domain_list[i]))
        domain_matrix[i] = np.random.normal(scale=0.6, size=(300, ))
print("Shape of domain matrix: {}".format(domain_matrix.shape))
print("#Domain not found : {}".format(domain_not_found))
#--------------------------------------------
#Loading domain switch model
# Predicts whether the active domain changes between turns.
# The constructor argument 3 is presumably the number of switch classes —
# TODO confirm against model_class.SwitchModel.
switch_model_path = os.path.join(switch_path, 'switch_model.pt')
switch_model = SwitchModel(3)
switch_model.load_state_dict(torch.load(switch_model_path))
switch_model.eval()
print("Switch Model Loaded")
#Loading domain prediction model
# Predicts the current domain; conditioned on the GloVe domain embeddings.
domain_model_path = os.path.join(domain_path, 'domain_model.pt')
domain_model = DomainModel(domain_matrix, 2)
domain_model.load_state_dict(torch.load(domain_model_path))
domain_model.eval()
print("Domain Model Loaded")
#Loading slot action model
# Predicts the per-slot action; uses both slot and domain embeddings.
slot_action_path = os.path.join(slot_act_path, 'slot_action_model.pt')
slot_act_model = SlotActionModel(weights_matrix, domain_matrix, 10)
slot_act_model.load_state_dict(torch.load(slot_action_path))
slot_act_model.eval()
print("Slot Action Model Loaded")
#Loading slot value model
# Extractive QA model (SQuAD-finetuned BERT) with fine-tuned weights loaded
# on top; answers the per-slot questions from question_dict.
slot_value_model_path = os.path.join(slot_val_path, 'slot_value_model.pt')
slot_value_model = BertForQuestionAnswering.from_pretrained('bert-large-uncased-whole-word-masking-finetuned-squad')
slot_value_model.load_state_dict(torch.load(slot_value_model_path))
slot_value_model.eval()
print("Slot Value Model Loaded")
#--------------------------------------------
def load_json(data_file):
    """Parse *data_file* as JSON and return the result.

    Returns None when the path does not point to a regular file.
    """
    if not os.path.isfile(data_file):
        return None
    with open(data_file, 'r') as read_file:
        return json.load(read_file)
def load_list_file(list_file):
    """Read *list_file* and return its lines with trailing newlines stripped.

    Used to load the train/dev/test dialogue-id lists of MultiWOZ.
    Raises the usual OSError if the file cannot be opened.
    """
    with open(list_file, 'r') as read_file:
        dialog_id_list = read_file.readlines()
    # The original version had an unreachable bare `return` after the
    # `with` block (dead code); the return now happens after the file is
    # closed, with identical results.
    return [l.strip('\n') for l in dialog_id_list]
def cleanBeliefState(belief_state):
    """Return a copy of *belief_state* without slots whose value is 'none'."""
    return {slot: value for slot, value in belief_state.items() if value != 'none'}
def cleanDialogAct(dialog_act):
    """Canonicalize 'don't care' spellings to the literal 'dontcare'."""
    normalized = {}
    for key in dialog_act:
        value = dialog_act[key]
        normalized[key] = "dontcare" if value in ("do n't care", "do nt care") else value
    return normalized
def correctSlotName(slot):
    """Map raw MultiWOZ slot spellings onto the short names used here."""
    renames = {
        "arriveby": "arrive",
        "leaveat": "leave",
        "pricerange": "price",
    }
    return renames.get(slot, slot)
def getBeliefState(belief_state):
    """Flatten a raw belief-state list into a cleaned {domain-slot: value} dict."""
    collected = {}
    for frame in belief_state:
        for slot_pair in frame['slots']:
            raw_key, value = slot_pair[0], slot_pair[1]
            if "-book" in raw_key:
                # e.g. "hotel-book people" -> "hotel-people"
                tokens = raw_key.split(" ")
                key = tokens[0].split("-")[0] + "-" + correctSlotName(tokens[1])
            else:
                parts = raw_key.split("-")
                key = parts[0] + "-" + correctSlotName(parts[1])
            if value != 'none':
                collected[key] = value
    return cleanBeliefState(collected)
def getTurnLabel(tl):
    """Normalize a raw turn_label list into a cleaned {domain-slot: value} dict."""
    turn_label = {}
    for pair in tl:
        raw_key, value = pair[0], pair[1]
        if "-book" in raw_key:
            # e.g. "hotel-book people" -> "hotel-people"
            tokens = raw_key.split(" ")
            key = tokens[0].split("-")[0] + "-" + correctSlotName(tokens[1])
        else:
            parts = raw_key.split("-")
            key = parts[0] + "-" + correctSlotName(parts[1])
        turn_label[key] = value
    return cleanBeliefState(turn_label)
def convert_to_unicode(text):
    """Converts `text` to Unicode (if it's not already), assuming utf-8 input."""
    if six.PY3:
        if isinstance(text, str):
            return text
        if isinstance(text, bytes):
            return text.decode("utf-8", "ignore")
        raise ValueError("Unsupported string type: %s" % (type(text)))
    if six.PY2:
        if isinstance(text, str):
            return text.decode("utf-8", "ignore")
        if isinstance(text, unicode):
            return text
        raise ValueError("Unsupported string type: %s" % (type(text)))
    raise ValueError("Not running on Python2 or Python 3?")
def normalize_time(text):
    """Canonicalize clock times in *text* toward zero-padded 24-hour hh:mm."""
    # Ordered (pattern, replacement) rules; order matters.
    simple_rules = [
        ("(\d{1})(a\.?m\.?|p\.?m\.?)", r"\1 \2"),                                        # am/pm without space
        ("(^| )(\d{1,2}) (a\.?m\.?|p\.?m\.?)", r"\1\2:00 \3"),                           # am/pm short to long form
        ("(^| )(at|from|by|until|after) ?(\d{1,2}) ?(\d{2})([^0-9]|$)", r"\1\2 \3:\4\5"),  # missing separator
        ("(^| )(\d{2})[;.,](\d{2})", r"\1\2:\3"),                                        # wrong separator
        ("(^| )(\d{2}):(\d{2})/", r"\1\2:\3"),                                           # wrong separator
        ("(^| )(\d{1}) (\d{2})", r"\1\2:\3"),                                            # wrong separator
        ("(^| )(\d{2}):!(\d{1})", r"\1\2:1\3"),                                          # wrong format
        ("(^| )(at|from|by|until|after) ?(\d{1,2})([;., ]|$)", r"\1\2 \3:00\4"),         # simple full hour
        ("(^| )(\d{1}:\d{2})", r"\g<1>0\2"),                                             # add missing leading 0
    ]
    for pattern, replacement in simple_rules:
        text = re.sub(pattern, replacement, text)
    # Map 12 hour times to 24 hour times.
    text = re.sub(
        "(\d{2})(:\d{2}) ?p\.?m\.?",
        lambda m: str(int(m.groups()[0]) + 12 if int(m.groups()[0]) < 12 else int(m.groups()[0])) + m.groups()[1],
        text)
    # Correct times that use 24 as hour.
    text = re.sub("(^| )24:(\d{2})", r"\g<1>00:\2", text)
    return text
def normalize_text(utt):
    """Lowercase, time-normalize and de-noise one utterance."""
    text = convert_to_unicode(utt)
    text = text.lower()
    text = normalize_time(text)
    text = re.sub("n't", " not", text)
    # Spell out star ratings as digits: "three-star" -> "3 star".
    for digit, word in enumerate(["zero", "one", "two", "three", "four", "five"]):
        text = re.sub("(^| )" + word + "(-| )star([s.,? ]|$)",
                      r"\g<1>" + str(digit) + r" star\3", text)
    text = re.sub("(^| )(\d{1})-star([s.,? ]|$)", r"\1\2 star\3", text)
    text = re.sub("archaelogy", "archaeology", text)  # Systematic typo
    text = re.sub("mutliple", "multiple", text)  # Systematic typo
    text = re.sub("(^| )b ?& ?b([.,? ]|$)", r"\1bed and breakfast\2", text)  # Normalization
    text = re.sub("bed & breakfast", "bed and breakfast", text)  # Normalization
    return text
def getQuestion(dom, slot, is_ref):
    """Build the lowercase QA question used to extract *dom*-*slot*."""
    if is_ref:
        question = "What is the reference point of {} {}?".format(dom, slot)
    else:
        # Slot-specific template with the placeholder word "domain" filled in.
        question = question_dict[slot].replace("domain", dom)
    return question.lower()
def getSpanDict(i, log):
    """Collect {domain-slot: [value, start, end]} from log[i]'s span_info.

    Repeated mentions of the same slot are concatenated with "$$" in all
    three positions. Returns {} for a negative index.
    """
    span_dict = {}
    if i < 0:
        return span_dict
    turn = log[i]
    for entry in turn.get('span_info', []):
        dom = entry[0].split("-")[0].lower()
        if entry[1] not in slot_detail:
            continue
        key = dom + "-" + slot_detail[entry[1]]
        value = entry[2].lower()
        if key not in span_dict:
            span_dict[key] = [value, entry[3], entry[4]]
        else:
            previous = span_dict[key]
            span_dict[key] = ["{}$${}".format(previous[0], value),
                              "{}$${}".format(previous[1], entry[3]),
                              "{}$${}".format(previous[2], entry[4])]
    return span_dict
#--------------------------------------------
def getProbability(output):
    """Softmax the first row of model *output*, rounded to 4 decimals."""
    logits = output[0].detach().numpy()
    exps = np.exp(logits)
    probabilities = exps / np.sum(exps)
    return [round(value, 4) for value in probabilities]
def predictSwitch(sys, usr):
    """Classify the turn's domain-switch state; returns (label, prob list)."""
    encoded = tokenizer.encode_plus(sys, usr, add_special_tokens=True,
                                    padding='max_length', truncation=True,
                                    max_length=200, return_attention_mask=True)
    ids = torch.tensor([encoded["input_ids"]])
    mask = torch.tensor([encoded["attention_mask"]])
    output = switch_model(ids, mask)
    return torch.argmax(output).item(), getProbability(output)
def predictDomain(sys, usr, domain_id):
    """Binary prediction of whether *domain_id* is active this turn.

    Returns (label, probability of that label).
    """
    encoded = tokenizer.encode_plus(sys, usr, add_special_tokens=True,
                                    padding='max_length', truncation=True,
                                    max_length=200, return_attention_mask=True)
    ids = torch.tensor([encoded["input_ids"]])
    mask = torch.tensor([encoded["attention_mask"]])
    output = domain_model(ids, mask, torch.tensor([domain_id]))
    label = torch.argmax(output).item()
    return label, getProbability(output)[label]
def predictDomainList(sys, usr):
    """Return [(domain, prob)] for domains predicted active, best first."""
    candidates = ['restaurant', 'hotel', 'taxi', 'attraction', 'train']
    active = []
    for name in candidates:
        label, probability = predictDomain(sys, usr, domain_dict[name])
        if label == 1:
            active.append((name, probability))
    return sorted(active, key=lambda pair: pair[1], reverse=True)
def predictSlotAction(sys, usr, slot_id, domain_id):
    """Predict the action class for one domain-slot; returns (label, prob list)."""
    encoded = tokenizer.encode_plus(sys, usr, add_special_tokens=True,
                                    padding='max_length', truncation=True,
                                    max_length=200, return_attention_mask=True)
    ids = torch.tensor([encoded["input_ids"]])
    mask = torch.tensor([encoded["attention_mask"]])
    output = slot_act_model(ids, mask, torch.tensor([slot_id]), torch.tensor([domain_id]))
    return torch.argmax(output).item(), getProbability(output)
def extractAnswer(question, text):
    """Run extractive QA; return the answer span or "none".

    Spans that start inside the question, or with end before start, are
    treated as no answer.
    """
    encoded = tokenizer.encode_plus(question, text,
                                    add_special_tokens=True,
                                    max_length=100,
                                    padding='max_length',
                                    truncation=True)
    ids = encoded["input_ids"]
    outputs = slot_value_model(torch.tensor([ids]),
                               attention_mask=torch.tensor([encoded["attention_mask"]]),
                               token_type_ids=torch.tensor([encoded["token_type_ids"]]))
    start = torch.argmax(outputs.start_logits, dim=1).item()
    end = torch.argmax(outputs.end_logits, dim=1).item()
    # Reject answers extracted from the question portion of the input.
    question_len = len(tokenizer.encode_plus(question)["input_ids"])
    if start < question_len or end < question_len or end < start:
        return "none"
    return tokenizer.decode(ids[start:end + 1], clean_up_tokenization_spaces=True)
def getReference(usr, domain, slot, pred_slots, informed_slots):
    """Resolve a slot whose value refers to another domain's slot.

    Asks the QA model which referent the user meant, snaps that answer onto
    a canonical domain, then copies the value from pred_slots (here: the
    previous belief state) or, failing that, from system-informed values.
    Returns the copied value or "none".
    """
    slot_value = "none"
    qs = getQuestion(domain, slot, True)
    ref_dom = extractAnswer(qs, usr)
    ref_slot = slot
    sl_name = ["destination", "departure"]
    sl_time = ["arrive", "leave"]
    dom_travel = ["taxi", "train"]
    # Domains other than the current one already present in pred_slots.
    dom_list = []
    for slot_key in pred_slots:
        dom = slot_key.split("-")[0]
        if(dom!=domain and dom not in dom_list):
            dom_list.append(dom)
    # No explicit referent extracted: with a single other domain, assume it.
    if (ref_dom=="none"):
        if(len(dom_list)==1):
            ref_dom = dom_list[0]
    if (ref_dom!="none"):
        # Snap free-text referent onto a known domain name; hotel/attraction
        # type words (e.g. "guesthouse", "museum") also count as matches.
        for dom in domain_list:
            if (ref_dom==dom or dom in ref_dom):
                ref_dom = dom
                break
            else:
                if(dom=="hotel"):
                    for v in hotel_type:
                        if (v in ref_dom):
                            ref_dom = dom
                elif(dom=="attraction"):
                    for v in attraction_type:
                        if (v in ref_dom):
                            ref_dom = dom
    # Place references map onto the referent domain's "name" slot and time
    # references onto its "time" slot (travel domains keep their own slots).
    if(ref_dom not in dom_travel and slot in sl_name):
        ref_slot = "name"
    if(ref_dom not in dom_travel and slot in sl_time):
        ref_slot = "time"
    slot_key = ref_dom + "-" + ref_slot
    if(log_print):
        print("Ref of {} : {}".format(domain+"-"+slot, slot_key))
    if slot_key in pred_slots:
        slot_value = pred_slots[slot_key]
    # Fall back to system-informed values; multiple informed mentions are
    # joined with "$$", so only the first one is used here.
    if(slot_value=="none" and slot_key in informed_slots):
        temp_val = informed_slots[slot_key]
        if("$$" in temp_val):
            slot_value = temp_val.split("$$")[0]
    return slot_value
def predictSlotValue(sys, usr, domain, slot, slot_action, pred_slots, informed_slots):
    """Map a predicted slot action id to a concrete slot value string."""
    # Actions 1-4 map to fixed values.
    fixed_values = {1: '?', 2: 'dontcare', 3: 'yes', 4: 'no'}
    if slot_action in fixed_values:
        return fixed_values[slot_action]
    if slot_action == 5:  # Singular: only the 'people' slot defaults to 1
        return '1' if slot == 'people' else 'none'
    if slot_action == 6:  # Type: only hotel-type has a default
        return 'hotel' if (domain == 'hotel' and slot == 'type') else 'none'
    if slot_action == 7:  # Extract from the user utterance
        return extractAnswer(getQuestion(domain, slot, False), usr)
    if slot_action == 8:  # Extract from the system utterance
        slot_key = domain + "-" + slot
        text = normalize_text(sys)
        if slot_key in informed_slots and informed_slots[slot_key] != "none":
            informed = informed_slots[slot_key]
            # Multiple informed mentions are joined with "$$"; keep the first.
            return informed if "$$" not in informed else informed.split("$$")[0]
        return extractAnswer(getQuestion(domain, slot, False), text)
    if slot_action == 9:  # Copy from previous states
        return getReference(usr, domain, slot, pred_slots, informed_slots)
    return 'none'
def isUnseen(sl_key, slot_value, pred_bs):
    """True if *slot_value* is genuinely new relative to pred_bs.

    Known label variants (from the global label_maps) count as already seen.
    """
    if sl_key not in pred_bs:
        return True
    known = pred_bs[sl_key]
    if slot_value == known:
        return False
    if known in label_maps:
        for variant in label_maps[known]:
            if slot_value == variant:
                return False
    if slot_value in label_maps:
        for variant in label_maps[slot_value]:
            if known == variant:
                return False
    return True
def getStringList(l):
    """Stringify every element of *l*, preserving order."""
    return list(map(str, l))
def updateReferenceTravel(usr, domain, pred_bs, informed_slots, pred_slots, pred_tl):
    """Resolve travel-domain departure/destination slots that refer to places
    from other domains.

    Mutates pred_tl in place. pred_bs is the belief state predicted so far,
    informed_slots holds values the system itself mentioned, and pred_slots
    maps slot keys to [action_id, probs] where action 9 == "copy reference".
    """
    sl_name = ["destination", "departure"]
    sl_time = ["arrive", "leave"]
    dom_travel = ["taxi", "train"]
    #print("Updating Ref : {}".format(usr))
    # Slots flagged this turn as needing a value copied from elsewhere.
    ref_slots = []
    for sl_key in pred_slots:
        if(pred_slots[sl_key][0] == 9):
            ref_slots.append(sl_key)
    #print("ref_slots : {}".format(ref_slots))
    # Non-travel domains present in the belief state: candidate referents.
    dom_list = []
    for slot_key in pred_bs:
        dom = slot_key.split("-")[0]
        if(dom not in dom_travel and dom not in dom_list):
            dom_list.append(dom)
    #print("ref_domains : {}".format(dom_list))
    #print("prev_bs : {}".format(pred_bs))
    #print("pred_tl : {}".format(pred_tl))
    # Heuristic applies only with exactly two candidate domains: the trip is
    # assumed to go from a place in one to a place in the other.
    if(len(dom_list)==2):
        d_dep = dom_list[0]
        d_dest = dom_list[1]
        dep_key = domain+"-departure"
        dest_key = domain+"-destination"
        dep_ref = d_dep+"-name"
        dest_ref = d_dest+"-name"
        leave_key = domain+"-leave"
        arrive_key = domain+"-arrive"
        time_ref = "restaurant-time"
        if(dep_key not in pred_tl and dest_key not in pred_tl):
            # Neither endpoint predicted this turn: fill each endpoint that
            # was flagged as a reference from the other domain's name slot.
            if (dep_key in ref_slots):
                f = False
                if(dep_ref in pred_bs):
                    pred_tl[dep_key] = pred_bs[dep_ref]
                    f = True
                elif(dep_ref in informed_slots):
                    f = True
                    pred_tl[dep_key] = informed_slots[dep_ref]
                # Departing from a restaurant: default leave time to the
                # restaurant booking time.
                if(f and "restaurant" in dep_ref and leave_key not in pred_tl):
                    if(time_ref in pred_bs):
                        pred_tl[leave_key] = pred_bs[time_ref]
            if (dest_key in ref_slots):
                f = False
                if(dest_ref in pred_bs):
                    pred_tl[dest_key] = pred_bs[dest_ref]
                    f = True
                elif(dest_ref in informed_slots):
                    pred_tl[dest_key] = informed_slots[dest_ref]
                    f = True
                # Heading to a restaurant: default arrive time likewise.
                if(f and "restaurant" in dest_ref and arrive_key not in pred_tl):
                    if(time_ref in pred_bs):
                        pred_tl[arrive_key] = pred_bs[time_ref]
        else:
            # One endpoint is already predicted: ensure departure and
            # destination do not collapse onto the same place, using the two
            # candidate names as alternatives.
            v_ref_0 = "none"
            v_ref_1 = "none"
            s_key = dom_list[0]+"-name"
            if(s_key in pred_bs):
                v_ref_0 = pred_bs[s_key]
            elif(s_key in informed_slots):
                v_ref_0 = informed_slots[s_key]
            s_key = dom_list[1]+"-name"
            if(s_key in pred_bs):
                v_ref_1 = pred_bs[s_key]
            elif(s_key in informed_slots):
                v_ref_1 = informed_slots[s_key]
            if(dep_key in pred_tl):
                if(dest_key in pred_bs):
                    v = pred_bs[dest_key]
                    if(v == pred_tl[dep_key]):
                        if(log_print):
                            print("Need to change the value of {}".format(dest_key))
                        if (v==v_ref_0 and v_ref_1!="none"):
                            pred_tl[dest_key] = v_ref_1
                        elif(v==v_ref_1 and v_ref_0!="none"):
                            pred_tl[dest_key] = v_ref_0
                else:
                    if(dest_key in ref_slots):
                        if(log_print):
                            print("Need to set the value of {}".format(dest_key))
                        v = pred_tl[dep_key]
                        if (v==v_ref_0 and v_ref_1!="none"):
                            pred_tl[dest_key] = v_ref_1
                        elif(v==v_ref_1 and v_ref_0!="none"):
                            pred_tl[dest_key] = v_ref_0
            else:
                if(dep_key in pred_bs):
                    v = pred_bs[dep_key]
                    if(v == pred_tl[dest_key]):
                        if(log_print):
                            print("Need to change the value of {}".format(dep_key))
                        if (v==v_ref_0 and v_ref_1!="none"):
                            pred_tl[dep_key] = v_ref_1
                        elif(v==v_ref_1 and v_ref_0!="none"):
                            pred_tl[dep_key] = v_ref_0
                else:
                    if(dep_key in ref_slots):
                        if(log_print):
                            print("Need to set the value of {}".format(dep_key))
                        v = pred_tl[dest_key]
                        if (v==v_ref_0 and v_ref_1!="none"):
                            pred_tl[dep_key] = v_ref_1
                        elif(v==v_ref_1 and v_ref_0!="none"):
                            pred_tl[dep_key] = v_ref_0
#--------------------------------------------
def getPrediction(k, d, dials):
    """Run the full DST pipeline over one dialogue.

    k: dialogue id; d: raw dialogue (with interleaved sys/usr 'log' turns);
    dials: preprocessed dialogue with per-turn annotations.
    Returns {turn_idx: record} containing utterances, switch/domain/slot
    predictions, and both ground-truth and predicted turn labels / states.
    """
    pred_log = {}
    dials_log = dials['dialogue']
    data_log = d['log']
    sys = " "
    # Turn 0 has no system utterance; treat it like a fresh-domain turn.
    switch_output = 1
    switch_prob = [0.0, 1.0, 0.0]
    current_domain = {}   # active domain -> confidence (stringified)
    pred_bs = {}          # cumulative predicted belief state
    pred_bs_prev = {}     # belief-state snapshot from the previous turn
    informed_slots = {}   # values the system itself mentioned (span_info)
    for t in dials_log:
        i = t['turn_idx']
        # Raw log interleaves system and user turns: user turn i is at 2*i.
        idx = 2*i
        usr = data_log[idx]['text'].strip().lower()
        usr_norm = normalize_text(usr)
        span_dict_sys = {}
        if(idx>0):
            sys = data_log[idx-1]['text'].strip().lower()
            span_dict_sys = getSpanDict(idx-1, data_log)
        bs = getBeliefState(t['belief_state'])
        tl = getTurnLabel(t['turn_label'])
        for slot in span_dict_sys:
            informed_slots[slot] = span_dict_sys[slot][0]
        if(analyze):
            print("Turn : {}".format(i))
            print("Sys : {}".format(sys))
            print("Usr : {}".format(usr_norm))
        if(i>0):
            switch_output, switch_prob = predictSwitch(sys, usr_norm)
        # Run domain prediction when required: no active domain yet, an
        # explicit switch, or an ambiguous multi-domain state.
        if(len(current_domain)==0 or switch_output==1 or len(current_domain)>1):
            p_domain = predictDomainList(sys, usr_norm)
            if(len(p_domain)>0):
                if(i==0):
                    current_domain[p_domain[0][0]] = str(p_domain[0][1])
                else:
                    current_domain = {}
                    if(len(p_domain)==1):
                        current_domain[p_domain[0][0]] = str(p_domain[0][1])
                    else:
                        for p_dom in p_domain:
                            current_domain[p_dom[0]] = str(p_dom[1])
        if(switch_output==2):
            current_domain = {}
        pred_slots = {}
        pred_tl = {}
        if(switch_output<2):
            for dom in current_domain:
                slot_set = {}
                if dom in meta:
                    slot_set = meta[dom]
                for slot in slot_set:
                    slot_act, slot_act_prob = predictSlotAction(sys, usr_norm, slot_dict[slot], domain_dict[dom])
                    sl_key = dom+"-"+slot
                    if(log_print):
                        print("Slot act of {}-{} : {} with {}".format(dom, slot, slot_act, slot_act_prob))
                    pred_slots[sl_key] = [slot_act, getStringList(slot_act_prob)]
                    # Actions > 1 carry an actual value to resolve.
                    if (slot_act>1):
                        #if(log_print):
                        #    print("Slot act of {}-{} : {} with {}".format(dom, slot, slot_act, slot_act_prob))
                        slot_value = predictSlotValue(sys, usr_norm, dom, slot, slot_act, pred_bs_prev, informed_slots)
                        if(slot_value!="none"):
                            pred_tl[sl_key] = slot_value
                if(dom=="taxi"):
                    # Taxi endpoints frequently refer back to other domains.
                    updateReferenceTravel(usr_norm, dom, pred_bs_prev, informed_slots, pred_slots, pred_tl)
        # Merge only genuinely new values into the cumulative state.
        for sl_key in pred_tl:
            if(isUnseen(sl_key, pred_tl[sl_key], pred_bs_prev)):
                pred_bs[sl_key] = pred_tl[sl_key]
        if(analyze):
            print("Switch output : {} - {}".format(switch_output, switch_prob))
            print("Current domains : {}".format(current_domain))
            print("Current slots : {}".format(pred_slots))
        if(analyze):
            print("GT TL : {}".format(tl))
            print("PR TL : {}".format(pred_tl))
            print("GT BS : {}".format(bs))
            print("PR BS : {}".format(pred_bs))
            print("------------")
        # Per-turn record: utterances, predictions and ground truth.
        pred_log[i] = {}
        pred_log[i]['a_sys'] = sys
        pred_log[i]['a_usr'] = usr
        pred_log[i]['a_usr_norm'] = usr_norm
        pred_log[i]['switch'] = [switch_output, getStringList(switch_prob)]
        pred_log[i]['domains'] = current_domain.copy()
        pred_log[i]['slots'] = pred_slots.copy()
        pred_log[i]['gt_turn'] = tl.copy()
        pred_log[i]['pr_turn'] = pred_tl.copy()
        pred_log[i]['gt'] = bs.copy()
        pred_log[i]['pr'] = pred_bs.copy()
        pred_bs_prev = pred_bs.copy()
    return pred_log
#--------------------------------------------
#Load raw data
dialog_data_file = os.path.join(in_dir, 'data.json')
dialog_data = load_json(dialog_data_file)
dialog_id_list = list(set(dialog_data.keys()))
test_list_file = os.path.join(in_dir, 'testListFile.txt')
test_id_list = load_list_file(test_list_file)
print('# of test dialogs :', len(test_id_list))
# Keep only dialogues named in the test split; every listed id must exist.
test_data = [(k,v) for k, v in dialog_data.items() if k in test_id_list]
assert(len(test_data) == len(test_id_list))
#Load test dials data
dials_path = os.path.join(in_dir, "test_dials.json")
data = load_json(dials_path)
dials_data = {}
# Index the preprocessed dialogues by their dialogue id for O(1) lookup.
for i,d in enumerate(data):
    dials_data[d['dialogue_idx']] = d
print('# of test dials dialogs :', len(dials_data))
analyze = False  # Set True to analyze a single prediction
log_print = False
result = {}
# Default output file for batch mode; overridden below in analyze mode.
# (Bug fix: the original defined `filename` only inside the analyze
# branch, so batch mode crashed with a NameError when writing results.)
filename = os.path.join(out_dir, "test_prediction.json")
if analyze:
    #log_print = True #Set True to print more details
    # Set dialogue id to be analysed
    idx = 'PMUL2437.json'
    for k, d in test_data:
        if k in dials_data and k == idx:
            print(k)
            pred_log = getPrediction(k, d, dials_data[k])
            result[k] = pred_log
            break
    filename = os.path.join(out_dir, "unit_test.json")
    print("Output filename : {}".format(filename))
else:
    j = 0
    now = datetime.datetime.now()
    print("Starting evaluation of test data at {}".format(now.strftime("%Y-%m-%d %H:%M:%S")))
    for k, d in test_data:
        if k in dials_data:
            pred_log = getPrediction(k, d, dials_data[k])
            result[k] = pred_log
            j = j + 1
            # Periodic progress heartbeat; a full run takes a while.
            if j % 100 == 0:
                now = datetime.datetime.now()
                print("Iteration {} completed at {}".format(j, now.strftime("%Y-%m-%d %H:%M:%S")))
# Persist all per-dialogue records as pretty-printed, key-sorted JSON.
with open(filename, "w") as result_file:
    result_file.write(json.dumps(result, indent=4, sort_keys=True))
print("done")
#-------------------------------------------- | [
"suvodip15@gmail.com"
] | suvodip15@gmail.com |
2f707c0ad76fe2c3e40a640ea863ae18b885b30f | ee9e6f276985a07cde2e42b66793cbfaac67279b | /研究生课程/模式识别/exp3_lc/perceptron.py | 61ffaf1bf682397feb4dad6865241374e19a8838 | [] | no_license | wyjss2015/Coursera | fa90385dfec38c4f35f6fd8b04b493514ffe4dc0 | eca20a080cb565f86b256b49481864e3917a7041 | refs/heads/master | 2020-04-17T19:28:22.390191 | 2017-06-26T17:00:24 | 2017-06-26T17:00:24 | 66,063,456 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,392 | py | # coding:utf-8
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
class perceptron(object):
    """Classic Rosenblatt perceptron for linearly separable 2-class data.

    Labels are expected in {+1, -1}; a bias column of ones is prepended
    so the threshold is learned as w[0].
    """

    def __init__(self, X, y):
        # X: (N, D) float features; y: (N,) labels in {+1, -1}.
        self.X = np.concatenate((np.ones((X.shape[0], 1)), X), axis=1)
        self.y = y
        self.w = np.random.rand(self.X.shape[1])

    def train(self):
        """Cycle through samples, correcting w on each mistake, until a
        full pass makes no mistakes (terminates for separable data)."""
        # Bug fix: use self.X here — the original read the module-level
        # global X, which broke when the class was used from elsewhere.
        N = self.X.shape[0]
        cnt = 0
        i = 0
        while cnt < N:
            margin = np.dot(self.w, self.X[i]) * self.y[i]
            if margin <= 0:
                # Misclassified: nudge w toward the correct side.
                self.w += self.X[i] * self.y[i]
                cnt = 0
            else:
                cnt += 1
            i = (i + 1) % N

    def plot(self):
        """Scatter both classes and draw the learned separating line."""
        idx_1 = (self.y == 1)
        x1_1 = self.X[idx_1, 1]
        x2_1 = self.X[idx_1, 2]
        idx_2 = (self.y == -1)
        x1_2 = self.X[idx_2, 1]
        x2_2 = self.X[idx_2, 2]
        plt.axis([-1, 3, -1, 3])
        plt.plot(x1_1, x2_1, 'o')
        plt.plot(x1_2, x2_2, 'x')
        # Decision boundary: w0 + w1*x1 + w2*x2 = 0, solved for x2.
        plane_x = np.array([np.min(self.X[:, 1]), np.max(self.X[:, 1])])
        plane_y = (-self.w[0] - self.w[1] * plane_x) / self.w[2]
        plt.plot(plane_x, plane_y)
        plt.show()
if __name__ == '__main__':
    # Tiny linearly separable 2-D demo set.
    X = [[1, 1],
         [2, 2],
         [2, 0],
         [0, 0],
         [1, 0],
         [0, 1]]
    y = [1, 1, 1, -1, -1, -1]
    X = np.array(X).astype(float)
    y = np.array(y)
    model = perceptron(X, y)
    model.train()
    model.plot()
    # Parenthesized print works on both Python 2 and 3; the original
    # `print model.w` statement was Python-2-only syntax.
    print(model.w)
| [
"wyjss08@gmail.com"
] | wyjss08@gmail.com |
eca748f1e46e776e4639c1664a274fa42add6ad7 | 3c969d9588192b803d2ce8e6c7c6030b22745ada | /QuizMaster/quizapi/migrations/0013_auto_20190317_1333.py | ed075c4146d7781323e5b7504c0e5d42da90ef82 | [] | no_license | prem2282/django-quiz-deployment | c6b24c423c8d029b3577105e4d705c2b6d98def1 | 15957adecd7918a91fbe70ecac6c639a757d67c6 | refs/heads/master | 2022-06-08T01:48:04.296666 | 2022-05-30T05:40:35 | 2022-05-30T05:40:35 | 140,092,137 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 398 | py | # Generated by Django 2.1 on 2019-03-17 08:03
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: alter UserQuiz.userId to a blank-allowed
    CharField with max_length=50."""

    dependencies = [
        ('quizapi', '0012_auto_20190317_1326'),
    ]

    operations = [
        migrations.AlterField(
            model_name='userquiz',
            name='userId',
            field=models.CharField(blank=True, max_length=50),
        ),
    ]
| [
"prem2282@gmail.com"
] | prem2282@gmail.com |
fa30efa790e8083eea57b600264747d53d9c230c | 5c6272383529e4465215e8f9c94eae7bddddcf21 | /1209_dp.py | 1e2bb793730252d7ff96f38f5b7641f7a4aa3187 | [] | no_license | RoafS10755014/algorithm | 97719970fe9521822189b61061d795f41ff310b3 | 191e23e6dbdda2fc0afb3abcbe550d85e606c466 | refs/heads/master | 2023-02-16T16:15:37.055005 | 2021-01-13T15:10:56 | 2021-01-13T15:10:56 | 298,011,573 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 723 | py | #Rod cutting problem
maxValue = []  # Retained for backward compatibility; no longer used.

def getmax(rod, n):
    """Return the maximum obtainable price for a rod of length n.

    rod[i] is the price of a piece of length i. Classic bottom-up
    dynamic programming over every first-cut length. The original only
    considered splitting into at most two pieces and accumulated results
    in the module-level maxValue list across calls, so repeated calls
    could return stale answers; this version is correct and side-effect
    free.
    """
    best = [0] * (n + 1)
    for length in range(1, n + 1):
        best[length] = max(rod[cut] + best[length - cut]
                           for cut in range(1, length + 1))
    return best[n]

def getvalue(rod, n, maxvalue):
    # Price of splitting a rod of length `maxvalue` into pieces of length
    # n and maxvalue-n (kept for backward compatibility with old callers).
    return rod[n] + rod[maxvalue - n]
# Price table: ROD[i] is the price of a rod piece of length i.
ROD = [0, 1, 5, 8, 9, 10, 17, 17, 20, 24, 30]

# Keep prompting until the user supplies an integer in 0..10.
userinput = input("please input a number\n")
while True:
    if not userinput.isdigit():
        userinput = input("please input a number\n")
        continue
    userinput = int(userinput)
    if userinput > 10:
        userinput = input("please enter a number(0~10)")
        continue
    break
print(getmax(ROD, userinput))
#print(rod)
| [
"noreply@github.com"
] | noreply@github.com |
3bdb961a368f5fd910108a6231df97077fd148bc | 699bd6dc1c94db6df80b90240f2a1b1c8a0efdc9 | /hw2/hw2_code/nndl/knn.py | 5e5dd6f764605be556946d17d28c2c7fa9544478 | [] | no_license | ashwin-s-ranade/ECE-247-Winter-2021-Kao | 9d394c700fd421b3e74c17b52ce10ddad3fcea86 | 53dd6771f0a7cfc52735157eeb975af79cd07882 | refs/heads/master | 2023-03-10T02:36:45.600664 | 2021-02-25T19:23:46 | 2021-02-25T19:23:46 | 337,887,065 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,370 | py | import numpy as np
import pdb
from scipy.stats import mode
"""
This code was based off of code from cs231n at Stanford University, and modified for ECE C147/C247 at UCLA.
"""
class KNN(object):
    """k-nearest-neighbor classifier over memorized training data."""

    def __init__(self):
        pass

    def train(self, X, y):
        """Memorize the training data.

        Inputs:
        - X is a numpy array of size (num_examples, D)
        - y is a numpy array of size (num_examples, )
        """
        self.X_train = X
        self.y_train = y

    def compute_distances(self, X, norm=None):
        """Compute pairwise distances with an explicit double loop.

        Inputs:
        - X: A numpy array of shape (num_test, D) containing test data.
        - norm: callable applied to each difference vector; defaults to L2.

        Returns:
        - dists: (num_test, num_train) array where dists[i, j] is the
          distance between the ith test point and the jth training point.
        """
        if norm is None:
            norm = lambda x: np.sqrt(np.sum(x**2))

        num_test = X.shape[0]
        num_train = self.X_train.shape[0]
        dists = np.zeros((num_test, num_train))
        for i in np.arange(num_test):
            for j in np.arange(num_train):
                dists[i, j] = norm(X[i] - self.X_train[j])
        return dists

    def compute_L2_distances_vectorized(self, X):
        """Compute all L2 distances with no Python loops.

        Uses the expansion ||x - y||^2 = ||x||^2 + ||y||^2 - 2 x.y with
        broadcasting: (num_test, 1) + (num_train,) -> (num_test, num_train).
        """
        x_squared = np.sum(X**2, axis=1)[:, np.newaxis]
        y_squared = np.sum(self.X_train**2, axis=1)
        two_x_y = 2 * np.matmul(X, self.X_train.T)
        # Clamp at 0: floating-point cancellation can leave the argument
        # slightly negative, which would yield NaNs under sqrt.
        dists = np.sqrt(np.maximum(x_squared + y_squared - two_x_y, 0.0))
        return dists

    def predict_labels(self, dists, k=1):
        """Predict a label per test point by majority vote of k nearest.

        Ties are broken by choosing the smaller label: np.unique returns
        its values sorted and np.argmax returns the first maximum, so this
        matches the old scipy.stats.mode behavior without depending on
        mode()'s return shape, which changed across scipy versions.

        Inputs:
        - dists: (num_test, num_train) distances.

        Returns:
        - y: (num_test,) predicted labels.
        """
        num_test = dists.shape[0]
        y_pred = np.zeros(num_test)
        for i in np.arange(num_test):
            # Indices (hence labels) of the k nearest training points.
            nearest = np.argsort(dists[i])[:k]
            closest_y = self.y_train[nearest]
            values, counts = np.unique(closest_y, return_counts=True)
            y_pred[i] = values[np.argmax(counts)]
        return y_pred
| [
"ashwinranade99@gmail.com"
] | ashwinranade99@gmail.com |
fcb0ac9a2e90fb3003f163171bdf3f9429306a81 | e43ff8f429a6938a4f16edc4b2c94976acbff157 | /ABC/HELLO SPACE/c.py | 7e33c484056d96fed727123096a19a47f8c58635 | [] | no_license | Soule50431/AtCoder | 4fcd6ab6b771d55c90dc62aedd75eb81fd067466 | 118ac5d03630ce143fb50153402eee38e988ae0c | refs/heads/master | 2023-06-18T13:07:13.843361 | 2021-07-14T01:56:20 | 2021-07-14T01:56:20 | 357,827,698 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 496 | py | n = int(input())
team = [tuple(map(int, input().split())) for i in range(n)]
def check(x):
    """True if some trio of members covers all 5 subjects at level >= x.

    Each member is reduced to a 5-bit mask of subjects where their rating
    is at least x; three masks ORing to 0b11111 (31) means full coverage.
    """
    masks = set()
    for member in team:
        covered = 0
        for subject in range(5):
            if member[subject] >= x:
                covered |= 1 << subject
        masks.add(covered)
    return any(a | b | c == 31 for a in masks for b in masks for c in masks)
# Binary search the largest threshold for which check() still succeeds:
# check is monotone non-increasing in x, so the invariant is
# check(low) == True (vacuously at 0) and check(high) == False.
low, high = 0, 10**9 + 1
while high - low > 1:
    middle = (low + high) // 2
    if check(middle):
        low = middle
    else:
        high = middle
print(low)
"h.ekcero.el6no11@outlook.jp"
] | h.ekcero.el6no11@outlook.jp |
7e2a193ccee2f63630c195d74ee27b7a478f75be | 249b97429d91bcd5aff353a682ab559ee9cdb9c1 | /flipkart/accounts/models.py | f31c51e513fb7dee822b33ceb78cff8de750197e | [] | no_license | gowthamkr1994/Flipkart | dac1bdb631fa92709821a85e2121cd67244c7294 | 061306f844b4232105b446893ba6db4b723a92e6 | refs/heads/master | 2020-05-04T17:02:03.954605 | 2019-04-03T13:43:12 | 2019-04-03T13:43:12 | 179,295,860 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 585 | py | from django.db import models
# Create your models here.
class Signup(models.Model):
id = models.AutoField
first_name = models.CharField(max_length=50, default="")
last_name = models.CharField(max_length=50, default="")
email = models.CharField(max_length=50, default="",primary_key=True)
password = models.CharField(max_length=8, default="")
contact = models.CharField(max_length=12, default="")
address = models.CharField(max_length=300, default="")
def __str__(self):
return self.email
# class Meta():
# Verbose_name_plural="Products"
| [
"gowtham.kolekar123@gmail.com"
] | gowtham.kolekar123@gmail.com |
2f01cbb9aebe793ed7eb8d6d85d167d803cc165f | cb287ea164120432b5a8f01f550c003ef55f6675 | /ex5+.py | b17b4e2bd065bb4e17f5743d5043be2a9061cdfb | [] | no_license | guodonghai901901/GitHub | 0ae9400dabf240dc6d4aacd3dbafdb7f0a8f15ba | d80634ef71797ca02791b4e1d0af33b55855ec2e | refs/heads/master | 2020-06-02T03:59:38.938407 | 2014-03-07T15:13:56 | 2014-03-07T15:13:56 | 17,108,081 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 557 | py | # -*- coding: utf-8-*-
name = 'Zed A. Shaw'
age = 35  # not a lie
height = 74  # inches
weight = 180  # lbs
eyes = 'Blue'
teeth = 'White'
hair = 'Brown'

# Parenthesized print keeps this runnable on both Python 2 and 3
# (each call has a single argument, so py2 output is unchanged).
print("Let's talk about %s." % name)
print("He's %d inches tall." % height)
print("He's %d pounds heavy." % weight)
print("Actually that's not too heavy.")
print("He's got %s eyes and %s hair." % (eyes, hair))
# Bug fix: the %s placeholder was never filled in, so a literal "%s"
# was printed; supply `teeth` as clearly intended.
print("His teeth are usually %s depending on the coffee." % teeth)
print("If I add %d, %d, and %d I get %d." % (age, height, weight, age + height + weight))
# TODO: convert inches and pounds to centimetres and kilograms.
| [
"guodonghai901901@sina.cn"
] | guodonghai901901@sina.cn |
1f4636f0fa095620e31a612233b06bbbf0c1da48 | fc7defba3ad7ffb6b4ad36a0f0ac563b475803da | /python/depthcharge/uboot/__init__.py | d616426dd0f9b03811a2540a8bc5e2780c6fdad3 | [
"BSD-3-Clause"
] | permissive | gavz/depthcharge | a748c5ba11329eb2bf149bb6df0bcd248120e0f3 | 9b66d1c2a80b9398ac561c83173ebd748aef018d | refs/heads/main | 2023-06-28T21:55:39.302047 | 2021-08-02T02:58:06 | 2021-08-02T03:49:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 338 | py | # SPDX-License-Identifier: BSD-3-Clause
# Depthcharge: <https://github.com/nccgroup/depthcharge>
#
# flake8: noqa=F401
"""
U-Boot centric parsing, conversion, and data processing functionality
"""
from . import board
from . import cmd_table
from . import env
from . import jump_table
from .version import UBootVersion, version_in_range
| [
"jon.szymaniak.foss@gmail.com"
] | jon.szymaniak.foss@gmail.com |
8e86bdf62bd256814055818044df3081b07a50bd | e9628f8b9d86ba587edd36cbccc1d2133290cd5d | /agenda/admin.py | 40d2f928322e99a583ec1ffe8b6d0b62dedf4eab | [] | no_license | krishivpiduri2010/intergallactic | c9fe80a72fd0fafed0c19e5eb5008042e87cbf08 | c1f5c9846de38d4d54d2f311ff39e0a29096432c | refs/heads/master | 2023-06-14T11:29:51.586039 | 2021-07-04T18:53:33 | 2021-07-04T18:53:33 | 382,140,062 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 112 | py | from django.contrib import admin
from .models import *
# Register your models here.
admin.site.register(Object) | [
"krishivpiduri337@gmail.com"
] | krishivpiduri337@gmail.com |
b12de6c619935508db19c3f39260210233e6a4ab | a0801d0e7325b31f0383fc68517e208680bb36d6 | /Kattis/rijeci.py | 7d0bdadf29031e7404c2c5a61ad6cc2e938add57 | [] | no_license | conormccauley1999/CompetitiveProgramming | bd649bf04438817c7fa4755df2c2c7727273b073 | a7e188767364be40f625612af3d16182f2d8d4de | refs/heads/master | 2023-05-14T13:19:32.678134 | 2023-05-11T16:07:33 | 2023-05-11T16:07:33 | 179,089,010 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 173 | py | K = int(raw_input())
if K == 1:
print 0, 1
else:
x = [0, 1]
y = [1, 1]
for i in range(0, K - 1):
t = [y[0]+x[0], y[1]+x[1]]
x = y
y = t
print x[0], y[0] | [
"conormccauley1999@gmail.com"
] | conormccauley1999@gmail.com |
b6bc57eebf2a6deb99192a6e04622d787e73f0bf | 6656036bd209f6fd5416807954f53e4e1fb27dd1 | /notePC/nfcid_w.py | da97baea00887ea59d258c68f16d86846876b1cb | [
"Apache-2.0"
] | permissive | EnyaKitakagaya/Tonis | 44221215eac7f14e3c5fc717503ee67d5440123b | 414ab7788c5b55a0b76c8837edee3f1c838fe7ec | refs/heads/master | 2021-01-13T08:10:47.380043 | 2016-11-13T02:10:19 | 2016-11-13T02:10:19 | 69,097,260 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,014 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# フェリカカードのIDを/tmp/nfcid に追記し続けるスクリプト by penkich 2016-10-15
# ubuntu をインストールした会員カード登録用ノートPCの /etc/rc.local に置いておき、自動起動させる。
# カード読み取りごとに、サウンドファイルを鳴らす(ラズパイならI/Oポートにブザーつないで鳴らすとよい)。
# サウンドファイルは、フリーのものが種々公開されてるので、利用するとよい。
# 例えば、http://freewavesamples.com/files/E-Mu-Proteus-FX-TubeBels-C6.wav
import nfc
import re
import os
def connected(tag):
# タグのIDなどを出力する
# print tag
a = '%s' % tag
id = re.findall("ID=([0-9A-F]*)",a)[0]
file.write(id)
clf = nfc.ContactlessFrontend('usb')
while (True):
with open("/tmp/nfcid", "a") as file:
clf.connect(rdwr={'on-connect': connected})
os.system("aplay /usr/local/bin/bell.wav")
| [
"noreply@github.com"
] | noreply@github.com |
fbd7868a37a2270e5dc86843adff50a94436404d | ede0a087ea29305857fb4586e730ef1ebf068a86 | /Application/detection.py | c3576ae1208387ed5afedefca6e39cb90cc95933 | [] | no_license | manuel1801/Bachelor_Arbeit | f3e5e33aa57877c890890b8c3584c672cd038816 | 92200717fd34544f9bbea9aa7ad2156bf72080df | refs/heads/master | 2021-07-10T03:29:20.605528 | 2021-03-01T09:06:51 | 2021-03-01T09:06:51 | 219,782,897 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,307 | py | from openvino.inference_engine import IENetwork, IECore
import numpy as np
import time
from datetime import datetime
import sys
import os
import cv2
class MotionDetect:
# Klasse zur Erkennung von Bewegung
def __init__(self):
self.static_back = None
def detect_motion(self, frame, reset=False):
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (21, 21), 0)
if self.static_back is None or reset:
self.static_back = gray
return False
diff_frame = cv2.absdiff(self.static_back, gray)
thresh_frame = cv2.threshold(diff_frame, 50, 255, cv2.THRESH_BINARY)[1]
thresh_frame = cv2.dilate(thresh_frame, None, iterations=2)
cnts, _ = cv2.findContours(thresh_frame.copy(),
cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
if cnts:
return True
else:
return False
def reset_background(self):
self.static_back = None
class InferenceModel:
# Klasse zur Erstellung eines 'ExecInferModel' Objekts
def __init__(self, device='MYRIAD'):
self.ie = IECore()
self.device = device
def create_exec_infer_model(self, model_dir, output_dir, num_requests=2):
# Anlegen der Pfade zu den Modell Dateien
model_xml = os.path.join(
model_dir, 'frozen_inference_graph.xml')
model_bin = os.path.join(
model_dir, 'frozen_inference_graph.bin')
exported_model = os.path.join(model_dir, 'exported_model')
# Laden der Labels aus 'classes.txt'
labels = [line.strip() for line in open(
os.path.join(model_dir, 'classes.txt')).readlines()]
assert os.path.isfile(model_bin)
assert os.path.isfile(model_xml)
# Erstellung des Modells aus IR Dateien
net = IENetwork(model=model_xml, weights=model_bin)
# In-Output Shapes des Modells aus 'net' laden
img_info_input_blob = None
feed_dict = {}
for blob_name in net.inputs:
if len(net.inputs[blob_name].shape) == 4:
input_blob = blob_name
elif len(net.inputs[blob_name].shape) == 2:
img_info_input_blob = blob_name
else:
raise RuntimeError("Unsupported {}D input layer '{}'. Only 2D and 4D input layers are supported"
.format(len(net.inputs[blob_name].shape), blob_name))
assert len(
net.outputs) == 1, "Demo supports only single output topologies"
out_blob = next(iter(net.outputs))
# Modell importieren (Falls vorhanden)
if os.path.isfile(exported_model):
print('found model to import')
try:
exec_net = self.ie.import_network(
model_file=exported_model, device_name=self.device, num_requests=num_requests)
except:
return False
else:
# sonst erstellen und exoportieren
print('creating exec model')
try:
exec_net = self.ie.load_network(
network=net, num_requests=num_requests, device_name=self.device)
exec_net.export(exported_model)
except:
return False
nchw = net.inputs[input_blob].shape
del net
if img_info_input_blob:
feed_dict[img_info_input_blob] = [nchw[2], nchw[3], 1]
# ersellen und zurückgeben eines ExecInferModel Objekts, mit welchem die Inferenz ausgeführt wird
return ExecInferModel(exec_net, input_blob, out_blob, feed_dict, nchw, labels, output_dir)
class ExecInferModel:
def __init__(self, exec_net, input_blob, out_blob, feed_dict, nchw, labels, output_dir):
self.exec_net = exec_net
self.labels = labels
self.input_blob = input_blob
self.out_blob = out_blob
self.feed_dict = feed_dict
self.n, self.c, self.h, self.w = nchw
self.current_frames = {}
self.detected_objects = {}
self.output_dir = output_dir
def infer_frames(self, buffer, threshhold=0.6, view_result=True, n_save=20, save_all=False):
# Status Variablen
n_infered, n_detected, n_saved = 0, 0, 0
# alle Inferenz Requests durchiterieren
for inf_img_ind, infer_request in enumerate(self.exec_net.requests):
res, frame = None, None
# Status der Inferenz für aktuellen Request abfragen
status = infer_request.wait(0)
# 0: ergebnis da, -11: noch nicht gestartet
if status != 0 and status != -11:
continue
# Ergebnis für aktuellen Request holen
if inf_img_ind in self.current_frames:
res = infer_request.outputs[self.out_blob]
frame = self.current_frames[inf_img_ind]
n_infered += 1
# neuen Inferent Request starten
if len(buffer):
self.current_frames[inf_img_ind] = buffer.pop()
in_frame = cv2.resize(
self.current_frames[inf_img_ind], (self.w, self.h))
in_frame = in_frame.transpose((2, 0, 1))
in_frame = in_frame.reshape(
(self.n, self.c, self.h, self.w))
self.feed_dict[self.input_blob] = in_frame
infer_request.async_infer(self.feed_dict)
# Ergebnis verarbeiten
if res is None or frame is None:
continue
height, width = frame.shape[:2]
# inferenz ergebnisse für ein frame durchiterieren
for obj in res[0][0]:
# Threshold prüfen
if obj[2] < threshhold:
continue
n_detected += 1
# Boundig Box koordinalte aus Erg laden
xmin = int(obj[3] * width)
ymin = int(obj[4] * height)
xmax = int(obj[5] * width)
ymax = int(obj[6] * height)
# ID der erkannten Klasse
class_id = int(obj[1])
# Bounding Box in das Bild zeichnen
cv2.rectangle(frame, (xmin, ymin),
(xmax, ymax), color=(0, 255, 255), thickness=2)
cv2.putText(frame, self.labels[class_id - 1] + ' ' + str(round(obj[2] * 100, 1)) + '%', (xmin, ymin - 7),
cv2.FONT_HERSHEY_COMPLEX, 0.6, (0, 255, 255), 1)
# detected_objects dict anlegen mit key:class_id, value:[N, Roi, proba]
if not class_id in self.detected_objects:
self.detected_objects[class_id] = [
0, frame, obj[2]]
else:
self.detected_objects[class_id][0] += 1
# wenn wahrscheinlichkeit höher als bei gespeicherten, ersetzen
if self.detected_objects[class_id][2] < obj[2]:
self.detected_objects[class_id][1] = frame
self.detected_objects[class_id][2] = obj[2]
# nach 'n_save' abspeicher
if self.detected_objects[class_id][0] > n_save:
n_saved += 1
self._save(class_id)
del self.detected_objects[class_id]
if view_result:
cv2.imshow('infer result', frame)
cv2.waitKey(1)
# alle aus 'detected_objects' lokal speichern
if save_all:
print('saving all')
for class_id in self.detected_objects.keys():
self._save(class_id)
n_saved += 1
self.detected_objects = {}
return n_infered, n_detected, n_saved
# Funkiont zum speichern der Bilder
def _save(self, class_id):
class_name = self.labels[class_id - 1]
print('saving ', class_name)
time_stamp = datetime.now().strftime("%d-%b-%Y_%H-%M-%S")
file_name = time_stamp + '_' + class_name + '.jpg'
image_array = self.detected_objects[class_id][1]
# save image local
cv2.imwrite(os.path.join(
self.output_dir, file_name), image_array)
| [
"manuel.barkey@web.de"
] | manuel.barkey@web.de |
49c05d2676b8eed51218f2ef3306bf504397a1b1 | 0f0a7adfae45e07a896c5cd5648ae081d4ef7790 | /python数据结构/python黑马数据结构/排序于搜索/桶排序2.py | 081bee496e92f52adc6aa7b5f6d0b08d0687b4c3 | [] | no_license | renlei-great/git_window- | e2c578544c7a8bdd97a7a9da7be0464d6955186f | 8bff20a18d7bbeeaf714aa49bf15ab706153cc28 | refs/heads/master | 2021-07-19T13:09:01.075494 | 2020-06-13T06:14:37 | 2020-06-13T06:14:37 | 227,722,554 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 630 | py | lista = [12, 4, 5, 6, 22, 3, 3, 3, 3, 43, 654, 765, 7, 234]
def pail_sort(alist):
"""桶排序"""
n = len(alist)
cur = 0
while cur < n-1:
if alist[cur] > alist[cur+1]:
max_num = alist[cur]
cur += 1
max_li = [0] * (max_num +1)
for i in alist:
max_li[i] += 1
print(max_li)
sort_num = []
for i in range(len(max_li)):
if max_li[i] != 0:
print(i)
ex = 'sort_num.append(i)\n' * max_li[i]
print(ex)
exec(ex)
return sort_num
if __name__ == "__main__":
new_li = pail_sort(lista)
print(new_li) | [
"1415977534@qq.com"
] | 1415977534@qq.com |
d82cb4496515ddff73ac55165464858ee6725a1d | 9b1eda0abdc5dea7c6e9695ff4e1098abe0a708b | /tests/option_list/test_option_prompt_replacement.py | ef11b75383c49a7f4a5689e95c0c50375011d533 | [
"MIT"
] | permissive | Textualize/textual | b8cf4b5d18069fccc7623b3116436f479e1ef446 | b74ac1e47fdd16133ca567390c99ea19de278c5a | refs/heads/main | 2023-08-30T21:40:21.563823 | 2023-08-30T10:18:27 | 2023-08-30T10:18:27 | 355,959,597 | 14,818 | 588 | MIT | 2023-09-14T20:22:02 | 2021-04-08T15:24:47 | Python | UTF-8 | Python | false | false | 3,245 | py | """Test replacing options prompt from an option list."""
import pytest
from textual.app import App, ComposeResult
from textual.widgets import OptionList
from textual.widgets.option_list import Option, OptionDoesNotExist
class OptionListApp(App[None]):
"""Test option list application."""
def compose(self) -> ComposeResult:
yield OptionList(
Option("0", id="0"),
Option("line1\nline2"),
)
async def test_replace_option_prompt_with_invalid_id() -> None:
"""Attempting to replace the prompt of an option ID that doesn't exist should raise an exception."""
async with OptionListApp().run_test() as pilot:
with pytest.raises(OptionDoesNotExist):
pilot.app.query_one(OptionList).replace_option_prompt("does-not-exist", "new-prompt")
async def test_replace_option_prompt_with_invalid_index() -> None:
"""Attempting to replace the prompt of an option index that doesn't exist should raise an exception."""
async with OptionListApp().run_test() as pilot:
with pytest.raises(OptionDoesNotExist):
pilot.app.query_one(OptionList).replace_option_prompt_at_index(23, "new-prompt")
async def test_replace_option_prompt_with_valid_id() -> None:
"""It should be possible to replace the prompt of an option ID that does exist."""
async with OptionListApp().run_test() as pilot:
option_list = pilot.app.query_one(OptionList)
option_list.replace_option_prompt("0", "new-prompt")
assert option_list.get_option("0").prompt == "new-prompt"
async def test_replace_option_prompt_with_valid_index() -> None:
"""It should be possible to replace the prompt of an option index that does exist."""
async with OptionListApp().run_test() as pilot:
option_list = pilot.app.query_one(OptionList).replace_option_prompt_at_index(1, "new-prompt")
assert option_list.get_option_at_index(1).prompt == "new-prompt"
async def test_replace_single_line_option_prompt_with_multiple() -> None:
"""It should be possible to replace single line prompt with multiple lines """
new_prompt = "new-prompt\nsecond line"
async with OptionListApp().run_test() as pilot:
option_list = pilot.app.query_one(OptionList)
option_list.replace_option_prompt("0", new_prompt)
assert option_list.get_option("0").prompt == new_prompt
async def test_replace_multiple_line_option_prompt_with_single() -> None:
"""It should be possible to replace multiple line prompt with a single line"""
new_prompt = "new-prompt"
async with OptionListApp().run_test() as pilot:
option_list = pilot.app.query_one(OptionList)
option_list.replace_option_prompt("0", new_prompt)
assert option_list.get_option("0").prompt == new_prompt
async def test_replace_multiple_line_option_prompt_with_multiple() -> None:
"""It should be possible to replace multiple line prompt with multiple lines"""
new_prompt = "new-prompt\nsecond line"
async with OptionListApp().run_test() as pilot:
option_list = pilot.app.query_one(OptionList)
option_list.replace_option_prompt_at_index(1, new_prompt)
assert option_list.get_option_at_index(1).prompt == new_prompt
| [
"szymon.sc.cader@gmail.com"
] | szymon.sc.cader@gmail.com |
0e5709a6bfd68d4a0c7b6e54eab0a0a3b00dcb54 | 7f5f4f3e096ad050d829b6d6f65292a803936bed | /Day1.py | 77efc21afce7d2384da920f39165ce387e5ac728 | [] | no_license | WiTe0DeViL/LetsUpgrade_Projects | 2d8ecf6d34f7337cc6ace16818e7b498d1f64986 | e363870450e44928609bc418c3633412538f81a0 | refs/heads/main | 2023-07-09T16:56:24.464358 | 2021-08-22T15:22:30 | 2021-08-22T15:22:30 | 397,113,844 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 159 | py | x = int(input("Enter a number for x : "))
y = int(input("Enter a number for y : "))
result = x ** y
print(f"Hence {x} to th power of {y} is {result}")
| [
"noreply@github.com"
] | noreply@github.com |
c97a97cc33c8fb773f980e478fd7837712bd7cbc | df34ce4d6d84db4ef3d5f1e316217fd3dc8331ac | /process_scripts.py | bb4379444510c947f69aa7384d0c98edc0d9ca92 | [] | no_license | Shirlynmishra/veloce_reduction | 046c3b8292cf752e8c680f3cb8088a874e55f383 | 47f7d13236dfe2120c127357e455bc0184d2f2f9 | refs/heads/master | 2020-08-04T21:52:35.819387 | 2019-09-11T10:54:08 | 2019-09-11T10:54:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 36,984 | py | '''
Created on 25 Jul. 2018
@author: christoph
'''
import astropy.io.fits as pyfits
import numpy as np
import time
import os
import glob
from veloce_reduction.veloce_reduction.helper_functions import binary_indices, laser_on, thxe_on
from veloce_reduction.veloce_reduction.calibration import correct_for_bias_and_dark_from_filename
from veloce_reduction.veloce_reduction.cosmic_ray_removal import remove_cosmics, median_remove_cosmics
from veloce_reduction.veloce_reduction.background import extract_background, extract_background_pid, fit_background
from veloce_reduction.veloce_reduction.order_tracing import extract_stripes
from veloce_reduction.veloce_reduction.extraction import extract_spectrum, extract_spectrum_from_indices
from veloce_reduction.veloce_reduction.relative_intensities import get_relints, get_relints_from_indices, append_relints_to_FITS
from veloce_reduction.veloce_reduction.get_info_from_headers import get_obs_coords_from_header
from veloce_reduction.veloce_reduction.barycentric_correction import get_barycentric_correction
def process_whites(white_list, MB=None, ronmask=None, MD=None, gain=None, P_id=None, scalable=False, fancy=False, remove_bg=True, clip=5., savefile=True, saveall=False, diffimg=False, path=None, debug_level=0, timit=False):
    """
    This routine processes all whites from a given list of files. It corrects the orientation of the image and crops the overscan regions,
    and subtracts both the MASTER BIAS frame [in ADU], and the MASTER DARK frame [in e-] from every image before combining them to create a MASTER WHITE frame.
    NOTE: the input image has units of ADU, but the output image has units of electrons!!!

    INPUT:
    'white_list'  : list of filenames of raw white images (incl. directories)
    'MB'          : the master bias frame (bias only, excluding OS levels) [ADU]
    'ronmask'     : the read-noise mask (or frame) [e-]
    'MD'          : the master dark frame [e-]
    'gain'        : the gains for each quadrant [e-/ADU]
    'P_id'        : order tracing dictionary (only needed if remove_bg is set to TRUE)
    'scalable'    : boolean - do you want to normalize the dark current to an exposure time of 1s? (ie do you want to make it "scalable"?)
    'fancy'       : boolean - do you want to use the 'fancy' method for creating the master white frame? (otherwise a simple median image will be used)
    'remove_bg'   : boolean - do you want to remove the background from the output master white?
    'clip'        : number of 'expected-noise sigmas' a pixel has to deviate from the median pixel value across all images to be considered an outlier when using the 'fancy' method
    'savefile'    : boolean - do you want to save the master white frame as a FITS file?
    'saveall'     : boolean - do you want to save all individual bias- & dark-corrected images as well?
    'diffimg'     : boolean - do you want to save the difference image (ie containing the outliers)? only used if 'fancy' is set to TRUE
    'path'        : path to the output file directory (only needed if savefile is set to TRUE)
    'debug_level' : for debugging...
    'timit'       : boolean - do you want to measure execution run time?

    OUTPUT:
    'master'      : the master white image [e-] (also has been brought to 'correct' orientation, overscan regions cropped, and (if desired) bg-corrected)
    'err_master'  : the corresponding uncertainty array [e-]
    """

    if timit:
        start_time = time.time()

    if debug_level >= 1:
        print('Creating master white frame from '+str(len(white_list))+' fibre flats...')

    # if INPUT arrays are not given, read them from default files
    if path is None:
        print('WARNING: output file directory not provided!!!')
        print('Using same directory as input file...')
        dum = white_list[0].split('/')
        path = white_list[0][0:-len(dum[-1])]
    if MB is None:
        # no need to fix orientation, this is already a processed file [ADU]
        # MB = pyfits.getdata(path+'master_bias.fits')
        MB = pyfits.getdata(path + 'median_bias.fits')
    if ronmask is None:
        # no need to fix orientation, this is already a processed file [e-]
        ronmask = pyfits.getdata(path + 'read_noise_mask.fits')
    if MD is None:
        if scalable:
            # no need to fix orientation, this is already a processed file [e-]
            MD = pyfits.getdata(path + 'master_dark_scalable.fits', 0)
            # err_MD = pyfits.getdata(path+'master_dark_scalable.fits', 1)
        else:
            # no need to fix orientation, this is already a processed file [e-]
            # BUGFIX: getval requires the FITS keyword as second argument (was missing)
            texp = pyfits.getval(white_list[0], 'ELAPSED')
            MD = pyfits.getdata(path + 'master_dark_t' + str(int(np.round(texp,0))) + '.fits', 0)
            # err_MD = pyfits.getdata(path+'master_dark_t'+str(int(np.round(texp,0)))+'.fits', 1)

    # prepare arrays holding the individual bias- & dark-corrected images and their error arrays
    allimg = []
    allerr = []

    # loop over all files in "white_list"; correct for bias and darks on the fly
    for n,fn in enumerate(sorted(white_list)):
        if debug_level >=1:
            print('Now processing file ' + str(n+1) + '/' + str(len(white_list)) + ' (' + fn + ')')

        # call routine that does all the bias and dark correction stuff and converts from ADU to e-
        if scalable:
            # if the darks have a different exposure time than the whites, then we need to re-scale the master dark
            texp = pyfits.getval(white_list[0], 'ELAPSED')
            img = correct_for_bias_and_dark_from_filename(fn, MB, MD*texp, gain=gain, scalable=scalable, savefile=saveall,
                                                          path=path, timit=timit)   #these are now bias- & dark-corrected images; units are e-
        else:
            img = correct_for_bias_and_dark_from_filename(fn, MB, MD, gain=gain, scalable=scalable, savefile=saveall,
                                                          path=path, timit=timit)   # these are now bias- & dark-corrected images; units are e-

        if debug_level >=2:
            print('min(img) = ' + str(np.min(img)))
        allimg.append(img)
        # err_img = np.sqrt(img + ronmask*ronmask)   # [e-]
        # TEMPFIX: (how should I be doing this properly???)
        # clip negative pixel values to zero before the sqrt so the photon-noise term stays real
        err_img = np.sqrt(np.clip(img,0,None) + ronmask*ronmask)   # [e-]
        allerr.append(err_img)

    # list of individual exposure times for all whites (should all be the same, but just in case...)
    texp_list = [pyfits.getval(file, 'ELAPSED') for file in white_list]
    # scale to the median exposure time
    tscale = np.array(texp_list) / np.median(texp_list)

    #########################################################################
    ### now we do essentially what "CREATE_MASTER_IMG" does for whites... ###
    #########################################################################
    # add individual-image errors in quadrature (need it either way, not only for fancy method)
    err_summed = np.sqrt(np.sum((np.array(allerr)**2), axis=0))
    # # get plain median image
    # medimg = np.median(np.array(allimg), axis=0)
    # take median after scaling to median exposure time
    medimg = np.median(np.array(allimg) / tscale.reshape(len(allimg), 1, 1), axis=0)

    if fancy:
        # need to create a co-added frame if we want to do outlier rejection the fancy way
        summed = np.sum((np.array(allimg)), axis=0)
        if diffimg:
            diff = np.zeros(summed.shape)

        master_outie_mask = np.zeros(summed.shape, dtype='int')

        # make sure we do not have any negative pixels for the sqrt
        medimgpos = medimg.copy()
        medimgpos[medimgpos < 0] = 0.
        med_sig_arr = np.sqrt(medimgpos + ronmask*ronmask)       # expected STDEV for the median image (from LB Eq 2.1); still in ADUs
        for n,img in enumerate(allimg):
            # outie_mask = np.abs(img - medimg) > clip*med_sig_arr
            outie_mask = (img - medimg) > clip*med_sig_arr      # do we only want HIGH outliers, ie cosmics?
            # save info about which image contributes the outlier pixel using unique binary numbers technique
            master_outie_mask += (outie_mask * 2**n).astype(int)

        # see which image(s) produced the outlier(s) and replace outies by mean of pixel value from remaining images
        n_outie = np.sum(master_outie_mask > 0)
        print('Correcting '+str(n_outie)+' outliers...')
        # loop over all outliers
        for i,j in zip(np.nonzero(master_outie_mask)[0],np.nonzero(master_outie_mask)[1]):
            # access binary numbers and retrieve component(s)
            outnum = binary_indices(master_outie_mask[i,j])   # these are the indices (within allimg) of the images that contain outliers
            dumix = np.arange(len(white_list))
            # remove the images containing the outliers in order to compute mean from the remaining images
            useix = np.delete(dumix,outnum)
            if diffimg:
                diff[i,j] = summed[i,j] - ( len(outnum) * np.mean( np.array([allimg[q][i,j] for q in useix]) ) + np.sum( np.array([allimg[q][i,j] for q in useix]) ) )
            # now replace value in master image by the sum of all pixel values in the unaffected pixels
            # plus the number of affected images times the mean of the pixel values in the unaffected images
            summed[i,j] = len(outnum) * np.mean( np.array([allimg[q][i,j] for q in useix]) ) + np.sum( np.array([allimg[q][i,j] for q in useix]) )

        # once we have finished correcting the outliers, we want to "normalize" (ie divide by number of frames) the master image and the corresponding error array
        master = summed / len(white_list)
        err_master = err_summed / len(white_list)

    else:
        # ie not fancy, just take the median image to remove outliers
        # now set master image equal to median image
        master = medimg.copy()
        nw = len(white_list)   # number of whites

        if nw > 1:
            # if roughly Gaussian distribution of values: error of median ~= 1.253*error of mean
            # need to rescale by exp time here, too
            err_master = 1.253 * np.std(np.array(allimg) / tscale.reshape(len(allimg), 1, 1), axis=0) / np.sqrt(nw-1)     # normally it would be sigma/sqrt(n), but np.std is dividing by sqrt(n), not by sqrt(n-1)
            # err_master = np.sqrt( np.sum( (np.array(allimg) - np.mean(np.array(allimg), axis=0))**2 / (nw*(nw-1)) , axis=0) )     # that is equivalent, but slower
        else:
            # BUGFIX: with a single input frame np.std is identically zero and sqrt(nw-1) is zero,
            # so fall back to the individual-frame error array instead of producing NaNs
            err_master = allerr[0]

    # now subtract background (errors remain unchanged)
    if remove_bg:
        # identify and extract background
        bg = extract_background_pid(master, P_id, slit_height=30, exclude_top_and_bottom=True, timit=timit)
        # fit background
        bg_coeffs, bg_img = fit_background(bg, clip=10, return_full=True, timit=timit)
        # subtract background
        master = master - bg_img

    # now save master white to file
    if savefile:
        outfn = path+'master_white.fits'
        pyfits.writeto(outfn, master, clobber=True)
        pyfits.setval(outfn, 'HISTORY', value='   MASTER WHITE frame - created '+time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime())+' (GMT)')
        # pyfits.setval(outfn, 'EXPTIME', value=texp, comment='exposure time [s]')
        pyfits.setval(outfn, 'UNITS', value='ELECTRONS')
        if fancy:
            pyfits.setval(outfn, 'METHOD', value='fancy', comment='method to create master white and remove outliers')
        else:
            pyfits.setval(outfn, 'METHOD', value='median', comment='method to create master white and remove outliers')
        h = pyfits.getheader(outfn)
        h_err = h.copy()
        h_err['HISTORY'] = 'estimated uncertainty in MASTER WHITE frame - created '+time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime())+' (GMT)'
        pyfits.append(outfn, err_master, h_err, clobber=True)

        # also save the difference image if desired
        if diffimg:
            hdiff = h.copy()
            hdiff['HISTORY'] = '   MASTER WHITE DIFFERENCE IMAGE - created '+time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime())+' (GMT)'
            pyfits.writeto(path+'master_white_diffimg.fits', diff, hdiff, clobber=True)

    if timit:
        print('Total time elapsed: '+str(np.round(time.time() - start_time,1))+' seconds')

    return master, err_master
def process_science_images(imglist, P_id, chipmask, mask=None, stripe_indices=None, quick_indices=None, sampling_size=25, slit_height=32, qsh=23, gain=[1.,1.,1.,1.], MB=None, ronmask=None, MD=None, scalable=False, saveall=False, path=None, ext_method='optimal',
from_indices=True, slope=True, offset=True, fibs='all', date=None, timit=False):
"""
Process all science / calibration lamp images. This includes:
(1) bias and dark subtraction
(2) cosmic ray removal
(3) background extraction and estimation
(4) flat-fielding (ie removal of pixel-to-pixel sensitivity variations)
=============================
(5) extraction of stripes
(6) extraction of 1-dim spectra
(7) get relative intensities of different fibres
(8) wavelength solution
(9) barycentric correction (for stellar observations only)
"""
print('WARNING: I commented out BARCYRORR')
# cont = raw_input('Do you still want to continue?')
cont='y'
assert cont.lower() == 'y', 'You chose to quit!'
if timit:
start_time = time.time()
# sort image list, just in case
imglist.sort()
# get a list with the object names
object_list = [pyfits.getval(file, 'OBJECT').split('+')[0] for file in imglist]
if object_list[0] == 'ARC - ThAr':
obstype = 'ARC'
elif object_list[0].lower() in ["lc", "lc-only", "lfc", "lfc-only", "simlc", "thxe", "thxe-only", "simth", "thxe+lfc", "lfc+thxe", "lc+simthxe", "lc+thxe"]:
obstype = 'simcalib'
else:
obstype = 'stellar'
if obstype in ['stellar', 'ARC']:
# and the indices where the object changes (to figure out which observations belong to one epoch)
changes = np.where(np.array(object_list)[:-1] != np.array(object_list)[1:])[0] + 1 # need the plus one to get the indices of the first occasion of a new object
# list of indices for individual epochs - there's gotta be a smarter way to do this...
all_epoch_list = []
if len(changes) > 0:
all_epoch_list.append(np.arange(0,changes[0]))
for j in range(len(changes) - 1):
all_epoch_list.append(np.arange(changes[j], changes[j+1]))
all_epoch_list.append(np.arange(changes[-1], len(object_list)))
else:
all_epoch_list.append(np.arange(0, len(object_list)))
#####################################
### (1) bias and dark subtraction ###
#####################################
# if INPUT arrays are not given, read them from default files
if path is None:
print('WARNING: output file directory not provided!!!')
print('Using same directory as input file...')
dum = imglist[0].split('/')
path = imglist[0][0: -len(dum[-1])]
if MB is None:
# no need to fix orientation, this is already a processed file [ADU]
# MB = pyfits.getdata(path + 'master_bias.fits')
MB = pyfits.getdata(path + 'median_bias.fits')
if ronmask is None:
# no need to fix orientation, this is already a processed file [e-]
ronmask = pyfits.getdata(path + 'read_noise_mask.fits')
if MD is None:
if scalable:
# no need to fix orientation, this is already a processed file [e-]
MD = pyfits.getdata(path + 'master_dark_scalable.fits', 0)
# err_MD = pyfits.getdata(path + 'master_dark_scalable.fits', 1)
else:
# no need to fix orientation, this is already a processed file [e-]
print('WARNING: scalable KW not properly implemented (stellar_list can have different exposure times...)')
texp = 600.
MD = pyfits.getdata(path + 'master_dark_t' + str(int(np.round(texp, 0))) + '.fits', 0)
# err_MD = pyfits.getdata(path + 'master_dark_t' + str(int(np.round(texp, 0))) + '.fits', 1)
if not from_indices:
ron_stripes = extract_stripes(ronmask, P_id, return_indices=False, slit_height=slit_height, savefiles=False, timit=True)
# loop over all files
for i,filename in enumerate(imglist):
# (0) do some housekeeping with filenames, and check if there are multiple exposures for a given epoch of a star
dum = filename.split('/')
dum2 = dum[-1].split('.')
obsname = dum2[0]
obsnum = int(obsname[-5:])
object = pyfits.getval(filename, 'OBJECT').split('+')[0]
object_indices = np.where(object == np.array(object_list))[0]
texp = pyfits.getval(filename, 'ELAPSED')
# check if this exposure belongs to the same epoch as the previous one
if obstype in ['stellar', 'ARC']:
if i > 0:
if filename in epoch_list:
new_epoch = False
else:
new_epoch = True
# delete existing temp bg files so we don't accidentally load them for a wrong epoch
if os.path.isfile(path + 'temp_bg_lfc.fits'):
os.remove(path + 'temp_bg_lfc.fits')
if os.path.isfile(path + 'temp_bg_thxe.fits'):
os.remove(path + 'temp_bgthxe.fits')
if os.path.isfile(path + 'temp_bg_both.fits'):
os.remove(path + 'temp_bg_both.fits')
if os.path.isfile(path + 'temp_bg_neither.fits'):
os.remove(path + 'temp_bg_neither.fits')
else:
new_epoch = True
# delete existing temp bg files so we don't accidentally load them for a wrong epoch
if os.path.isfile(path + 'temp_bg_lfc.fits'):
os.remove(path + 'temp_bg_lfc.fits')
if os.path.isfile(path + 'temp_bg_thxe.fits'):
os.remove(path + 'temp_bgthxe.fits')
if os.path.isfile(path + 'temp_bg_both.fits'):
os.remove(path + 'temp_bg_both.fits')
if os.path.isfile(path + 'temp_bg_neither.fits'):
os.remove(path + 'temp_bg_neither.fits')
else:
if i == 0:
new_epoch = True
else:
new_epoch = False
print('Extracting ' + obstype + ' spectrum ' + str(i + 1) + '/' + str(len(imglist)) + ': ' + obsname)
if obstype in ['stellar', 'ARC']:
# list of all the observations belonging to this epoch
epoch_ix = [sublist for sublist in all_epoch_list if i in sublist] # different from object_indices, as epoch_ix contains only indices for this particular epoch if there are multiple epochs of a target in a given night
epoch_list = list(np.array(imglist)[epoch_ix])
# make sublists according to the four possible calibration lamp configurations
epoch_sublists = {'lfc':[], 'thxe':[], 'both':[], 'neither':[]}
if int(date) < 20190503:
# look at the actual 2D image (using chipmasks for LFC and simThXe) to determine which calibration lamps fired
for file in epoch_list:
img = correct_for_bias_and_dark_from_filename(file, MB, MD, gain=gain, scalable=scalable, savefile=saveall, path=path)
lc = laser_on(img, chipmask)
thxe = thxe_on(img, chipmask)
if (not lc) and (not thxe):
epoch_sublists['neither'].append(file)
elif (lc) and (thxe):
epoch_sublists['both'].append(file)
else:
if lc:
epoch_sublists['lfc'].append(file)
elif thxe:
epoch_sublists['thxe'].append(file)
# now check the calibration lamp configuration for the main observation in question
img = correct_for_bias_and_dark_from_filename(filename, MB, MD, gain=gain, scalable=scalable,
savefile=saveall, path=path)
lc = laser_on(img, chipmask)
thxe = thxe_on(img, chipmask)
if (not lc) and (not thxe):
lamp_config = 'neither'
elif (lc) and (thxe):
lamp_config = 'both'
else:
if lc:
lamp_config = 'lfc'
elif thxe:
lamp_config = 'thxe'
else:
# since May 2019 the header keywords are correct, so check for LFC / ThXe in header, as that is MUCH faster
for file in epoch_list:
lc = 0
thxe = 0
h = pyfits.getheader(file)
if 'LCNEXP' in h.keys(): # this indicates the latest version of the FITS headers (from May 2019 onwards)
if ('LCEXP' in h.keys()) or ('LCMNEXP' in h.keys()): # this indicates the LFC actually was actually exposed (either automatically or manually)
lc = 1
else: # if not, just go with the OBJECT field
if ('LC' in pyfits.getval(filename, 'OBJECT').split('+')) or ('LFC' in pyfits.getval(filename, 'OBJECT').split('+')):
lc = 1
if h['SIMCALTT'] > 0:
thxe = 1
assert lc+thxe in [0,1,2], 'ERROR: could not establish status of LFC and simultaneous ThXe for ' + obsname + '.fits !!!'
if lc+thxe == 0:
epoch_sublists['neither'].append(file)
elif lc+thxe == 1:
if lc == 1:
epoch_sublists['lfc'].append(file)
else:
epoch_sublists['thxe'].append(file)
elif lc+thxe == 2:
epoch_sublists['both'].append(file)
# now check the calibration lamp configuration for the main observation in question
lc = 0
thxe = 0
h = pyfits.getheader(filename)
if 'LCNEXP' in h.keys(): # this indicates the latest version of the FITS headers (from May 2019 onwards)
if ('LCEXP' in h.keys()) or ('LCMNEXP' in h.keys()): # this indicates the LFC actually was actually exposed (either automatically or manually)
lc = 1
else: # if not latest header version, just go with the OBJECT field
if ('LC' in pyfits.getval(filename, 'OBJECT').split('+')) or ('LFC' in pyfits.getval(filename, 'OBJECT').split('+')):
lc = 1
if h['SIMCALTT'] > 0:
thxe = 1
if lc + thxe == 0:
lamp_config = 'neither'
elif lc + thxe == 1:
if lc == 1:
lamp_config = 'lfc'
else:
lamp_config = 'thxe'
elif lc + thxe == 2:
lamp_config = 'both'
else:
# for sim. calibration images we don't need to check for the calibration lamp configuration for all exposures (done external to this function)!
# just for the file in question and then create a dummy copy of the image list so that it is in the same format that ix expected for stellar
# observations
if int(date) < 20190503:
# now check the calibration lamp configuration for the main observation in question
img = correct_for_bias_and_dark_from_filename(filename, MB, MD, gain=gain, scalable=scalable, savefile=saveall, path=path)
lc = laser_on(img, chipmask)
thxe = thxe_on(img, chipmask)
if (not lc) and (not thxe):
lamp_config = 'neither'
elif (lc) and (thxe):
lamp_config = 'both'
else:
if lc:
lamp_config = 'lfc'
elif thxe:
lamp_config = 'thxe'
else:
# now check the calibration lamp configuration for the main observation in question
lc = 0
thxe = 0
h = pyfits.getheader(filename)
if 'LCNEXP' in h.keys(): # this indicates the latest version of the FITS headers (from May 2019 onwards)
if ('LCEXP' in h.keys()) or (
'LCMNEXP' in h.keys()): # this indicates the LFC actually was actually exposed (either automatically or manually)
lc = 1
else: # if not latest header version, just go with the OBJECT field
if ('LC' in pyfits.getval(filename, 'OBJECT').split('+')) or (
'LFC' in pyfits.getval(filename, 'OBJECT').split('+')):
lc = 1
if h['SIMCALTT'] > 0:
thxe = 1
if lc + thxe == 0:
lamp_config = 'neither'
elif lc + thxe == 1:
if lc == 1:
lamp_config = 'lfc'
else:
lamp_config = 'thxe'
elif lc + thxe == 2:
lamp_config = 'both'
epoch_sublists = {}
epoch_sublists[lamp_config] = imglist[:]
# (1) call routine that does all the overscan-, bias- & dark-correction stuff and proper error treatment
img = correct_for_bias_and_dark_from_filename(filename, MB, MD, gain=gain, scalable=scalable, savefile=saveall, path=path) # [e-]
#err = np.sqrt(img + ronmask*ronmask) # [e-]
#TEMPFIX: (how should I be doing this properly???)
err_img = np.sqrt(np.clip(img,0,None) + ronmask*ronmask) # [e-]
## (2) remove cosmic rays (ERRORS MUST REMAIN UNCHANGED)
## check if there are multiple exposures for this epoch (if yes, we can do the much simpler "median_remove_cosmics")
if len(epoch_sublists[lamp_config]) == 1:
# do it the hard way using LACosmic
# identify and extract background
bg_raw = extract_background(img, chipmask['bg'], timit=timit)
# remove cosmics, but only from background
cosmic_cleaned_img = remove_cosmics(bg_raw.todense(), ronmask, obsname, path, Flim=3.0, siglim=5.0, maxiter=1, savemask=False, savefile=False, save_err=False, verbose=True, timit=True) # [e-]
# identify and extract background from cosmic-cleaned image
bg = extract_background(cosmic_cleaned_img, chipmask['bg'], timit=timit)
# bg = extract_background_pid(cosmic_cleaned_img, P_id, slit_height=30, exclude_top_and_bottom=True, timit=timit)
# fit background
bg_coeffs, bg_img = fit_background(bg, clip=10, return_full=True, timit=timit)
elif len(epoch_sublists[lamp_config]) == 2:
if new_epoch or not os.path.isfile(path + 'temp_bg_' + lamp_config + '.fits'):
# list of individual exposure times for this epoch
subepoch_texp_list = [pyfits.getval(file, 'ELAPSED') for file in epoch_sublists[lamp_config]]
tscale = np.array(subepoch_texp_list) / texp
# get background from the element-wise minimum-image of the two images
img1 = correct_for_bias_and_dark_from_filename(epoch_sublists[lamp_config][0], MB, MD, gain=gain, scalable=scalable, savefile=False)
img2 = correct_for_bias_and_dark_from_filename(epoch_sublists[lamp_config][1], MB, MD, gain=gain, scalable=scalable, savefile=False)
min_img = np.minimum(img1/tscale[0], img2/tscale[1])
# identify and extract background from the minimum-image
bg = extract_background(min_img, chipmask['bg'], timit=timit)
# bg = extract_background_pid(min_img, P_id, slit_height=30, exclude_top_and_bottom=True, timit=timit)
del min_img
# fit background
bg_coeffs, bg_img = fit_background(bg, clip=10, return_full=True, timit=timit)
# save background image to temporary file for re-use later (when reducing the next file of this sublist)
pyfits.writeto(path + 'temp_bg_' + lamp_config + '.fits', bg_img, clobber=True)
else:
# no need to re-compute background, just load it from file
print('Loading background image for this epoch and lamp configuration...')
bg_img = pyfits.getdata(path + 'temp_bg_' + lamp_config + '.fits')
else:
if new_epoch or not os.path.isfile(path + 'temp_bg_' + lamp_config + '.fits'):
# make sure this sublist is not too long (otherwise we might run out of memory in this step)
if len(epoch_sublists[lamp_config]) > 10:
mainix = epoch_sublists[lamp_config].index(filename)
if mainix < 5:
epoch_sublists[lamp_config] = epoch_sublists[lamp_config][:11]
elif mainix > len(epoch_sublists[lamp_config]) - 6:
epoch_sublists[lamp_config] = epoch_sublists[lamp_config][-11:]
else:
epoch_sublists[lamp_config] = epoch_sublists[lamp_config][mainix-5:mainix+6]
# list of individual exposure times for this epoch
subepoch_texp_list = [pyfits.getval(file, 'ELAPSED') for file in epoch_sublists[lamp_config]]
tscale = np.array(subepoch_texp_list) / texp
# make list of actual images
img_list = []
for file in epoch_sublists[lamp_config]:
img_list.append(correct_for_bias_and_dark_from_filename(file, MB, MD, gain=gain, scalable=scalable, savefile=False))
# # index indicating which one of the files in the epoch list is the "main" one
# main_index = np.where(np.array(epoch_ix) == i)[0][0]
# take median after scaling to same exposure time as main exposure
med_img = np.median(np.array(img_list) / tscale.reshape(len(img_list), 1, 1), axis=0)
del img_list
# identify and extract background from the median image
bg = extract_background(med_img, chipmask['bg'], timit=timit)
# bg = extract_background_pid(med_img, P_id, slit_height=30, exclude_top_and_bottom=True, timit=timit)
del med_img
# fit background
bg_coeffs, bg_img = fit_background(bg, clip=10, return_full=True, timit=timit)
# save background image to temporary file for re-use later (when reducing the next file of this sublist)
pyfits.writeto(path + 'temp_bg_' + lamp_config + '.fits', bg_img, clobber=True)
else:
# no need to re-compute background, just load it from file
print('Loading background image for this epoch and lamp configuration...')
bg_img = pyfits.getdata(path + 'temp_bg_' + lamp_config + '.fits')
# now actually subtract the background model
bg_corrected_img = img - bg_img
# cosmic_cleaned_img = median_remove_cosmics(img_list, main_index=main_index, scales=scaled_texp, ronmask=ronmask, debug_level=1, timit=True)
# (3) fit and remove background (ERRORS REMAIN UNCHANGED)
# bg_corrected_img = remove_background(cosmic_cleaned_img, P_id, obsname, path, degpol=5, slit_height=slit_height, save_bg=True, savefile=True, save_err=False,
# exclude_top_and_bottom=True, verbose=True, timit=True) # [e-]
# bg_corrected_img = remove_background(img, P_id, obsname, path, degpol=5, slit_height=slit_height, save_bg=False, savefile=True, save_err=False,
# exclude_top_and_bottom=True, verbose=True, timit=True) # [e-]
# adjust errors?
# (4) remove pixel-to-pixel sensitivity variations (2-dim)
#XXXXXXXXXXXXXXXXXXXXXXXXXXX
#TEMPFIX
final_img = bg_corrected_img.copy() # [e-]
# final_img = img.copy() # [e-]
#adjust errors?
# (5) extract stripes
if not from_indices:
stripes,stripe_indices = extract_stripes(final_img, P_id, return_indices=True, slit_height=slit_height, savefiles=saveall, obsname=obsname, path=path, timit=True)
err_stripes = extract_stripes(err_img, P_id, return_indices=False, slit_height=slit_height, savefiles=saveall, obsname=obsname+'_err', path=path, timit=True)
if stripe_indices is None:
# this is just to get the stripe indices in case we forgot to provide them (DONE ONLY ONCE, if at all...)
stripes,stripe_indices = extract_stripes(final_img, P_id, return_indices=True, slit_height=slit_height, savefiles=False, obsname=obsname, path=path, timit=True)
# (6) perform extraction of 1-dim spectrum
if from_indices:
pix,flux,err = extract_spectrum_from_indices(final_img, err_img, quick_indices, method='quick', slit_height=qsh, ronmask=ronmask, savefile=True,
filetype='fits', obsname=obsname, date=date, path=path, timit=True)
pix,flux,err = extract_spectrum_from_indices(final_img, err_img, stripe_indices, method=ext_method, slope=slope, offset=offset, fibs=fibs, slit_height=slit_height,
ronmask=ronmask, savefile=True, filetype='fits', obsname=obsname, date=date, path=path, timit=True)
else:
pix,flux,err = extract_spectrum(stripes, err_stripes=err_stripes, ron_stripes=ron_stripes, method='quick', slit_height=qsh, ronmask=ronmask, savefile=True,
filetype='fits', obsname=obsname, date=date, path=path, timit=True)
pix,flux,err = extract_spectrum(stripes, err_stripes=err_stripes, ron_stripes=ron_stripes, method=ext_method, slope=slope, offset=offset, fibs=fibs,
slit_height=slit_height, ronmask=ronmask, savefile=True, filetype='fits', obsname=obsname, date=date, path=path, timit=True)
# # (7) get relative intensities of different fibres
# if from_indices:
# relints = get_relints_from_indices(P_id, final_img, err_img, stripe_indices, mask=mask, sampling_size=sampling_size, slit_height=slit_height, return_full=False, timit=True)
# else:
# relints = get_relints(P_id, stripes, err_stripes, mask=mask, sampling_size=sampling_size, slit_height=slit_height, return_full=False, timit=True)
#
#
# # (8) get wavelength solution
# #XXXXX
# # (9) get barycentric correction
# if obstype == 'stellar':
# bc = get_barycentric_correction(filename)
# bc = np.round(bc,2)
# if np.isnan(bc):
# bc = ''
# # write the barycentric correction into the FITS header of both the quick-extracted and the optimal-extracted reduced spectrum files
# outfn_list = glob.glob(path + '*' + obsname + '*extracted*')
# for outfn in outfn_list:
# pyfits.setval(outfn, 'BARYCORR', value=bc, comment='barycentric velocity correction [m/s]')
# #now append relints, wl-solution, and barycorr to extracted FITS file header
# outfn = path + obsname + '_extracted.fits'
# if os.path.isfile(outfn):
# #relative fibre intensities
# dum = append_relints_to_FITS(relints, outfn, nfib=19)
# #wavelength solution
# #pyfits.setval(fn, 'RELINT' + str(i + 1).zfill(2), value=relints[i], comment='fibre #' + str(fibnums[i]) + ' - ' + fibinfo[i] + ' fibre')
if timit:
print('Total time elapsed: '+str(np.round(time.time() - start_time,1))+' seconds')
return
| [
"c.bergmann@unsw.edu.au"
] | c.bergmann@unsw.edu.au |
70dc1722e3ebbc732e9e23ca572697351d3bd2f3 | ed6f57730ebd672335361b537e38b8646ae2f904 | /factories.py | 6a6b8ccc86db10e1e045093627720ba4c0668c8e | [] | no_license | Karamax/StackCity | e229cecd4322800b287c531cfc678ca76ca3d0b1 | 7d5acb007356fb90f3efc7ab95f6361f5c9eb140 | refs/heads/master | 2021-06-13T20:44:22.275950 | 2017-05-08T13:24:50 | 2017-05-08T13:24:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,399 | py | """
A collection of factory objects
"""
from misc import make_filled_shape, shape_copy
from cells import Ground
from buildings import Dwelling, FisherBoat, Smithery, Barracks
import random
class NextItemFactory:
    """
    A factory that generates items to be placed on field. It knows about the
    field state to generate placeable objects.
    """
    def __init__(self, cell_field):
        """Remember the field and map item names to their maker methods."""
        self.cell_field = cell_field
        self.maker_functions = {'ground_block': self.create_ground_block,
                                'house': self.create_house,
                                'boat': self.create_boat,
                                'smithery': self.create_smithery}
        self.possible_items = ('ground_block',
                               'house',
                               'boat',
                               'smithery')

    @staticmethod
    def get_shape_list(size):
        """
        Get a list of shapes of a given size.
        This method should be replaced by loading shapes from files when I get
        to loading *anything* from files.

        :param size: (rows, columns) tuple; each dimension must be in 1..3
        :raises ValueError: for sizes that have no shapes defined
            (the old code silently returned None, which crashed later)
        :return: list of shapes; each shape is a list of rows of booleans
        """
        if size[0] == 1 or size[1] == 1:
            # A 1-wide strip only has the completely filled shape.
            return [make_filled_shape(size)]
        if size[1] == 2:
            if size[0] == 2:
                return [
                    [[True, False], [False, True]],  # Diagonal
                    [[True, False], [True, True]],   # L-shaped
                    [[True, True], [True, True]]     # A filled square
                ]
            if size[0] == 3:
                return [
                    [[True, False], [True, False], [True, True]],  # L-shaped
                    [[True, True], [True, False], [True, True]],   # C-shaped
                    [[True, False], [True, True], [True, False]]   # The tetris one
                ]
        elif size[1] == 3:
            if size[0] == 2:
                return [
                    [[True, True, True], [True, False, False]],
                    [[True, True, True], [True, False, True]],
                    [[True, True, True], [False, True, False]]
                ]
            if size[0] == 3:
                return [
                    [[False, True, False],  # Cross-shaped
                     [True, True, True],
                     [False, True, False]],
                    [[True, True, True],    # Dumbell-shaped
                     [False, True, False],
                     [True, True, True]]
                ]
        raise ValueError('No shapes defined for size {}'.format(size))

    def shape_ground_block(self, size=(2, 2), ground_type='water'):
        """
        Create a ground block of a given size and type.
        The block is given a random shape from the shapes available in that
        size; cells marked True in the shape become Ground instances.

        :param size: (rows, columns) tuple
        :param ground_type: passed through to Ground
        :return: 2D list with Ground objects where the shape is filled
        """
        # random.choice replaces the old randint-then-index idiom.
        shape = random.choice(self.get_shape_list(size))
        block = shape_copy(shape)
        for y in range(len(block)):
            for x in range(len(block[y])):
                if shape[y][x]:
                    block[y][x] = Ground(ground_type=ground_type)
        return block

    def create_ground_block(self):
        """
        Create a random block of ground: 1..3 cells per side, random type.
        :return: 2D list as produced by shape_ground_block
        """
        xsize = random.randint(1, 3)
        ysize = random.randint(1, 3)
        ground_type = random.choice(('water', 'living',
                                     'military', 'infrastructure'))
        return self.shape_ground_block((ysize, xsize), ground_type)

    @staticmethod
    def create_house():
        """Create a basic dwelling placeable on 'living' ground."""
        return Dwelling(image_source='House.png',
                        name='A simple hut',
                        acceptable_ground=['living'],
                        max_dwellers=5)

    @staticmethod
    def create_boat():
        """Create a fishing boat placeable on 'water' ground."""
        return FisherBoat(image_source='Boat.png',
                          acceptable_ground=['water'],
                          name='Fishing boat',
                          workers_required=1)

    @staticmethod
    def create_smithery():
        """Create a smithery placeable on 'military' or 'infrastructure' ground."""
        return Smithery(image_source='Workshop.png',
                        acceptable_ground=['military', 'infrastructure'],
                        name='Smithery',
                        workers_required=2)

    def create_item(self):
        """Create one random item from possible_items via its maker function."""
        next_thing = random.choice(self.possible_items)
        return self.maker_functions[next_thing]()
| [
"alexeymorozov1991@gmail.com"
] | alexeymorozov1991@gmail.com |
11bfce963eac377dffb54fc7696dd1c222f0af4d | f81ab0655152c682d7850325ce17dda8a661cddf | /recipe_project/settings.py | 8a7ba3456530dfed055a533ca86bf3ca8a332ef9 | [] | no_license | gprophete/recipe-app | 077985d69d1b6ce2180a6788f34ef35e274eac71 | bbb9689a227a75d287fffa1b74a4fd5027305577 | refs/heads/master | 2023-08-05T16:30:40.852179 | 2020-07-07T02:12:20 | 2020-07-07T02:12:20 | 269,709,653 | 0 | 0 | null | 2021-09-22T19:19:15 | 2020-06-05T16:52:25 | Python | UTF-8 | Python | false | false | 3,428 | py | """
Django settings for recipe_project project.
Generated by 'django-admin startproject' using Django 3.0.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
REACT_APP_DIR = os.path.join(BASE_DIR, 'client')
STATICFILES_DIRS = [
os.path.join(REACT_APP_DIR, 'build', 'static')
]
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'a7@fpyb8p5o^*pjo-4b3k7bgdvxb4qih#e%@q%epb_f^o7c0eh'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'django_extensions',
'recipe_app'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'recipe_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'recipe_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'recipe_database',
'USER': 'gprophete30',
'PASSWORD': 'galoulou',
'HOST': 'localhost'
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
import django_heroku
django_heroku.settings(locals()) | [
"garlyprophete30@gmail.com"
] | garlyprophete30@gmail.com |
e1c3075f706667755ba59b0caaaedb0ba5b258d1 | 039c28f0903a2b87ef1439a991e7c2e1d898ab48 | /pyneqsys/_release.py | a9edcd4240cbd989149d9c51908621ebb04cd636 | [
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | andim/pyneqsys | f6ddae44ef0f0fdc41725b1cc6007834b790a4fa | f22fb840692a174826bd4d0a03a52e41e63d062f | refs/heads/master | 2021-05-05T19:22:59.256424 | 2018-01-08T23:23:33 | 2018-01-08T23:23:33 | 117,774,582 | 0 | 0 | null | 2018-01-17T02:53:21 | 2018-01-17T02:53:21 | null | UTF-8 | Python | false | false | 26 | py | __version__ = '0.6.0.git'
| [
"bjodah@gmail.com"
] | bjodah@gmail.com |
eb21a0504a080bc96b70fc8e4b53bf0452b92757 | b987d52d1b8af7c6c960a41c4255839fd3d06965 | /polls/models.py | f5ee8656a1e5c1dd7088ec58ee1489adec400c08 | [] | no_license | ptv6ug/mysite | 1c70b30bd07426a6b6483ca86b1144f20cb0e5d3 | eba2a35c4c748a7362e2fef9b9af9bfd9719a46a | refs/heads/master | 2020-07-21T20:11:42.708492 | 2019-09-07T13:11:58 | 2019-09-07T13:11:58 | 206,964,984 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 112 | py | from django.db import models
class Message(models.Model):
message_text = models.CharField(max_length=200)
| [
"ptv6ug@virginia.edu"
] | ptv6ug@virginia.edu |
caac65c65a43cc7e4e65f49fac76e97c91f5f5e5 | b04dfd31c549ca467a90d5f4a8369a8211f08877 | /change.py | 9994a116c7ac2dc86f61821fdd893bd7bf7ccec2 | [] | no_license | shyamalamanikandan/text_analysis | bacd08d90587a0a971cfcce8c4e324dc775b99c9 | 9c0974651d6c6823fd19c2456823ec324250bbd7 | refs/heads/master | 2020-08-30T17:56:18.849973 | 2019-10-30T05:51:43 | 2019-10-30T05:51:43 | 218,450,971 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20,293 | py | from flask import Flask, redirect, url_for, session, request, render_template, flash, Markup
from flask_oauth import OAuth
from flask import Flask, redirect, url_for, session, request, jsonify, Response,render_template
from flask import Flask, render_template, request
import pymysql.cursors
from urllib.parse import urlparse
from textblob import TextBlob
from Reader import fileReaderMethod
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
#from wordcloud import WordCloud, STOPWORDS
import requests
from wordcloud import WordCloud
from nltk.corpus import stopwords
from string import punctuation
from flask_oauth import OAuth
from uuid import uuid4
from flask_oauthlib.client import OAuth
from FrequencySummariser import Summariser
import threading
import os
import webbrowser
from flask import redirect, url_for, send_from_directory
from werkzeug.utils import secure_filename
import csv, re, collections
from collections import Counter
from flask_mail import Mail, Message
import pyotp
import time
# Flask session-signing key and third-party OAuth application credentials.
# NOTE(review): every secret below (Flask key, Facebook/Google credentials,
# Gmail password) is committed to source control - move them to environment
# variables before deploying.
SECRET_KEY = 'development key'
DEBUG = True
FACEBOOK_APP_ID = '2229493917289490'
FACEBOOK_APP_SECRET = 'd252637a283f20c30cef24ecd15f1bc0'
GOOGLE_CLIENT_ID = '534978381583-75t3d5f8o64n67b7qufh0k041t3eif6d.apps.googleusercontent.com'
GOOGLE_CLIENT_SECRET = 'rm2H7Hm5xQ2JuYYrrK4_mXNB'
#REDIRECT_URI = '/oauth2callback' # one of the Redirect URIs from Google APIs
app = Flask(__name__)
app.debug = DEBUG
app.secret_key = SECRET_KEY
# NOTE(review): both flask_oauth and flask_oauthlib are imported above; the
# later "from flask_oauthlib.client import OAuth" shadows the first, so this
# is the flask_oauthlib registry - confirm that is intended.
oauth = OAuth()
#connection = pymysql.connect(host='localhost',user='root',password='',db='sentiment_analysis', charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
# Server-side location for user-uploaded documents (see getsentiments()).
UPLOAD_FOLDER = '/home/ubuntu/project/FilesUploading'
ALLOWED_EXTENSIONS = set(['txt', 'pdf','doc','docx'])
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
# Outgoing mail settings, used for the password-reset OTP emails.
app.config['MAIL_SERVER']='smtp.gmail.com'
app.config['MAIL_PORT'] = 465
app.config['MAIL_USERNAME'] = 'will2vigilant@gmail.com'
app.config['MAIL_PASSWORD'] = 'Will2Vigilant'
app.config['MAIL_USE_TLS'] = False
app.config['MAIL_USE_SSL'] = True
mail = Mail(app)
# OAuth client for Facebook Login (Graph API); requests the 'email' scope.
facebook = oauth.remote_app('facebook',
    base_url='https://graph.facebook.com/',
    request_token_url=None,
    access_token_url='/oauth/access_token',
    authorize_url='https://www.facebook.com/dialog/oauth',
    consumer_key=FACEBOOK_APP_ID,
    consumer_secret=FACEBOOK_APP_SECRET,
    request_token_params={'scope': 'email'}
)
# OAuth client for Google Sign-In; requests the 'email' scope.
google = oauth.remote_app(
    'google',
    consumer_key='534978381583-75t3d5f8o64n67b7qufh0k041t3eif6d.apps.googleusercontent.com',
    consumer_secret='rm2H7Hm5xQ2JuYYrrK4_mXNB',
    request_token_params={
        'scope': 'email'
    },
    base_url='https://www.googleapis.com/oauth2/v1/',
    request_token_url=None,
    access_token_method='POST',
    access_token_url='https://accounts.google.com/o/oauth2/token',
    authorize_url='https://accounts.google.com/o/oauth2/auth',
)
# OAuth client for LinkedIn (v1 API); see change_linkedin_query below, which
# rewrites outgoing requests into the form the LinkedIn API expects.
linkedin = oauth.remote_app(
    'linkedin',
    consumer_key='81dvhjgsnztbst',
    consumer_secret='7I9Ov8bWUWgezeoF',
    request_token_params={
        'scope': 'r_basicprofile',
        'state': 'RandomString',
    },
    base_url='https://api.linkedin.com/v1/',
    request_token_url=None,
    access_token_method='POST',
    access_token_url='https://www.linkedin.com/uas/oauth2/accessToken',
    authorize_url='https://www.linkedin.com/uas/oauth2/authorization',
)
@app.route('/google', methods=['GET', 'POST'])
def index():
    """Entry point for Google sign-in; hands off to the OAuth flow in glogin()."""
    # The previous body carried a commented-out token check inside a docstring;
    # removed as dead code - the real token handling lives in authorized().
    return redirect(url_for('glogin'))
@app.route('/glogin')
def glogin():
    # Kick off the Google OAuth2 dance; Google redirects back to /oauth2callback.
    return google.authorize(callback=url_for('authorized', _external=True))
@app.route('/oauth2callback')
def authorized():
    """Google OAuth callback: store the access token in the session, register
    the Google user id on first visit, and render the main app page."""
    resp = google.authorized_response()
    if resp is None:
        return 'Access denied: reason=%s error=%s' % (
            request.args['error_reason'],
            request.args['error_description']
        )
    session['google_token'] = (resp['access_token'], '')
    me = google.get('userinfo')
    username = me.data['id']
    # The Google account id doubles as the 'email' key used elsewhere.
    session['email'] = username
    connection = pymysql.connect(host='localhost', user='root', password='', db='sentiment_analysis',
                                 charset='utf8mb4', cursorclass=pymysql.cursors.DictCursor)
    try:
        with connection.cursor() as cursor:
            # Parameterised query: the old string-concatenated SELECT was open
            # to SQL injection.
            cursor.execute("SELECT count(*) as count FROM register1 where email = %s", (username,))
            row = cursor.fetchone()
            if row is None or row['count'] == 0:
                cursor.execute("INSERT INTO register1 (email) VALUES(%s)", (username,))
        # pymysql does not autocommit; without this the INSERT was silently
        # discarded when the connection closed.
        connection.commit()
    finally:
        connection.close()
    return render_template('new.html')
@google.tokengetter
def get_google_oauth_token():
    # Called by the oauth client to sign Google API requests with the token
    # stored at login; None when the user is not signed in.
    return session.get('google_token')
@app.route('/facebook', methods=['GET', 'POST'])
def index1():
    # Entry point for Facebook sign-in; the OAuth dance starts in login().
    return redirect(url_for('login'))
@app.route('/session_clear',methods=['GET', 'POST'])
def logout():
    # Drop everything (tokens, email, user id) and return to the landing page.
    session.clear()
    return render_template('index.html')
@app.route('/login')
def login():
    # Start the Facebook OAuth flow; 'next' preserves where to go afterwards.
    return facebook.authorize(callback=url_for('facebook_authorized',
        next=request.args.get('next') or request.referrer or None,
        _external=True))
@app.route('/login/authorized')
@facebook.authorized_handler
def facebook_authorized(resp):
    """Facebook OAuth callback: store the access token, register the Facebook
    user id on first visit, and render the main app page."""
    if resp is None:
        return 'Access denied: reason=%s error=%s' % (
            request.args['error_reason'],
            request.args['error_description']
        )
    session['oauth_token'] = (resp['access_token'], '')
    me = facebook.get('/me')
    username = me.data['id']
    # The Facebook account id doubles as the 'email' key used elsewhere.
    session['email'] = username
    connection = pymysql.connect(host='localhost', user='root', password='', db='sentiment_analysis',
                                 charset='utf8mb4', cursorclass=pymysql.cursors.DictCursor)
    try:
        with connection.cursor() as cursor:
            # Parameterised query: the old string-concatenated SELECT was open
            # to SQL injection.
            cursor.execute("SELECT count(*) as count FROM register1 where email = %s", (username,))
            row = cursor.fetchone()
            if row is None or row['count'] == 0:
                cursor.execute("INSERT INTO register1 (email) VALUES(%s)", (username,))
        # pymysql does not autocommit; without this the INSERT was silently
        # discarded when the connection closed.
        connection.commit()
    finally:
        connection.close()
    return render_template('new.html')
@facebook.tokengetter
def get_facebook_oauth_token():
    # Called by the oauth client to sign Facebook Graph API requests.
    return session.get('oauth_token')
@app.route('/linkedin',methods=['GET','POST'])
def lindex():
    # Entry point for LinkedIn sign-in; the OAuth dance starts in llogin().
    return redirect(url_for('llogin'))
@app.route('/llogin')
def llogin():
    # Kick off the LinkedIn OAuth2 dance; LinkedIn redirects to /oauth2linkedin.
    return linkedin.authorize(callback=url_for('linkedin_authorized', _external=True))
@app.route('/oauth2linkedin')
def linkedin_authorized():
    """LinkedIn OAuth callback: store the access token, register the LinkedIn
    member id on first visit, and render the main app page."""
    resp = linkedin.authorized_response()
    if resp is None:
        return 'Access denied: reason=%s error=%s' % (
            request.args['error_reason'],
            request.args['error_description']
        )
    session['linkedin_token'] = (resp['access_token'], '')
    me = linkedin.get('people/~')
    username = me.data['id']
    # The LinkedIn member id doubles as the 'email' key used elsewhere.
    session['email'] = username
    connection = pymysql.connect(host='localhost', user='root', password='', db='sentiment_analysis',
                                 charset='utf8mb4', cursorclass=pymysql.cursors.DictCursor)
    try:
        with connection.cursor() as cursor:
            # Parameterised query: the old string-concatenated SELECT was open
            # to SQL injection.
            cursor.execute("SELECT count(*) as count FROM register1 where email = %s", (username,))
            row = cursor.fetchone()
            if row is None or row['count'] == 0:
                cursor.execute("INSERT INTO register1 (email) VALUES(%s)", (username,))
        # pymysql does not autocommit; without this the INSERT was silently
        # discarded when the connection closed.
        connection.commit()
    finally:
        connection.close()
    return render_template('new.html')
@linkedin.tokengetter
def get_linkedin_oauth_token():
    # Called by the oauth client to sign LinkedIn API requests.
    return session.get('linkedin_token')
def change_linkedin_query(uri, headers, body):
    """pre_request hook for the LinkedIn client.

    LinkedIn's v1 API expects the bearer token as an ``oauth2_access_token``
    query parameter rather than an ``Authorization`` header, and JSON output
    must be requested via the ``x-li-format`` header; rewrite the outgoing
    request accordingly.

    :param uri: request URL; the token (if any) is appended as a query param
    :param headers: mutable header dict; ``Authorization`` is removed and
        ``x-li-format`` is set to ``'json'``
    :param body: request body, passed through untouched
    :return: the (uri, headers, body) tuple
    """
    # Pop with a default so requests that carry no Authorization header
    # (e.g. before login) no longer raise KeyError.
    auth = headers.pop('Authorization', None)
    headers['x-li-format'] = 'json'
    if auth:
        auth = auth.replace('Bearer', '').strip()
        if '?' in uri:
            uri += '&oauth2_access_token=' + auth
        else:
            uri += '?oauth2_access_token=' + auth
    return uri, headers, body
# Rewrite every outgoing LinkedIn request so the token travels as a query param.
linkedin.pre_request = change_linkedin_query
def allowed_file(filename, allowed=None):
    """Return True if *filename* has an extension in the allowed set.

    The extension check is now case-insensitive ('report.TXT' used to be
    rejected). *allowed* defaults to the module-level ALLOWED_EXTENSIONS, so
    existing callers are unaffected; tests may pass an explicit set.
    """
    if allowed is None:
        allowed = ALLOWED_EXTENSIONS
    return '.' in filename and \
           filename.rsplit('.', 1)[1].lower() in allowed
@app.route('/')
def jobportal():
    # Landing page with the login/registration forms.
    return render_template('index.html')
@app.route('/exit', methods=['GET', 'POST'])
def exit():
    # Return to the main app page.
    # NOTE(review): this shadows the builtin exit() at module level - harmless
    # here, but consider renaming while keeping the '/exit' route.
    return render_template('new.html')
@app.route('/error', methods=['GET', 'POST'])
def error():
    # Generic error exit: drop the session and return to the landing page.
    session.clear()
    return render_template('index.html')
@app.route("/forgotpass")
def mailvalidate():
return render_template('otppassword.html')
@app.route('/otppassword', methods=['POST', 'GET'])
def otpget():
    """Start a password reset: verify the address is registered, persist a
    one-time code against it, and email that code to the user.

    Renders chpass.html on success; otppassword.html otherwise (with an
    error message for a POST of an unknown address).
    """
    if request.method == 'POST':
        email = request.form['nm']
        session['email'] = email
        connection = pymysql.connect(host='localhost', user='root', password='', db='sentiment_analysis',
                                     charset='utf8mb4', cursorclass=pymysql.cursors.DictCursor)
        try:
            with connection.cursor() as cursor:
                cursor.execute("select * from register1 WHERE email=%s", (email,))
                if cursor.fetchone() is None:
                    # Unknown address: report the error without emailing anyone
                    # (the old code sent an OTP before checking registration).
                    return render_template('otppassword.html', error="Invalid Email_ID")
                # NOTE(review): a hard-coded shared TOTP secret makes every
                # user's code predictable - generate a per-user secret instead.
                totp = pyotp.TOTP('base32secret3232')
                otp = str(totp.now())
                cursor.execute("UPDATE register1 SET otp=%s WHERE email=%s", (otp, email))
            # pymysql does not autocommit; without this the OTP was never saved,
            # so the follow-up pwchange lookup could not match it.
            connection.commit()
        finally:
            connection.close()
        msg = Message('Your OTP for resetting Sentiment Analysis Password', sender='will2vigilant@gmail.com',
                      recipients=[email])
        msg.body = "Please enter below OTP to reset your account Password: " + otp
        mail.send(msg)
        return render_template('chpass.html')
    # Plain GET: show the request form (the old code fell through and
    # returned None, which Flask turns into a 500).
    return render_template('otppassword.html')
@app.route("/pwchange",methods = ['POST', 'GET'])
def pwchange():
if request.method == 'POST':
connection = pymysql.connect(host='localhost',user='root',password='',db='sentiment_analysis', charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
with connection.cursor() as cursor:
otp=request.form['otp']
password=request.form['newpass']
email=session['email']
error="Invalid OTP"
sql1=cursor.execute("select * from register1 WHERE otp=%s and email=%s",(otp,email))
data = cursor.fetchone()
print("Type =",type(data))
if data is not None:
sql="UPDATE register1 SET password=%s WHERE otp=%s"
cursor.execute(sql,(password,otp))
connection.close()
return render_template('new.html')
connection.close()
return render_template('chpass.html',error=error)
@app.route('/register', methods=['GET', 'POST'])
def register():
    """Create a new account from the registration form.

    On success renders the landing page; on a duplicate email re-renders the
    registration form with an explanatory message; a GET shows the form.
    """
    if request.method == 'POST':
        session['username'] = str(request.form['username'])
        session['email'] = str(request.form['email'])
        password = str(request.form['password'])
        phone = str(request.form['phoneno'])
        country = str(request.form['country'])
        city = str(request.form['city'])
        designation = str(request.form['designation'])
        organization = str(request.form['Organization'])
        connection = pymysql.connect(host='localhost', user='root', password='', db='sentiment_analysis',
                                     charset='utf8mb4', cursorclass=pymysql.cursors.DictCursor)
        try:
            with connection.cursor() as cursor:
                # NOTE(review): password is stored in plain text - hash it.
                cursor.execute(
                    "INSERT INTO register1 (name,email,password,phone,country,city,designation,organization) VALUES(%s, %s, %s, %s, %s, %s,%s,%s)",
                    (session['username'], session['email'], password, phone,
                     country, city, designation, organization))
            # pymysql does not autocommit; the original INSERT could be lost.
            connection.commit()
        # Narrowed from a bare "except:" that hid every failure behind the
        # duplicate-email message. Assumes register1.email carries a UNIQUE
        # index so duplicates raise IntegrityError - TODO confirm schema.
        except pymysql.err.IntegrityError:
            error = "Email ID is already registered with us . Please use your existing password to continue or use the forget password link to reset your password."
            return render_template('Register.html', error=error)
        finally:
            connection.close()
        return render_template('index.html')
    else:
        return render_template('Register.html')
@app.route('/login_page', methods=['GET', 'POST'])
def login_form():
    """Validate the login form against the register1 table.

    On success stores the email in the session and renders the app page;
    on failure (or a plain GET, which previously returned None and caused a
    500) renders the landing page.
    """
    if request.method == 'POST':
        email = str(request.form['name'])
        password = str(request.form['password'])
        connection = pymysql.connect(host='localhost', user='root', password='', db='sentiment_analysis',
                                     charset='utf8mb4', cursorclass=pymysql.cursors.DictCursor)
        try:
            with connection.cursor() as cursor:
                # NOTE(review): plain-text password comparison - the stored
                # password should be hashed and verified instead.
                cursor.execute("select * from register1 where email =%s AND password=%s", (email, password))
                data = cursor.fetchone()
        finally:
            # The old code only closed the connection before checking the
            # result and leaked it on exceptions.
            connection.close()
        if data is not None:
            session['email'] = email
            return render_template('new.html')
        error = 'Invalid username or password. Please try again!'
        return render_template('index.html', error=error)
    return render_template('index.html')
@app.route('/reset', methods=['POST','GET'])
def reset():
    # Return to the main app page (clears the analysis view).
    return render_template('new.html')
@app.route('/frontlogin', methods=['GET', 'POST'])
def f_login():
    # Back to the landing page with the login form.
    return render_template('index.html')
@app.route('/sentiments', methods=['POST','GET'])
def getsentiments():
    # Main analysis endpoint: accepts raw text, a URL, or an uploaded file,
    # runs summarisation + sentiment scoring, logs the text against the
    # logged-in user, and renders the results page.
    # NOTE(review): the blanket try/except below hides all errors (including
    # programming errors) behind a generic "invalid input" message.
    try:
        val = ""
        session.pop('userid', None)
        results = []
        url_counter = 0
        if request.method == 'POST':
            #if(len(review)&&len(reviewurl)==0)
            # check if the post request has the file part
            if 'file' not in request.files:
                # No upload: prefer the free-text box, then the URL box.
                review = request.form['review']
                reviewurl = request.form['reviewurl']
                if len(review) != 0:
                    val = review
                elif len(reviewurl) != 0:
                    val = reviewurl
                    url_counter = 1
            else:
                file = request.files['file']
                #return redirect(request.url)
                if file and allowed_file(file.filename):
                    filename = secure_filename(file.filename)
                    file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
                    # Extract plain text from the saved upload.
                    val = fileReaderMethod(os.path.join(app.config['UPLOAD_FOLDER'], filename))
            if url_counter == 1:
                # URL input: fetch the page and keep only the <p> element text.
                import urllib.request
                from bs4 import BeautifulSoup
                page = urllib.request.urlopen(val).read().decode('utf8')
                soup = BeautifulSoup(page)
                text = ' '.join(map(lambda p: p.text, soup.find_all('p')))
                val = text
            #Polarity
            # freq is presumably an ordered word-frequency structure and
            # summary an iterable of sentence strings - TODO confirm against
            # FrequencySummariser.Summariser.
            freq, summary, polarity = Summariser(val)
            print("sum",summary)
            def sentiment_textblob(feedback):
                # Map TextBlob's polarity in [-1, 1] onto seven coarse labels.
                senti = TextBlob(feedback)
                polarity = senti.sentiment.polarity
                if -1 <= polarity < -0.5:
                    label = 'Highly Negative'
                elif -0.5 <= polarity < -0.1:
                    label = 'Negative'
                elif -0.1 <= polarity < 0:
                    label = 'Slightly Negative'
                elif polarity ==0:
                    label = 'Neutral'
                elif 0 < polarity < 0.2:
                    label = "Slightly Positive"
                elif 0.2 <= polarity < 0.6:
                    label = 'Positive'
                elif 0.6 <= polarity <= 1:
                    label = 'Highly Positive'
                return (label)
            polarity=sentiment_textblob(val)
            results.append(polarity)
            allsummary = ""
            wordfrquencies = ""
            #results.append(freq)
            # Truncate very long inputs before persisting them.
            udata = (val[:10000] + '..') if len(val) > 10000 else val
            udata = udata.encode('ascii', 'ignore')
            emailId = str(session['email'])
            connection = pymysql.connect(host='localhost',user='root',password='',db='sentiment_analysis', charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
            with connection.cursor() as cursor:
                # NOTE(review): query built by string concatenation - open to
                # SQL injection; use a parameterised query as elsewhere.
                query = "select id from register1 where email ='"+emailId+"'"
                print(query)
                cursor.execute(query)
                data=cursor.fetchall()
                userId = -1
                for row in data:
                    print(row)
                    userId=row['id']
                session['userId']=userId
                print(session['userId'])
            #print(userId,str(session['email']) )
            #session['userid']=userId
            with connection.cursor() as cursor:
                # NOTE(review): pymysql does not autocommit, so this INSERT is
                # discarded when the connection closes - confirm intended.
                cursor.execute("INSERT INTO `datatable1` VALUES(%s,%s)", (session['userId'],udata))
                print("database done")
            connection.close()
            # Collect up to the first 10 alphabetic words from freq for display.
            wrdcnt = 0
            print("word count",wrdcnt)
            for k in freq:
                if(k.isalpha()):
                    wordfrquencies += str(k)+', '
                    wrdcnt += 1
                if wrdcnt == 10:
                    print("word count1",wrdcnt)
                    break
            for s in summary:
                allsummary += s
            print("all ",allsummary)
            word_freqs_js, max_freq = wordCloudCaller(val)
            print("words",word_freqs_js)
            print("Max Freq",max_freq)
            # results = [sentiment label, top-word string, summary text]
            results.append(wordfrquencies)
            results.append(allsummary)
            return render_template('result-new.html',results = results, word_freqs=word_freqs_js, max_freq=max_freq)
        # NOTE(review): a plain GET falls through and returns None (HTTP 500).
    except:
        error="please give valid input"
        return render_template('new.html',error=error)
def wordCloudCaller(text):
    """Build word-frequency data for a browser word cloud and save a
    matplotlib word-cloud image to disk.

    Returns (word_freqs_js, max_freq): word_freqs_js is a list of
    {"text": word, "size": count} dicts, max_freq the highest count.
    NOTE(review): max() raises ValueError when no word survives the
    filtering (empty or stopword-only input) -- confirm callers guard.
    """
    print("Word Cloud")
    custom_stopwords = ["let","hi"]
    # Stopword set: NLTK English stopwords + punctuation + custom words.
    stopword1 = set(stopwords.words('english') + list(punctuation) + custom_stopwords)
    stripped_text = []
    #stripped_text = [word for word in text.split() if word.isalpha() and word.lower() not in open("stopwords", "r").read() and len(word) >= 2]
    # Keep only alphabetic words that are not stopwords.
    stripped_text = [word for word in text.split() if word.isalpha() and word.lower() not in list(stopword1) ]
    word_freqs = Counter(stripped_text)
    word_freqs = dict(word_freqs)
    print(word_freqs)
    # Reshape counts into the {"text", "size"} records the JS cloud expects.
    word_freqs_js = []
    for key,value in word_freqs.items():
        temp = {"text": key, "size": value}
        word_freqs_js.append(temp)
    max_freq = max(word_freqs.values())
    print("beginning wordcloud")
    # Render the WordCloud image and save it for the result page.
    wc = WordCloud(stopwords = stopword1).generate(text)
    plt.imshow(wc)
    plt.figure(1,figsize=(5,5))
    plt.axis('off')
    plt.title('Wordcloud')
    a=plt.savefig("static\\wordcloud.png")
    # NOTE(review): this Response is created but never returned or used --
    # the cookie deletion has no effect; looks like dead code.
    res = Response('delete cookies')
    res.set_cookie('a', '', expires=0)
    plt.close()
    return word_freqs_js, max_freq
# Script entry point: start the Flask development server.
if __name__ == '__main__':
    app.run()
| [
"noreply@github.com"
] | noreply@github.com |
af6a4d7875dce4fcc84a7cc85f079766c0a1f9e3 | 2accc7c44180ea8666ae676484543988a9e1c4f1 | /13_dvojrozmerna_tabulka.py | 143b0eab9903cef2552f9bf6272e718791e25698 | [] | no_license | SKMHv/TEST | e7145f2bfc6221bfeacd5d20a0d98c2db5ee804e | 98709d1a49ceaa6ba5e68c396796fb6554497ba2 | refs/heads/master | 2020-03-28T03:08:33.282343 | 2019-12-07T22:15:49 | 2019-12-07T22:15:49 | 147,620,662 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 25,508 | py |
# ---------------------------------
# Tabuľka farieb
# ----------------------------------
# Ukážme dve malé aplikácie, v ktorých vytvoríme dvojrozmerný zoznam náhodných farieb,
# potom ho vykreslíme do grafickej plochy ako postupnosť malých farebných štvorčekov
# - vznikne farebná mozaika a na záver to otestujeme klikaním myšou.
# Prvý program vygeneruje dvojrozmerný zoznam náhodných farieb, vykreslí ho a uloží do textového súboru:
import random
from tkinter import *
master = Tk()
# NOTE(review): a 20x30 grid of 10px squares needs roughly 330x220 px;
# width=200 clips part of the mosaic -- confirm intended canvas size.
w = Canvas(master, width=200, height=400)
w.pack()
tab = []
for i in range(20): # number of rows: 20
    p=[]
    for j in range(30): # number of columns: 30
        p.append(f'#{random.randrange(256**3):06x}') # append a random colour to the row list p[]
    tab.append(p) # append the finished row of colours to the table tab[]
d, x0, y0 = 10, 30, 10 # d = square size, x0,y0 = top-left corner of the mosaic
for i in range(len(tab)): # rows 0 - 19
    for j in range(len(tab[i])): # columns 0 - 29
        x, y = d*j+x0, d*i+y0 # compute the square's canvas coordinates
        w.create_rectangle(x, y, x+d, y+d, fill=tab[i][j], outline='') # draw one square
with open('FILE/tabulka_farieb.txt', 'w') as subor: # create/open the file for writing
    for riadok in tab: # for every row of tab[]
        print(' '.join(riadok), file=subor) # write the row as one space-separated line
mainloop()
# V druhej časti programu už nebudeme generovať dvojrozmerný zoznam, ale prečítame ho z uloženého súboru.
# Keďže plánujeme klikaním meniť farby kliknutých štvorčekov, musíme si pamätať ich identifikačné čísla,
# ktoré vznikajú pri ich vykreslení pomocou create_rectangle() - použijeme na to pomocnú dvojrozmernú tabuľku re
# (rovnakých rozmerov ako tabuľka farieb). Na záver doplníme funkciu na zabezpečenie klikania: kliknutý štvorček
# sa zafarbí, napr. na bielo:
import random
from tkinter import *
from DEF import vypis
master = Tk()
w = Canvas(master, width=360, height=260)
w.pack()
# Fill tab[] with the colour names stored in tabulka_farieb.txt.
tab = []
with open('FILE/tabulka_farieb.txt', 'r') as subor: # open the colour file for reading
    for riadok in subor: # for every line of the file
        tab.append(riadok.split()) # append the line's words as one row of tab[]
# Helper table re[][] (same shape as tab) for the canvas item ids.
# NOTE(review): the name `re` shadows the stdlib `re` module.
re = []
for i in range(len(tab)):
    re.append([0] * len(tab[i]))
vypis(re) # debug print of the helper table re[]
# Draw the mosaic and remember each rectangle's canvas id in re[][].
d, x0, y0 = 10, 30, 10
for i in range(len(tab)):
    for j in range(len(tab[i])):
        x, y = d*j + x0, d*i + y0
        re[i][j] = w.create_rectangle(x, y, x+d, y+d, fill=tab[i][j], outline='')
def klik(event):
    # Translate the click position to grid row/column; if it hits the
    # mosaic, repaint that square white.
    stlpec, riadok = (event.x - x0) // d, (event.y - y0) // d
    if 0 <= riadok < len(tab) and 0 <= stlpec < len(tab[riadok]):
        w.itemconfig(re[riadok][stlpec], fill='white')
        #tab[riadok][stlpec] = 'white'
w.bind('<Button-1>', klik)
mainloop()
# -----------------------
# Hra LIFE
# -----------------------
"""
Pravidlá:
v nekonečnej štvorcovej sieti žijú bunky, ktoré sa rôzne rozmnožujú, resp. umierajú
v každom políčku siete je buď živá bunka, alebo je políčko prázdne (budeme označovať ako 1 a 0)
každé políčko má 8 susedov (vodorovne, zvislo aj po uhlopriečke)
v každej generácii sa s každým jedným políčkom:
- ak je na políčku bunka a má práve 2 alebo 3 susedov, tak táto bunka prežije aj do ďalšej generácie
- ak je na políčku bunka a má buď 0 alebo 1 suseda, alebo viac ako 3 susedov, tak bunka na tomto políčku do ďalšej generácie neprežije (umiera)
- ak má prázdne políčko presne na troch susediacich políčkach živé bunky, tak sa tu v ďalšej generácii narodí nová bunka
Štvorcovú sieť s 0 a 1 budeme ukladať v dvojrozmernej tabuľke veľkosti n x n. V tejto tabuľke je momentálna generácia bunkových živočíchov.
Na to, aby sme vyrobili novú generáciu, si pripravíme pomocnú tabuľku rovnakej veľkosti a do nej budeme postupne zapisovať bunky novej generácie.
Keď už bude celá táto pomocná tabuľka hotová, prekopírujeme ju do pôvodnej tabuľky. Dvojrozmernú tabuľku budeme vykresľovať do grafickej plochy.
"""
import random
from tkinter import *
master = Tk()
w = Canvas(master, width=600, height=600, bg = "white")
w.pack()
def inicializuj_siet():
    """Draw the n x n grid of d-pixel squares plus the generation caption.

    Returns the 2-D list of canvas rectangle ids (one per cell).
    Uses the module-level canvas ``w`` and grid size ``n``.
    """
    d, x0, y0 = 10, 40, 70
    xt, yt = x0+(n*d)/2, y0-35
    re = []
    global text
    text = w.create_text(xt, yt ,text='1. GENERACIA', font='arial 25')
    for i in range(n):
        re.append([0]*n)
        for j in range(n):
            x, y = d*j+x0, d*i+y0
            re[i][j] = w.create_rectangle(x, y, x+d, y+d, fill="white", outline="grey")
    return re
def nahodne():
    """Return an n x n table filled with random 0/1 cell values."""
    siet = []
    for i in range(n):
        p = []
        for j in range(n):
            p.append(random.randrange(2)) # change 2 to 1 for a fixed (all-dead) start grid
        siet.append(p)
    # siet[5][2] = siet[5][3] = siet[5][4] = siet[4][4] = siet[3][3] = 1 # uncomment to seed the grid manually
    return siet
def kresli(t):
    # Repaint every cell from `siet` (white = dead, black = alive),
    # briefly flashing a red outline; t is the per-cell delay in ms.
    for i in range(n):
        for j in range(n):
            farba = ['white', 'black'][siet[i][j]]
            w.itemconfig(re[i][j], outline='red', width=2)
            w.update()
            w.after(t)
            w.itemconfig(re[i][j], fill=farba, outline="grey",width=1)
    w.update()
def pocet_susedov(rr, ss):
    # Count live cells in the 8-neighbourhood of (rr, ss); the centre
    # cell is summed too and subtracted at the end. Off-grid neighbours
    # are skipped (the border is treated as dead).
    pocet = 0
    for r in(rr-1, rr, rr+1):
        for s in(ss-1, ss, ss+1):
            if 0 <= r < n and 0 <= s < n:
                pocet += siet[r][s]
    return pocet - siet[rr][ss]
def nova():
    # Build the next generation in a scratch table (Conway rules:
    # birth on 3 neighbours, survival on 2 or 3), then copy it into
    # `siet` in place and redraw.
    siet1 = []
    for i in range (n):
        siet1.append([0] * n)
    for i in range(n):
        for j in range(n):
            p = pocet_susedov(i, j)
            if p == 3 or p == 2 and siet[i][j]:
                siet1[i][j] = 1
    siet[:] = siet1
    kresli(0)
def rob(kolko=100):
    # Run `kolko` generations, updating the caption before each step.
    for i in range(kolko):
        w.itemconfig(text, text='{}. GENERACIA' .format(i+2))
        nova()
# START
n = 50
re = inicializuj_siet() # draw the empty grid
#print('re - ', re)
siet = nahodne()
#print('siet = ', siet)
kresli(0)
rob()
mainloop()
# -----------------------
# CVICENIA - http://python.input.sk/13.html#vytvaranie-dvojrozmernych-tabuliek
# -----------------------
# 1. Funkcia vypis_sucty(tab) vypíše súčty prvkov v jednotlivých riadkoch tabuľky, súčty vypisuje vedľa seba.
# >>> vypis_sucty([[1, 2, 3], [4], [5, 6]])
# 6 4 11
def vypis_sucty(tab):
    """Print the sum of each row of *tab* on one line, space-separated.

    >>> vypis_sucty([[1, 2, 3], [4], [5, 6]])
    6 4 11
    """
    for riadok in tab:
        # sum() replaces the original manual accumulator loop.
        print(sum(riadok), end=' ')
# Demo call: prints "6 4 11".
a = [[1,2,3],[4],[5,6]]
vypis_sucty(a)
# 2. Funkcia zoznam_suctov(tab) počíta súčty prvkov v riadkoch (podobne ako v predchádzajúcej úlohe),
# ale tieto súčty nevypisuje ale ukladá do výsledného zoznamu.
# >>> suc = zoznam_suctov([[1, 2, 3], [4], [5, 6]])
# >>> suc
# [6, 4, 11]
def zoznam_suctov(tab):
    """Return a list holding the sum of every row of *tab*.

    Also emits the original per-row trace line on stdout.
    """
    sucty = []
    for riadok in tab:
        print('riesim i - ', riadok)
        sucty.append(sum(riadok))
    return sucty
# Demo call: prints [6, 4, 11].
a = [[1,2,3],[4],[5,6]]
suc = zoznam_suctov(a)
print(suc)
# 3. Funkcia pridaj_sucty(tab) podobne ako predchádzajúce úlohy počíta súčty po riadkoch, ale ich ukladá na koniec každého riadka tabuľky.
# >>> a = [[1, 2, 3], [4], [5, 6]]
# >>> pridaj_sucty(a)
# >>> a
# [[1, 2, 3, 6], [4, 4], [5, 6, 11]]
def pridaj_sucty(tab):
    """Append to every row of *tab* the sum of its elements (in place).

    Also emits the original per-row trace line on stdout.
    """
    for riadok in tab:
        print('riesim i - ', riadok)
        riadok.append(sum(riadok))
# Demo call: prints [[1, 2, 3, 6], [4, 4], [5, 6, 11]].
a = [[1,2,3],[4],[5,6]]
pridaj_sucty(a)
print(a)
# 4. Funkcia preklop(matica) vyrobí novú maticu (dvojrozmernú tabuľku), v ktorej bude pôvodná preklopená okolo hlavnej
# uhlopriečky. Predpokladáme, že všetky riadky majú rovnakú dĺžku.
# >>> p = [[1, 2], [5, 6], [3, 4]]
# >>> q = preklop(p)
# >>> q
# [[1, 5, 3], [2, 6, 4]]
def preklop(matica):
    """Return a new matrix: *matica* transposed (flipped about the main
    diagonal). All rows are assumed to have equal length.

    Bug fix: the original pre-allocated exactly two result rows
    ([[0]*len(matica), [0]*len(matica)]), so it only worked for
    matrices with 2 columns and raised IndexError otherwise.
    """
    if not matica:
        return []
    # Row j of the result collects column j of the input.
    return [[riadok[j] for riadok in matica] for j in range(len(matica[0]))]
# Demo call: prints [[1, 5, 3], [2, 6, 4]].
p = [[1, 2], [5, 6], [3, 4]]
q = preklop(p)
print(q)
# 5. Funkcia preklop_sa(matica) pracuje ako predchádzajúci príklad, ale namiesto výslednej matice
# (teda funkcia nič nevracia) funkcia zmení samotnú vstupnú maticu.
#
# >>> p = [[1, 2], [5, 6], [3, 4]]
# >>> preklop_sa(p)
# >>> p
# [[1, 5, 3], [2, 6, 4]]
def preklop_sa(matica):
    """Transpose *matica* in place (returns None).

    Improvements over the original: no IndexError on an empty matrix,
    slice assignment instead of clear()+append loop, and the verbose
    debug printing is gone.
    """
    if not matica:
        return
    # Build the transpose, then overwrite the caller's list in place.
    transponovana = [[riadok[j] for riadok in matica]
                     for j in range(len(matica[0]))]
    matica[:] = transponovana
# Demo call: prints [[1, 5, 3], [2, 6, 4]].
p = [[1, 2], [5, 6], [3, 4]]
preklop_sa(p)
print(p)
# 6. Funkcia zisti_dlzky(tab) zistí, či sú všetky riadky vstupnej tabuľky rovnako dlhé, ak áno,
# funkcia vráti túto dĺžku, inak vráti None.
#
# >>> p = [[1, 2], [3, 4], [5, 6]]
# >>> zisti_dlzky(p)
# 2
# >>> zisti_dlzky([[1, 2, 3]])
# 3
# >>> zisti_dlzky([[], [1, 2, 3]]) # vráti None
# >>>
def zisti_dlzky(tab):
    """Return the common row length if every row of *tab* is equally
    long, otherwise None.

    Bug fix: the original returned inside the very first iteration of
    its comparison loop on BOTH branches, so it only compared the first
    two rows -- e.g. [[1, 2], [3, 4], [5]] wrongly returned 2. It also
    crashed with IndexError on an empty table.
    """
    if not tab:
        return None
    dlzka = len(tab[0])
    for riadok in tab[1:]:
        if len(riadok) != dlzka:
            return None
    return dlzka
# Demo calls covering equal-length, single-row and mismatched tables.
p = [[1, 2], [3, 4], [5, 6]]
print('1. [[1, 2], [3, 4], [5, 6]] = ', zisti_dlzky(p),'\n')
print('2. [[1, 2, 3]] = ', zisti_dlzky([[1, 2, 3]]), '\n')
print('3. [[], [1, 2, 3]] = ', zisti_dlzky([[], [1, 2, 3]]), '\n')
print('4. [[]] = ', zisti_dlzky([[]]), '\n')
print('5. [[],[],[]] = ', zisti_dlzky([[],[],[]]), '\n')
print('6. [[],[],[1]] = ', zisti_dlzky([[],[],[1]]), '\n')
# 7. Funkcia dopln(tab) doplní do vstupnej tabuľky do každého riadka minimálny počet None tak, aby mali všetky riadky rovnakú dĺžku.
#
# >>> a = [[5, 6], [1, 2, 3], [4]]
# >>> dopln(a)
# >>> a
# [[5, 6, None], [1, 2, 3], [4, None, None]]
def dopln(tab):
    """Pad every row of *tab* in place with None so that all rows reach
    the length of the longest row. Returns None.

    Improvements: no ValueError on an empty table (max of an empty
    sequence), list.extend instead of an append loop, debug prints
    removed.
    """
    if not tab:
        return
    max_dlzka = max(len(riadok) for riadok in tab)
    for riadok in tab:
        riadok.extend([None] * (max_dlzka - len(riadok)))
# Demo call: prints [[5, 6, None], [1, 2, 3], [4, None, None]].
a = [[5, 6], [1, 2, 3], [4]]
dopln(a)
print(a)
# 8. Zistite, čo počíta
def test(mat):
    """Return the sum of |mat[i][j] - mat[j][i]| over all index pairs of
    a square matrix: 0 exactly when the matrix is symmetric, otherwise a
    positive asymmetry measure (every off-diagonal pair counts twice).
    NOTE(review): the trace text reads "abs({}) - {}" but the code
    computes abs(of the difference); the printed formula is misleading,
    the computation itself is fine.
    """
    vysl, n = 0, len(mat)
    for i in range(n):
        for j in range(n):
            print('{} += abs({}) - {}' .format(vysl,mat[i][j], mat[j][i]), end = " => ")
            vysl += abs(mat[i][j] - mat[j][i])
            print(vysl)
    return vysl
# Demo calls: asymmetric matrix gives 2, symmetric matrix gives 0.
a = [[1, 2], [1, 1]]
b = [[1, 2, 3], [2, 2, 1], [3, 1, 3]]
print(test(a)) # 2
print(test(b)) # 0
# 9. Funkcia zisti(tab1, tab2) zistí, či majú dve vstupné tabuľky úplne rovnaké rozmery,
# t. j. majú rovnaký počet rovnakodlhých riadkov.
#
# >>> a = [[5, 6], [1, 2, 3], [4]]
# >>> b = [[0, 0], [0, 0, 0], [0]]
# >>> zisti(a, b)
# True
# >>> del b[-1][-1]
# >>> zisti(a, b)
# False
def zisti(tab1, tab2):
    """Return True when *tab1* and *tab2* have the same number of rows
    and every pair of corresponding rows has the same length."""
    if len(tab1) != len(tab2):
        return False
    return all(len(r1) == len(r2) for r1, r2 in zip(tab1, tab2))
# Demo: same shapes -> True; after removing one element -> False.
a = [[5, 6], [1, 2, 3], [4]]
b = [[0, 0], [0, 0, 0], [0]]
print(zisti(a, b)) # True
del b[-1][-1]
print(zisti(a, b)) # False
# 10. Funkcia sucet(tab1, tab2) vráti novú tabuľku, ktorá je súčtom dvoch vstupných rovnakoveľkých číselných tabuliek.
# Funkcia vráti takú tabuľku, v ktorej je každý prvok súčtom dvoch prvkov zo vstupných tabuliek s rovnakým indexom.
#
# >>> a = [[5, 6], [1, 2, 3], [4]]
# >>> b = [[-1, -3], [-2, 0, 1], [2]]
# >>> c = sucet(a, b)
# >>> c
# [[4, 3], [-1, 2, 4], [6]]
def sucet(tab1, tab2):
    """Return a new table whose elements are the pairwise sums of two
    equally shaped tables; on a shape mismatch print the original error
    message and return None (matching the previous behaviour).

    Improvements: the fragile function-local ``from DEF import zisti``
    project import is replaced by an inline shape check, and the
    ``return print(...)`` oddity is made an explicit ``return None``.
    """
    rovnake = len(tab1) == len(tab2) and all(
        len(r1) == len(r2) for r1, r2 in zip(tab1, tab2))
    if not rovnake:
        print('Tabulky nie su rovnako velke !!!')
        return None
    return [[a + b for a, b in zip(r1, r2)] for r1, r2 in zip(tab1, tab2)]
# Demo call: prints [[4, 3], [-1, 2, 4], [6]].
c = 0
a = [[5, 6], [1, 2, 3], [4]]
b = [[-1, -3], [-2, 0, 1], [2]]
c = sucet(a, b) # [[4, 3], [-1, 2, 4], [6]]
print('---------------------------------------------------------')
print(c)
# 11. Textový súbor v každom riadku obsahuje niekoľko slov, oddelených medzerou (riadok môže byť aj prázdny).
# Funkcia citaj(meno_suboru) prečíta tento súbor a vyrobí z neho dvojrozmernú tabuľku: každý riadok tabuľky zodpovedá jednému riadku súboru,
#
# napr. ak súbor text.txt:
#
# anicka dusicka
# kde si bola
# ked si si cizmicky
# zarosila
#
# potom:
#
# >>> s = citaj('text.txt')
# >>> s
# [['anicka', 'dusicka'], ['kde', 'si', 'bola'], ['ked', 'si', 'si', 'cizmicky'], ['zarosila']]
def citaj(meno_suboru):
    """Read a text file and return a 2-D list: one inner list of
    whitespace-separated words per line (empty lines yield [])."""
    with open(meno_suboru, 'r') as subor:
        return [riadok.split() for riadok in subor]
# Demo call against a local sample file (path is machine-specific).
subor = '/home/echolom/PycharmProjects/untitled1/FILE/subor_text.txt'
print('------------------------------------------------------------')
print(citaj(subor)) # [['anicka', 'dusicka'], ['kde', 'si', 'bola'], ['ked', 'si', 'si', 'cizmicky'], ['zarosila']]
# 12. Funkcia zapis(tab, meno_suboru) je opačná k predchádzajúcemu príkladu: zapíše danú dvojrozmernú tabuľku slov do súboru.
# napr.
#
# >>> s = [['ANICKA', 'dusicka'], ['kde', 'si', 'bola'], ['ked', 'si', 'si', 'cizmicky'], ['zarosila']]
# >>> zapis(s, 'text1.txt')
#
# vytvorí rovnaký súbor ako bol text.txt
def zapis(tab, meno_suboru):
    """Write the 2-D table *tab* into a file: one line per row, the
    items converted to str and separated by single spaces."""
    with open(meno_suboru, 'w') as subor:
        for riadok in tab:
            subor.write(' '.join(str(prvok) for prvok in riadok) + '\n')
    print('dokoncene - do suboru boli zapisane riadky')
# Demo call: writes the number table into a local sample file.
subor = '/home/echolom/PycharmProjects/untitled1/FILE/subor_cislo.txt'
#s = [['anicka', 'dusicka'], ['kde', 'si', 'bola'], ['ked', 'si', 'si', 'cizmicky'], ['zarosila']]
s = [[1, 11, 21], [345], [-5, 10]]
print('------------------------------------------------------------')
zapis(s,subor)
# 13. Funkcia citaj_cisla(meno_suboru) bude podobná funkcii citaj(meno_suboru) z (11) úlohy,
# let táto predpokladá, že vstupný súbor obsahuje len celé čísla. Funkcia vráti dvojrozmernú tabuľku čísel.
#
# napr.pre textový súbor z(12) úlohy:
#
# >>> tab = citaj_cisla('cisla.txt')
# >>> tab
# [[1, 11, 21], [345], [-5, 10]]
def citaj_cisla(meno_suboru):
    """Read a text file of whitespace-separated integers and return a
    2-D list of ints, one inner list per line.

    Improvements: the leftover ``print(type(riadok))`` debug output and
    the manual row-index bookkeeping are gone; the conversion is one
    nested comprehension.
    """
    with open(meno_suboru, 'r') as subor:
        return [[int(slovo) for slovo in riadok.split()] for riadok in subor]
# Demo call: reads back the file written above.
subor = '/home/echolom/PycharmProjects/untitled1/FILE/subor_cislo.txt'
print('------------------------------------------------------------')
a = citaj_cisla(subor)
print(a) # [[1, 11, 21], [345], [-5, 10]]
# 14. Funkcia prvky(tab) z dvojrozmernej tabuľky vyrobí (funkcia vráti) jednorozmernú:
# všetky prvky postupne pridáva do výsledného zoznamu.
#
# >>> a = [[5, 6], [1, 2, 3], [4]]
# >>> b = prvky(a)
# >>> b
# [5, 6, 1, 2, 3, 4]
def prvky(tab):
    """Return a flat list of all elements of the 2-D table *tab*,
    taken row by row.

    Improvements: the stray blank ``print()`` is removed and the manual
    nested append loop becomes a flattening comprehension.
    """
    return [prvok for riadok in tab for prvok in riadok]
# Demo call: prints [5, 6, 1, 2, 3, 4].
a = [[5, 6], [1, 2, 3], [4]]
b = prvky(a)
print(b) # [5, 6, 1, 2, 3, 4]
# 15. Funkcia vyrob(pr, ps, hodnoty) vyrobí dvojrozmernú tabuľku s počtom riadkov pr a počtom stĺpcov ps.
# Prvky zoznamu hodnoty postupne priradzuje po riadkoch do novovytváranej tabuľky.
# Ak je vstupný zoznam hodnôt kratší ako potrebujeme, začne z neho čítať od začiatku.
#
# napr.
#
# >>> xy = vyrob(3, 2, [3, 5, 7])
# >>> xy
# [[3, 5], [7, 3], [5, 7]]
# >>> vyrob(3, 3, list(range(1, 20, 2)))
# [[1, 3, 5], [7, 9, 11], [13, 15, 17]]
def vyrob(pr, ps, hodnoty):
    """Build a pr x ps table, filling it row by row with the values of
    *hodnoty*, cycling back to the start when the list runs out.

    Improvements: the manual "increment or reset to 0" index juggling is
    replaced by a modulo cycle, and the debug prints are removed.
    """
    vysl = []
    k = 0
    for _ in range(pr):
        riadok = []
        for _ in range(ps):
            riadok.append(hodnoty[k % len(hodnoty)])
            k += 1
        vysl.append(riadok)
    return vysl
# Demo calls: short value list cycles, long value list is consumed in order.
xy = vyrob(3, 2, [3, 5, 7])
print('--------------------------------------------')
print(xy) # [[3, 5], [7, 3], [5, 7]]
print(vyrob(3, 3, list(range(1, 20, 2)))) # [[1, 3, 5], [7, 9, 11], [13, 15, 17]]
# 16. Vytvorte (napr. v notepade) textový súbor, ktorý obsahuje aspoň 5 riadkov s piatimi farbami (len mená farieb).
# Napíšte funkciu kruhy(meno_suboru), ktorá prečíta tento súbor a farby zo súboru vykreslí ako farebné kruhy.
# Tieto budú vykreslené tesne vedľa saba po riadkoch. Súbor najprv prečítajte do dvojrozmernej tabuľky farieb a potom vykresľujte.
#
# Text. subor obsahuje:
#
# yellow yellow blus yellow yellow
# yellow blue yellow blue yellow
# blue yellow red yellow blue
# yellow blue yellow blue yellow
# yellow yellow blue yellow yellow
#
# Volanie:
#
# >>> kruhy('farby.txt')
# vykreslí 25 kruhov v piatich radoch po 5
from tkinter import *
def kruhy(meno_suboru):
    """Read a 2-D table of colour names from *meno_suboru* and draw one
    circle outline per name, packed side by side, row after row."""
    master = Tk()
    w = Canvas(master, width=400, height=400, bg = 'black')
    w.pack()
    # Fill tab[] with the colour names from the file (one row per line).
    tab = []
    with open(meno_suboru, 'r') as subor: # open the colour file for reading
        for riadok in subor: # for every line of the file
            tab.append(riadok.split()) # append the line's words as one row of tab[]
    print('tab - ', tab)
    # Draw the circles.
    a = 50 # circle diameter
    y = 0 # starting y coordinate
    for i in range(len(tab)):
        x = 0 # x restarts at the left edge for every row
        for j in range(len(tab[i])):
            w.create_oval(x,y,x+a,y+a, outline=tab[i][j])
            print('x1,y1 x2,y2 - {},{} {}{} ' .format(x,y, x+10,y+10))
            x += a
        y += a
        print('Dalsi riadok --------------')
    mainloop()
# --------------------------------------------------------
# Demo call: draws 25 circles (5 rows of 5) from the sample file.
subor = '/home/echolom/PycharmProjects/untitled1/FILE/cviv_13_16_farby_kruhov.txt'
kruhy(subor)
# 17. Predchádzajúci príklad upravte tak, aby ak by bol v súbore namiesto nejakej farby None, bude to označovať,
# že sa príslušný kruh vynechá (ostane po ňom prázdne miesto).
#
# napr. súbor môže vyzerať aj takto:
#
# yellow yellow blus yellow yellow
# yellow blue None blue yellow
# blue None red None blue
# yellow blue None blue yellow
# yellow yellow blue yellow yellow
#
# volanie:
# >>> kruhy('farby.txt')
# vykreslí 21 kruhov v piatich radoch po 5, 4, 3, 4, 5 kruhoch
from tkinter import *
def kruhy(meno_suboru):
    """Like the previous kruhy(), but an entry 'None' in the file means
    the corresponding circle is skipped (an empty spot remains)."""
    master = Tk()
    w = Canvas(master, width=400, height=400, bg = 'black')
    w.pack()
    # Fill tab[] with the colour names from the file (one row per line).
    tab = []
    with open(meno_suboru, 'r') as subor: # open the colour file for reading
        for riadok in subor: # for every line of the file
            tab.append(riadok.split()) # append the line's words as one row of tab[]
    print('tab - ', tab)
    # Draw the circles.
    a = 50 # circle diameter
    y = 0 # starting y coordinate
    for i in range(len(tab)):
        x = 0 # x restarts at the left edge for every row
        for j in range(len(tab[i])):
            farba = tab[i][j] # the colour name for this grid position
            if farba != 'None': # draw only real colour entries
                w.create_oval(x,y,x+a,y+a, outline=farba)
            else:
                print('Farba je None - prazdny kruh')
            print('x1,y1 x2,y2 - {},{} {}{} ' .format(x,y, x+10,y+10))
            x += a
        y += a
        print('Dalsi riadok --------------')
    mainloop()
# --------------------------------------------------------
# Demo call: draws 21 circles (rows of 5, 4, 3, 4, 5) from the sample file.
subor = '/home/echolom/PycharmProjects/untitled1/FILE/cviv_13_17_farby_kruhov.txt'
kruhy(subor)
# 18. Textový súbor v prvom riadku obsahuje dve čísla: počet riadkov a stĺpcov dvojrozmernej tabuľky.
# V každom ďalšom sa nachádza trojica čísel: číslo riadka, číslo stĺpca, hodnota. Funkcia precitaj(meno_suboru)
# z tohto súboru vytvorí dvojrozmernú tabuľku čísel, v ktorej budú na zadaných pozíciách dané hodnoty.
#
# napr pre subor
# 4 5
# 3 1 7
# 0 1 1
# 3 3 3
# 2 4 9
#
# >>> tab = precitaj('subor.txt')
# >>> tab
# [[0, 1, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 9], [0, 7, 0, 3, 0]]
def precitaj(meno_suboru):
    """Read a sparse-matrix text file and return the dense 2-D table.

    The first line of the file holds the dimensions ("rows cols");
    every following line holds a triple "row col value". Cells that are
    not listed stay 0.

    Bug fix: the original ignored the header's meaning and sized the
    table from the maximum first/second numbers found while scanning
    ALL lines -- including the header itself -- which only produced the
    right size because valid indices are always smaller than the header
    values. The convoluted ``if j == 2`` fill loop is gone too.
    """
    with open(meno_suboru, 'r') as subor:
        riadky = [[int(cislo) for cislo in riadok.split()]
                  for riadok in subor if riadok.split()]  # skip blank lines
    pocet_riadkov, pocet_stlpcov = riadky[0]  # dimensions from the header line
    vysl_tab = [[0] * pocet_stlpcov for _ in range(pocet_riadkov)]
    for r, s, hodnota in riadky[1:]:  # "row col value" triples
        vysl_tab[r][s] = hodnota
    return vysl_tab
# Demo call against the sample coordinate file (path is machine-specific).
t = precitaj('/home/echolom/PycharmProjects/untitled1/FILE/cviv_13_18.txt')
print('================================\nVysledna tabulka - ', t) | [
"michal.hvila@gmail.com"
] | michal.hvila@gmail.com |
251306cfe757fabdd3df796e9bfe8dfff0b75629 | 528f37bb7cdd2d9275acbe81505df8aa248182a9 | /modules/tests/inv/create_item.py | 8bf15d4b4493fde7f37eef2ee2b39731325ef371 | [
"MIT"
] | permissive | flavour/rgims | 6b6f3cbacc395f2d7e0918237ac71defd40735b5 | 469717166c98cd5b6db9e870ce1eeac2a334a457 | refs/heads/master | 2021-01-16T22:10:20.051617 | 2013-12-03T09:19:00 | 2013-12-03T09:19:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,442 | py | # -*- coding: utf-8 -*-
""" Sahana Eden Automated Tests - INV005 Create Item
@copyright: 2011-2012 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
from gluon import current
from tests.web2unittest import SeleniumUnitTest
class CreateItem(SeleniumUnitTest):
    # Selenium smoke test: logs in as admin and creates one supply item
    # through the asset/item/create form.
    # NOTE(review): Python 2 syntax (print statement) -- this module
    # predates Python 3 and cannot run under a Python 3 interpreter.
    def test_inv005_create_item(self):
        """
            @case: INV005
            @description: Create an Item
            @TestDoc: https://docs.google.com/spreadsheet/ccc?key=0AmB3hMcgB-3idG1XNGhhRG9QWF81dUlKLXpJaFlCMFE
            @Test Wiki: http://eden.sahanafoundation.org/wiki/DeveloperGuidelines/Testing
        """
        print "\n"
        # Login, if not-already done so
        self.login(account="admin", nexturl="asset/item/create")
        # Clear the unit-of-measure field before filling in the form.
        self.browser.find_element_by_id("supply_item_um").clear()
        # Each tuple is (field, value[, widget type]) for the form helper.
        self.create("supply_item",
                    [( "name",
                       "Soup" ),
                     ( "um",
                       "litre" ),
                     ( "item_category_id",
                       "Standard > Food",
                       "option"),
                     ( "model",
                       "Tomato" ),
                     ( "year",
                       "2012" ),
                     ( "comments",
                       "This is a Test Item" )]
                     )
| [
"snpabilonia@dswd.gov.ph"
] | snpabilonia@dswd.gov.ph |
9e5b0d298bfa908f58d246f53598ca839a3cd541 | 1c98b39a1b835320dfdd0b14de6000a7c5b8fd10 | /tests/test_version.py | 246d00edcf41b47ac2cfd679d06d2c2decccb105 | [
"MIT"
] | permissive | tdegeus/GMatElastoPlasticQPot | 8a5b8ebdbf9b6a97b356c9140d2cb802541fa422 | a88ec9df47aceb21571f76120e8ebbd27b5c84e8 | refs/heads/main | 2022-12-20T19:52:00.445921 | 2022-12-09T14:30:24 | 2022-12-09T14:51:29 | 128,232,413 | 0 | 0 | MIT | 2022-12-09T14:51:31 | 2018-04-05T16:18:05 | C++ | UTF-8 | Python | false | false | 563 | py | import unittest
import GMatElastoPlasticQPot as gmat
class Test_main(unittest.TestCase):
    """Sanity checks for the GMatElastoPlasticQPot Python module."""
    def test_version_dependencies(self):
        """Every expected dependency name must appear in the report."""
        names = [entry.split("=")[0] for entry in gmat.version_dependencies()]
        for required in (
            "gmatelastoplasticqpot",
            "gmattensor",
            "qpot",
            "xtensor",
            "xtensor-python",
            "xtl",
        ):
            self.assertTrue(required in names)
# Script entry point: run the test suite.
if __name__ == "__main__":
    unittest.main()
| [
"tdegeus@users.noreply.github.com"
] | tdegeus@users.noreply.github.com |
6c965678baa7cebf2a03764ddb7523795f47ebf2 | 1885e952aa4a89f8b417b4c2e70b91bf1df887ff | /ABC096/A.py | cc0a107e3cc810c92a52750a636bff76ae2ca381 | [] | no_license | takumiw/AtCoder | 01ed45b4d537a42e1120b1769fe4eff86a8e4406 | 23b9c89f07db8dd5b5345d7b40a4bae6762b2119 | refs/heads/master | 2021-07-10T12:01:32.401438 | 2020-06-27T14:07:17 | 2020-06-27T14:07:17 | 158,206,535 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 77 | py | a, b = map(int, input().split())
if b >= a:
print(a)
else:
print(a-1) | [
"w.tak.1229@gmail.com"
] | w.tak.1229@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.