blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M โ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 213
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 246
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d6f9c06998c30989b694c28b3da3ce04272f062f | 61939b14aefb49057ac6aa93ea2b33c2a967988b | /actvision/config/urls.py | 70223914199c62ea7aa80dce37b2bb1ee64987bd | [] | no_license | ninanonansilo/actvision826 | 7e237608703e58e7bb3ea21e34044c790f07bc12 | fdd7852ce2a92199919f58836a81675122842e7a | refs/heads/master | 2023-07-05T01:06:57.149773 | 2021-08-26T11:51:14 | 2021-08-26T11:51:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,067 | py | """Actvision URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home0
.
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
import loginapp.views
import home.views
import movie.views
import settings.views
import inform.views
import register.views
import imgn.views
urlpatterns = [
path('admin/', admin.site.urls),
path('', loginapp.views.login, name='login.html'),
path('login/', loginapp.views.login_success, name='login_success.html'),
path('home', home.views.home, name='home.html'),
path('home/movie', movie.views.movie, name='movie.html'),
path('home/movie/video_list', movie.views.video_list, name='video_list'),
path('home/movie/upload_list', movie.views.upload_list, name='upload_list'),
path('home/movie/upload_video', movie.views.upload_video, name='upload_video'),
path('home/movie/delete_play_list', movie.views.delete_play_list, name='delete_play_list'),
path('home/movie/delete_video', movie.views.delete_video, name='delete_video'),
#path('home/setting', include('settings.urls')),
path('home/setting', settings.views.settings, name='settings.html'),
path('home/setting/check', settings.views.check, name='check'),
path('home/setting/check_pattern', settings.views.check_pattern, name='check_pattern'),
path('home/setting/check_Brightness_mode', settings.views.check_Brightness_mode, name='check_Brightness_mode'),
path('home/setting/update_Brightness', settings.views.update_Brightness, name='update_Brightness'),
path('home/setting/update_CDS_Value', settings.views.update_CDS_Value, name='update_CDS_Value'),
path('home/setting/update_min_max', settings.views.update_min_max, name='update_min_max'),
path('home/setting/power_mode', settings.views.power_mode, name='power_mode'),
path('home/setting/manual_control', settings.views.manual_control, name='manual_control'),
path('home/setting/update_on_off', settings.views.update_on_off, name='update_on_off'),
path('home/inform', inform.views.inform, name='inform.html'),
path('home/register', register.views.register, name='register.html'),
path('home/register/users_list', register.views.users_list, name='users_list'),
path('home/imgn', imgn.views.imgn, name='image.html'),
path('home/imgn/upload_img', imgn.views.upload_img, name='upload_img'),
path('home/imgn/save_letter', imgn.views.save_letter, name='save_letter'),
path('home/imgn/event_trans', imgn.views.event_trans, name='event_trans'),
]
| [
"ckdgl@DESKTOP-6NQFU1P"
] | ckdgl@DESKTOP-6NQFU1P |
8f8251d41d03992c97c4284cab8980b06dce2ee6 | c36e8ac0ccfd34a7d4245068b3d4ed6199927f9b | /main.py | 0359c0eb11ccf5be256bc113ac8c06421867203b | [] | no_license | avec140/project | 332d9a87c09400ef52e90ca5b2f60c9643531591 | d3e60766b81c8fcfff61dabdd5849ec10ce4fba0 | refs/heads/master | 2023-04-20T12:27:36.575993 | 2021-04-30T09:24:39 | 2021-04-30T09:24:39 | 363,083,929 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,749 | py | from tkinter import *
from tkinter.colorchooser import askcolor
DEFAULT_PEN_SIZE = 1.0
DEFAULT_COLOR = "black"
mode = "pen"
old_x = None
old_y = None
mycolor = DEFAULT_COLOR
erase_on = False
def use_pen():
global mode
mode = "pen"
def use_brush():
global mode
mode = "brush"
def choose_color():
global mycolor
mycolor = askcolor(color=mycolor)[1]
def use_eraser():
global mode
mode = "erase"
def paint(event):
global var, erase_on, mode, old_x, old_y
fill_color = 'white' if mode == "erase" else mycolor
if old_x and old_y:
canvas.create_line(old_x, old_y, event.x, event.y, caspstyle=ROUND, width=var.get(), fill=fill_color)
old_x = event.x
old_y = event.y
def reset(event):
global old_x, old_y
old_x, old_y = None, None
def clear_all():
global canvas
canvas.delete('all')
window = Tk()
var = DoubleVar()
penButton = Button(window, text='ํ', command=use_pen)
penButton.grid(row=0, column=0, sticky=W + E)
brushButton = Button(window, text='๋ธ๋ฌ์ฌ', command=use_brush)
brushButton.grid(row=0, column=1, sticky=W + E)
colorButton = Button(window, text='์์์ ํ', command=choose_color)
colorButton.grid(row=0, column=2, sticky=W + E)
eraseButton = Button(window, text='์ง์ฐ๊ฐ', command=use_eraser)
eraseButton.grid(row=0, column=3, sticky=W + E)
clearButton = Button(window, text='๋ชจ๋์ญ์ ', command=clear_all)
clearButton.grid(row=0, column=4, sticky=W + E)
scale = Scale(window, variable=var, orient=VERTICAL)
scale.grid(row=1, column=5, sticky=N + S)
canvas = Canvas(window, bg='white', width=600, height=400)
canvas.grid(row=1, columnspan=5)
canvas.bind('<B1-Motion>', paint)
canvas.bind('<ButtonRelease-1>', reset)
window.mainloop()
| [
"avec140@naver.com"
] | avec140@naver.com |
e8ba2a98ff92412f2246fd72b4c6ec99a9424125 | 4c0a2efb54a87e8419c530e49173484660021c16 | /src/demo_hic_et_nunc/types/hen_minter/storage.py | f5033f16e1f986ad59cd0840b788ee2872f06481 | [
"MIT"
] | permissive | jellybazil/dipdup-py | 7cc6641b7a25379034be401626d91d17d2493f43 | 950b086effbfce78080461ecc2f959ba7a8ba998 | refs/heads/master | 2023-08-12T06:50:01.445161 | 2021-10-16T20:52:29 | 2021-10-16T20:52:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 751 | py | # generated by datamodel-codegen:
# filename: storage.json
from __future__ import annotations
from typing import Dict
from pydantic import BaseModel, Extra
class Royalties(BaseModel):
class Config:
extra = Extra.forbid
issuer: str
royalties: str
class Swaps(BaseModel):
class Config:
extra = Extra.forbid
issuer: str
objkt_amount: str
objkt_id: str
xtz_per_objkt: str
class HenMinterStorage(BaseModel):
class Config:
extra = Extra.forbid
curate: str
genesis: str
hdao: str
locked: bool
manager: str
metadata: Dict[str, str]
objkt: str
objkt_id: str
royalties: Dict[str, Royalties]
size: str
swap_id: str
swaps: Dict[str, Swaps]
| [
"noreply@github.com"
] | jellybazil.noreply@github.com |
e9a1e970d4704ef0445f93aed0cd5162806488f7 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03273/s702731643.py | a626a36c61e3c295dfc6c90d75e2a4adb265c98f | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 745 | py | from collections import defaultdict
import itertools
import copy
def readInt():
return int(input())
def readInts():
return list(map(int, input().split()))
def readChar():
return input()
def readChars():
return input().split()
def p(arr,b="\n",e="\n"):
print(b,end="")
for i in arr:
for j in i:
print(j,end="")
print()
print(e,end="")
h,w = readInts()
a = [list(input()) for i in range(h)]
for i in range(h-1,-1,-1):
boo = 1
for j in range(w-1,-1,-1):
if a[i][j]=="#":
boo = 0
if boo==1:
del a[i]
for i in range(len(a[0])-1,-1,-1):
boo = 1
for j in range(len(a)-1,-1,-1):
if a[j][i]=="#":
boo = 0
if boo==1:
for j in range(len(a)-1,-1,-1):
del a[j][i]
p(a,b="",e="") | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
6810448a2a2f895bb4d8c9a6ddda997f4967d5d2 | 99b8b8f06f2248a8ef940c0b5ba90d05f0362ba0 | /src/python/strelka/scanners/scan_pe.py | 626e9df031e01b48ea3c146b00d52c99f1d0d331 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | thezedwards/strelka | b5d794198791f04a9473ae4b7b2f8a75b7ccac9b | 9791ec50354459b4c80df6e95887e0d6bd58729a | refs/heads/master | 2020-05-24T12:34:15.926932 | 2019-05-16T20:51:40 | 2019-05-16T20:51:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,150 | py | import binascii
from datetime import datetime
import hashlib
import struct
import pefile
from strelka import strelka
IMAGE_MAGIC_LOOKUP = {
0x10b: '32_BIT',
0x20b: '64_BIT',
0x107: 'ROM_IMAGE',
}
class ScanPe(strelka.Scanner):
"""Collects metadata from PE files."""
def scan(self, data, file, options, expire_at):
self.event['total'] = {'sections': 0}
try:
pe = pefile.PE(data=data)
pe_dict = pe.dump_dict()
self.event['total']['sections'] = pe.FILE_HEADER.NumberOfSections
self.event['warnings'] = pe.get_warnings()
self.event['timestamp'] = datetime.utcfromtimestamp(pe.FILE_HEADER.TimeDateStamp).isoformat()
machine = pe.FILE_HEADER.Machine
self.event['machine'] = {
'id': machine,
'type': pefile.MACHINE_TYPE.get(machine),
}
# Reference: http://msdn.microsoft.com/en-us/library/windows/desktop/ms680339%28v=vs.85%29.aspx
self.event['image_magic'] = IMAGE_MAGIC_LOOKUP.get(pe.OPTIONAL_HEADER.Magic, 'Unknown')
subsystem = pe.OPTIONAL_HEADER.Subsystem
self.event['subsystem'] = pefile.SUBSYSTEM_TYPE.get(subsystem)
self.event['stack_reserve_size'] = pe.OPTIONAL_HEADER.SizeOfStackReserve
self.event['stack_commit_size'] = pe.OPTIONAL_HEADER.SizeOfStackCommit
self.event['heap_reserve_size'] = pe.OPTIONAL_HEADER.SizeOfHeapReserve
self.event['heap_commit_size'] = pe.OPTIONAL_HEADER.SizeOfHeapCommit
self.event['image_base'] = pe.OPTIONAL_HEADER.ImageBase
self.event['entry_point'] = pe.OPTIONAL_HEADER.AddressOfEntryPoint
self.event['image_characteristics'] = pe_dict.get('Flags')
self.event['dll_characteristics'] = pe_dict.get('DllCharacteristics')
try:
self.event['imphash'] = pe.get_imphash()
except AttributeError:
self.flags.append('no_imphash')
self.event.setdefault('export_functions', [])
export_symbols = pe_dict.get('Exported symbols', [])
for symbols in export_symbols:
name = symbols.get('Name')
if name is not None and isinstance(name, bytes) and name not in self.event['export_functions']:
self.event['export_functions'].append(name)
import_cache = {}
self.event.setdefault('imports', [])
import_symbols = pe_dict.get('Imported symbols', [])
for symbol in import_symbols:
for import_ in symbol:
dll = import_.get('DLL')
if dll is not None:
if dll not in self.event['imports']:
self.event['imports'].append(dll)
import_cache.setdefault(dll, [])
ordinal = import_.get('Ordinal')
if ordinal is not None:
ordinal = pefile.ordlookup.ordLookup(dll.lower(), ordinal, make_name=True)
import_cache[dll].append(ordinal)
name = import_.get('Name')
if name is not None:
import_cache[dll].append(name)
self.event.setdefault('import_functions', [])
for (import_, functions) in import_cache.items():
import_entry = {'import': import_, 'functions': functions}
if import_entry not in self.event['import_functions']:
self.event['import_functions'].append(import_entry)
self.event.setdefault('resources', [])
try:
for resource in pe.DIRECTORY_ENTRY_RESOURCE.entries:
res_type = pefile.RESOURCE_TYPE.get(resource.id, 'Unknown')
for entry in resource.directory.entries:
for e_entry in entry.directory.entries:
sublang = pefile.get_sublang_name_for_lang(
e_entry.data.lang,
e_entry.data.sublang,
)
offset = e_entry.data.struct.OffsetToData
size = e_entry.data.struct.Size
r_data = pe.get_data(offset, size)
language = pefile.LANG.get(e_entry.data.lang, 'Unknown')
data = {
'type': res_type,
'id': e_entry.id,
'name': e_entry.data.struct.name,
'offset': offset,
'size': size,
'sha256': hashlib.sha256(r_data).hexdigest(),
'sha1': hashlib.sha1(r_data).hexdigest(),
'md5': hashlib.md5(r_data).hexdigest(),
'language': language,
'sub_language': sublang,
}
if data not in self.event['resources']:
self.event['resources'].append(data)
except AttributeError:
self.flags.append('no_resources')
if hasattr(pe, 'DIRECTORY_ENTRY_DEBUG'):
debug = dict()
for e in pe.DIRECTORY_ENTRY_DEBUG:
rawData = pe.get_data(e.struct.AddressOfRawData, e.struct.SizeOfData)
if rawData.find(b'RSDS') != -1 and len(rawData) > 24:
pdb = rawData[rawData.find(b'RSDS'):]
debug['guid'] = b'%s-%s-%s-%s' % (
binascii.hexlify(pdb[4:8]),
binascii.hexlify(pdb[8:10]),
binascii.hexlify(pdb[10:12]),
binascii.hexlify(pdb[12:20]),
)
debug['age'] = struct.unpack('<L', pdb[20:24])[0]
debug['pdb'] = pdb[24:].rstrip(b'\x00')
self.event['rsds'] = debug
elif rawData.find(b'NB10') != -1 and len(rawData) > 16:
pdb = rawData[rawData.find(b'NB10') + 8:]
debug['created'] = struct.unpack('<L', pdb[0:4])[0]
debug['age'] = struct.unpack('<L', pdb[4:8])[0]
debug['pdb'] = pdb[8:].rstrip(b'\x00')
self.event['nb10'] = debug
self.event.setdefault('sections', [])
sections = pe_dict.get('PE Sections', [])
for section in sections:
section_entry = {
'name': section.get('Name', {}).get('Value', '').replace('\\x00', ''),
'flags': section.get('Flags', []),
'structure': section.get('Structure', ''),
}
if section_entry not in self.event['sections']:
self.event['sections'].append(section_entry)
security = pe.OPTIONAL_HEADER.DATA_DIRECTORY[pefile.DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_SECURITY']]
digital_signature_virtual_address = security.VirtualAddress
if security.Size > 0:
extract_data = pe.write()[digital_signature_virtual_address + 8:]
if len(extract_data) > 0:
self.flags.append('signed')
extract_file = strelka.File(
name='digital_signature',
source=self.name,
)
for c in strelka.chunk_string(extract_data):
self.upload_to_cache(
extract_file.pointer,
c,
expire_at,
)
self.files.append(extract_file)
else:
self.flags.append('empty_signature')
if hasattr(pe, 'FileInfo'):
self.event.setdefault('version_info', [])
for structure in pe.FileInfo:
for fileinfo in structure:
if fileinfo.Key.decode() == 'StringFileInfo':
for block in fileinfo.StringTable:
for name, value in block.entries.items():
fixedinfo = {
'name': name.decode(),
'value': value.decode(),
}
if fixedinfo not in self.event['version_info']:
self.event['version_info'].append(fixedinfo)
else:
self.flags.append('no_version_info')
except IndexError:
self.flags.append('index_error')
except pefile.PEFormatError:
self.flags.append('pe_format_error')
| [
"liburdi.joshua@gmail.com"
] | liburdi.joshua@gmail.com |
b2dc7e238687297569b877965faf69acc67f19ed | 37413580d8f2402068fc9658fbe3df7b897fb728 | /admm1.py | d442db9484c2325200152a67832e3ab97555e760 | [] | no_license | johnston-jeremy/mmv_cvx | b104b3afafe7a3a3ae2e4ebcf814c2b78c751ac1 | 8938a42e69f98da6b6b208114422770eddeaca47 | refs/heads/main | 2023-08-21T19:10:33.897425 | 2021-10-20T04:12:35 | 2021-10-20T04:12:35 | 411,396,714 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 778 | py | import numpy as np
import numpy.linalg as la
from proximal_operators import prox_l2_norm_batch
def admm_problem1(Y, p):
N, L, M, mu, beta, taux, gamma = p.N, p.L, p.M, p.mu, p.beta, p.taux, p.gamma
X = np.zeros((N,M),dtype=complex)
E = np.zeros_like(Y)
T = np.zeros_like(Y)
A = p.A
AtA = np.matmul(A.T.conj(),A)
AtY = np.matmul(np.conj(A.T),Y)
for t in range(p.maxiter):
Xprev = X
E = mu*beta/(1+mu*beta) * (-np.matmul(A,X) + Y - 1/beta * T)
G = 2*(np.matmul(AtA, X) + np.matmul(np.conj(A.T), E + (1/beta)*T) - AtY)
D = X - taux/2 * G
X = prox_l2_norm_batch(taux/beta, D)
T = T + gamma*beta*(np.matmul(A, X) + E - Y)
if t > 10:
if np.linalg.norm(X-Xprev) <= p.tol*np.linalg.norm(Xprev):
break
return X
| [
"jjohnston1994@gmail.com"
] | jjohnston1994@gmail.com |
6e94570e6231536349f4848a253e2446a7657101 | e1e4bf7539269bf5f34b9ea02b2841f287e047e0 | /test/test_graph.py | daeb7856a893c70921723f3d5964754dc0e57945 | [] | no_license | melifluos/twitter_age_detection | 2b39d0477eef193e71dbd07b81a553ca7010d4cd | 280165c597849e59089b461f2f51c3604f16bb94 | refs/heads/master | 2020-04-11T05:57:16.942127 | 2017-05-18T09:44:54 | 2017-05-18T09:44:54 | 68,198,226 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,291 | py | __author__ = 'benchamberlain'
from ..graph import Graph
from scipy.sparse import csr_matrix
import numpy as np
data = csr_matrix(np.array([[0, 1, 0, 1], [1, 0, 1, 1], [0, 1, 0, 1], [1, 1, 1, 0]]))
data1 = csr_matrix(np.array([[0, 1], [1, 0]]))
edges = np.array([[1, 3, 0], [0, 2, 3], [1, 3, 0], [0, 1, 2]])
degs = np.array([2, 3, 2, 3])
walks = np.array([[0, 2, 3], [1, 3, 1]])
def test_number_of_vertices():
g = Graph(data)
assert g.n_vertices == 4
def test_input_degree():
g = Graph(data)
assert np.array_equal(degs, g.deg)
def test_input_edge_shape():
g = Graph(data)
truth = (4, 3)
assert truth == g.edges.shape
def test_input_edges():
g = Graph(data)
g.build_edge_array()
assert np.array_equal(edges, g.edges)
def test_initialise_walk_array():
g = Graph(data)
num_walks = 10
walk_length = 20
walks = g.initialise_walk_array(num_walks=num_walks, walk_length=walk_length)
assert walks.shape == (40, 20)
assert np.array_equal(walks[:, 0], np.array([0, 1, 2, 3] * 10))
def test_sample_next_vertices():
"""
In the test graph the vertex with index 2 is only connected to vertices 1 and 3
:return:
"""
g = Graph(data)
current_vertices = np.array([2, 2, 2, 2])
for idx in range(10):
next_vertex_indices = g.sample_next_vertices(current_vertices, degs)
for elem in next_vertex_indices:
assert (elem == 0) | (elem == 1)
assert next_vertex_indices.shape == current_vertices.shape
def test_walks_to_list_of_strings():
walks_str = walks.astype(str)
walk_list = walks_str.tolist()
for walk in walk_list:
assert len(walk) == 3
for elem in walk:
assert type(elem) == str
def test_oscillating_random_walk_1walk():
g = Graph(data1)
g.build_edge_array()
walks = g.generate_walks(1, 10)
walk1 = [0, 1] * 5
walk2 = [1, 0] * 5
truth = np.array([walk1, walk2])
print walks
assert np.array_equal(walks, truth)
def test_oscillating_random_walk_2walks():
g = Graph(data1)
g.build_edge_array()
walks = g.generate_walks(2, 10)
walk1 = [0, 1] * 5
walk2 = [1, 0] * 5
truth = np.array([walk1, walk2, walk1, walk2])
print walks
assert np.array_equal(walks, truth) | [
"ben@starcount.com"
] | ben@starcount.com |
f7cf518c9adba372fba54eac9d1c3ca7dbadeeac | 44aa5314f0291f6a5579214ba2d57b894ddcd1ec | /backend/dashboard/models.py | 82b3d803f623cfda4b862f4232d35504c23846ec | [] | no_license | dimnl/modum | f81fef16a599d79f2083ac72484857aadc52a87d | b31f80ac4c1e77ddbcd11d4eb2b1937f1c9215d7 | refs/heads/master | 2022-06-28T02:30:19.190845 | 2020-05-03T04:55:39 | 2020-05-03T04:55:39 | 260,448,626 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 585 | py | from django.db import models
# Country information.
class Country(models.Model):
name = models.CharField(max_length=120)
description = models.TextField()
focus = models.CharField(max_length=120, default="")
def _str_(self):
return self.name
# Sectors information.
class Sector(models.Model):
name = models.CharField(max_length=120)
description = models.TextField()
def _str_(self):
return self.name
# Measures information.
class Measure(models.Model):
description = models.TextField()
def _str_(self):
return self.name
| [
"alexandru.neculai96@gmail.com"
] | alexandru.neculai96@gmail.com |
f7af79f56a51603282e3089490b050ca604d2712 | 1410d7722dd22c1ecd2aee0f4c59cf482846f445 | /models/rbm.py | 05e4c797eace5d0b7ec5a60c952e55268a115ae6 | [] | no_license | funzi-son/DRBM | 95a1cb3d504746836d5d8dc2d9fb7b7eeae3fc8c | 1a7c40d46b86ed4d4a8610f3979e94e5e297429b | refs/heads/master | 2021-07-19T16:17:56.423241 | 2017-10-24T03:23:58 | 2017-10-24T03:23:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,384 | py | """The restricted Boltzmann machine"""
# Author: Srikanth Cherla
# City University London (2014)
# Contact: abfb145@city.ac.uk
from models import np
from models import theano
from models import T
theano.config.exception_verbosity = 'high'
def build_model(n_input, n_class, hypers, init_params):
"""Function to build the Theano graph for the RBM.
Input
-----
n_input : integer
Dimensionality of input features to the model.
n_class : integer
Number of class-labels.
hypers : dict
Model hyperparameters.
init_params : list
A list of initial values for the model parameters.
Output
------
x : T.matrix
Input matrix (with number of data points as first dimension).
y : T.ivector
Class labels corresponding to x.
p_y_given_x : T.nnet.softmax
Posterior probability of y given x.
cost: ???
Cost function of the DRBM which is to be optimized.
params: list(T.shared)
A list containing the parameters of the model.
grads: list(T.grad)
A list containing the gradients of the parameters of the model.
"""
n_visible = n_input + n_class
n_hidden = int(hypers['n_hidden'])
L1_decay = float(hypers['weight_decay'])
L2_decay = float(hypers['weight_decay'])
n_gibbs = int(hypers['n_gibbs'])
activation = str(hypers['activation'])
# Random number generators
T_RNG = T.shared_randomstreams.RandomStreams(hypers['seed'])
N_RNG = np.random.RandomState(hypers['seed'])
# 1. Initialize visible layer, inputs and targets
x = T.matrix(name='x', dtype=theano.config.floatX)
y = T.ivector(name='y') # XXX: What should be the type of this?
Y = T.eye(n_class)[y]
v = T.concatenate((x, Y), axis=1)
# Initialize model parameters
if init_params is None:
W_init = np.asarray(
N_RNG.normal(size=(n_visible, n_hidden), scale=0.01),
dtype=theano.config.floatX)
bv_init = np.zeros((n_visible,), dtype=theano.config.floatX)
bh_init = np.zeros((n_hidden,), dtype=theano.config.floatX)
else:
W_init = init_params[0]
bv_init = init_params[1]
bh_init = init_params[2]
W = theano.shared(W_init, name='W') # RBM weight matrix
bv = theano.shared(bv_init, name='bv') # Visible biases
bh = theano.shared(bh_init, name='bh') # Hidden biases
params = [W, bv, bh]
# Build Gibbs chain and graph to compute the cost function
v_sample, cost, updates_train = build_chain(v, n_input, n_class, W,
bv, bh, k=n_gibbs,
activation=activation,
T_RNG=T_RNG)
# Add weight decay (regularization) to cost.
L1 = abs(W).sum()
L2_sqr = (W**2).sum()
cost += (L1_decay*L1 + L2_decay*L2_sqr)
grads = T.grad(cost, params, consider_constant=v_sample)
# Expressions to compute conditional distribution.
p_y_given_x = drbm_fprop(x, params, n_class, activation)
return (x, y, p_y_given_x, cost, params, grads)
def build_chain(v, n_input, n_class, W, bv, bh, k=1, activation='sigmoid',
T_RNG=None):
"""Construct a k-step Gibbs chain starting at v for an RBM.
Input
-----
v : T.matrix or T.vector
If a matrix, multiple chains will be run in parallel (batch).
n_input : int
Dimensionality of input feature.
n_class : int
Number of output classes.
W : T.matrix
Weight matrix of the RBM.
bv : T.vector
Visible bias vector of the RBM.
bh : T.vector
Hidden bias vector of the RBM.
k : int
Length of the Gibbs chain (number of sampling steps).
activation : str
Type of activation function.
T_RNG : T.streams.RandomStreams
Theano random number generator.
Output
------
v_sample : Theano vector or matrix with the same shape as `v`
Corresponds to the generated sample(s).
cost : Theano scalar
Expression whose gradient with respect to W, bv, bh is the CD-k
approximation to the log-likelihood of `v` (training example) under the
RBM. The cost is averaged in the batch case.
updates: dictionary of Theano variable -> Theano variable
The `updates` object returned by scan."""
if T_RNG is None:
T_RNG = T.shared_randomstreams.RandomStreams(860331)
# One iteration of the Gibbs sampler.
def gibbs_step(v):
"""One step of Gibbs sampling in the RBM."""
# Compute hidden layer activations given visible layer
if activation == 'sigmoid':
mean_h = T.nnet.sigmoid(T.dot(v, W) + bh)
h = T_RNG.binomial(size=mean_h.shape, n=1, p=mean_h,
dtype=theano.config.floatX)
elif activation == 'tanh':
raise NotImplementedError
elif activation == 'relu': # XXX: Not working
mean_h = T.maximum(0, T.dot(v, W) + bh)
h = T.maximum(0, mean_h + T_RNG.normal(size=mean_h.shape, avg=0.0,
std=T.nnet.sigmoid(mean_h)))
else:
raise NotImplementedError
# Compute visible layer activations given hidden layer
acts_v = T.dot(h, W.T) + bv
# # Multinomial visible units sampling (equally sized)
# # TODO: Make this an if-else section based on an input hyperparameter
# acts_in = acts_v[:, :n_input]
# probs_in = T.nnet.softmax(acts_in)
# v_in = T_RNG.multinomial(n=1, pvals=probs_in,
# dtype=theano.config.floatX)
# acts_out = acts_v[:, -n_class:]
# probs_out = T.nnet.softmax(acts_out)
# v_out = T_RNG.multinomial(n=1, pvals=probs_out,
# dtype=theano.config.floatX)
# mean_v = T.concatenate((probs_in, probs_out), axis=1)
# v = T.concatenate((v_in, v_out), axis=1)
# Binomial visible units sampling
mean_v = T.nnet.sigmoid(acts_v)
v = T_RNG.binomial(size=mean_v.shape, n=1, p=mean_v,
dtype=theano.config.floatX)
return mean_v, v
# k-step Gibbs sampling loop
chain, updates = theano.scan(lambda v: gibbs_step(v)[1],
outputs_info=[v], non_sequences=[],
n_steps=k)
v_sample = chain[-1]
def free_energy(v):
"""Free energy of RBM visible layer."""
return -(v * bv).sum() - T.log(1 + T.exp(T.dot(v, W) + bh)).sum()
cost = (free_energy(v) - free_energy(v_sample)) / v.shape[0]
return v_sample, cost, updates
def drbm_fprop(x, params, n_class, activation):
"""Posterior probability of classes given inputs and model parameters.
Input
-----
x: T.matrix (of type theano.config.floatX)
Input data matrix.
params: list
A list containing the four parameters of the DRBM (see class definition).
n_class: integer
Number of classes.
Output
------
p_y_given_x: T.nnet.softmax
Posterior class probabilities of the targets given the inputs.
"""
# Initialize DRBM parameters and binary class-labels.
U = params[0][-n_class:, :] # or, U = W[n_input:, :]
W = params[0][:-n_class, :] # or, V = W[:n_input, :]
d = params[1][-n_class:] # or, d = bv[:n_input]
c = params[2]
Y_class = theano.shared(np.eye(n_class, dtype=theano.config.floatX),
name='Y_class')
# Compute hidden state activations and energies.
s_hid = T.dot(x, W) + c
energies, _ = theano.scan(lambda y_class, U, s_hid:
s_hid + T.dot(y_class, U),
sequences=[Y_class],
non_sequences=[U, s_hid])
# Compute log-posteriors and then posteriors.
if activation == 'sigmoid':
log_p, _ = theano.scan(
lambda d_i, e_i: d_i + T.sum(T.log(1+T.exp(e_i)), axis=1),
sequences=[d, energies], non_sequences=[])
elif activation == 'tanh':
raise NotImplementedError
elif activation == 'relu':
raise NotImplementedError
else:
raise NotImplementedError
p_y_given_x = T.nnet.softmax(log_p.T) # XXX: Can the transpose be avoided?
return p_y_given_x
| [
"Son.Tran@csiro.au"
] | Son.Tran@csiro.au |
57f3fd827be2d763a94518ae48d40d7b18419a79 | 6a275ce8642562f93c659b58f5c47bc5cf84f85c | /luffy_django/x.py | ca687f2cd7b3383d3e77b26de20b5d659e1fde9f | [] | no_license | LXianB/school_city | ec2d5080ae13bc3e0279fe9f0cee55a60269efa1 | 7c06deb063d3ed1c20c6b26275f51111e280bb79 | refs/heads/master | 2020-07-28T19:38:50.257633 | 2019-09-19T09:28:28 | 2019-09-19T09:28:28 | 209,513,987 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 86 | py | import json
v = {
1:'db',
2:'sb',
}
s = json.dumps(v)
print(json.loads(s))
| [
"1135487517A392C3588D2B7E9075EE33@i-search.com.cn"
] | 1135487517A392C3588D2B7E9075EE33@i-search.com.cn |
a341ad35c0cc89c48ba55282eb9191fa23f561e4 | af2b4fbb514468846b2d96f5010292d9b973b819 | /tests/test_cli.py | 2f1eeee3b88c87c7ac853abcc6124c52ef2a6a1d | [
"MIT"
] | permissive | rdimaio/parsa | 350f4f2b8bb07d11ce2f3edf359b758208c8f79e | 9ce45235efb702cea0aa4254ed2f2c91d56d34a8 | refs/heads/master | 2021-07-25T17:46:29.169755 | 2020-04-13T11:38:44 | 2020-04-13T11:38:44 | 146,991,563 | 1 | 0 | MIT | 2018-10-11T21:11:34 | 2018-09-01T11:08:47 | Python | UTF-8 | Python | false | false | 1,690 | py | """Tests for utils/cli.py.
Tests:
parse_arguments:
empty_args
no_output_arg_passed
output_arg_passed
_set_arguments:
tested implicitly in the parse_arguments test
"""
import unittest
import os
import sys
try:
from unittest import mock
except ImportError:
import mock
sys.path.append(os.path.abspath('..'))
from parsa.utils import cli
class CLITest(unittest.TestCase):
def test_parse_arguments_empty_args(self):
"""When sys.argvs is empty, the function should exit with SystemExit: 2."""
testargs = ['']
with mock.patch.object(sys, 'argv', testargs):
# https://stackoverflow.com/a/13491726
with self.assertRaises(SystemExit) as sys_e:
cli.parse_arguments()
self.assertEqual(sys_e.exception.code, 2)
def test_parse_arguments_no_output_arg_passed(self):
"""Only the input argument is passed."""
cli_input_arg = 'foo'
testargs = ['', cli_input_arg]
with mock.patch.object(sys, 'argv', testargs):
args = vars(cli.parse_arguments())
self.assertEqual(args['input'], cli_input_arg)
def test_parse_arguments_output_arg_passed(self):
"""Both the input and output arguments are passed."""
cli_input_arg = 'foo'
cli_output_arg = 'bar'
testargs = ['',
'-o', cli_output_arg,
cli_input_arg]
with mock.patch.object(sys, 'argv', testargs):
args = vars(cli.parse_arguments())
self.assertEqual(args['input'], cli_input_arg)
self.assertEqual(args['output'], cli_output_arg) | [
"riccardodimaio11@gmail.com"
] | riccardodimaio11@gmail.com |
c82bef90561c2f2fe7c38bf3d5bde226a9fd1930 | 33f448e2d3315f758675c852e5853a16813a211b | /melon/crawler/forms.py | 34a1d217bbc00cff1c396a272af7ed993f140571 | [] | no_license | CuCTeMeH/image_scrapper | f400c4b67934fa17dc48eeaf2355f91bd379b540 | 567e301840d648a4ca912dfa1eb3228cc973838e | refs/heads/master | 2020-06-08T14:25:15.572146 | 2013-08-15T20:25:36 | 2013-08-15T20:25:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 164 | py | from django import forms
from crawler.models import site_url, site_image
from django.http import HttpResponse
class UrlForm(forms.Form):
url = forms.URLField() | [
"kutinchev87@gmail.com"
] | kutinchev87@gmail.com |
526bfabd8d4add6feced315ac34c366676ccfe3d | 69864d3134f224fb00c46327f7cd4f34c8749f3f | /bongo/apps/frontend/tests/view_tests.py | 1fbb400ab0d3d6551e735cfffb3308c990d2cd84 | [
"MIT"
] | permissive | BowdoinOrient/bongo | 4298e2f1a353be94640ceb2b24da1178abaf92ec | 3a78dd8a8f9d853661ba9f0b7df900ec497940a1 | refs/heads/develop | 2021-01-17T17:07:18.582716 | 2015-10-25T15:33:47 | 2015-10-25T15:33:47 | 18,506,776 | 3 | 1 | null | 2015-10-25T15:33:48 | 2014-04-07T04:57:02 | Python | UTF-8 | Python | false | false | 3,185 | py | from django.test import TestCase
from bongo.apps.bongo.tests import factories
class ArticleViewTestCase(TestCase):
def test_by_slug(self):
"""Test that you can route to an article by using its slug"""
post = factories.PostFactory.create()
response = self.client.get("/article/{}/".format(post.slug))
self.assertEqual(response.status_code, 200)
def test_by_id(self):
"""Test that you can route to an article by using its ID"""
post = factories.PostFactory.create()
response = self.client.get("/article/{}/".format(post.id))
self.assertEqual(response.status_code, 301)
self.assertEqual(response.url, "http://testserver/article/{}/".format(post.slug))
class HomeViewTestCase(TestCase):
pass
class AuthorViewTestCase(TestCase):
def test_creator_view_context(self):
creator = factories.CreatorFactory.create()
posts = [factories.PostFactory.create() for x in range(5)]
for post in posts:
article = post.text.first()
article.creators.add(creator)
article.save()
post.save(auto_dates=False)
res = self.client.get('/author/{}/'.format(creator.pk))
self.assertEqual(creator, res.context['creator'])
self.assertEqual(set(posts), set(res.context['posts']))
def test_series_view_route(self):
creator = factories.CreatorFactory.create()
self.assertEqual(self.client.get('/author/{}/'.format(creator.pk)).status_code, 200)
self.assertEqual(self.client.get('/author/'.format(creator.pk)).status_code, 404)
self.assertEqual(self.client.get('/author/0/').status_code, 404)
class SeriesViewTestCase(TestCase):
def test_series_view_context(self):
series = factories.SeriesFactory.create()
posts = [factories.PostFactory.create() for x in range(5)]
for post in posts:
post.series.add(series)
post.save(auto_dates=False)
res = self.client.get('/series/{}/'.format(series.pk))
self.assertEqual(series, res.context['series'])
self.assertEqual(set(posts), set(res.context['posts']))
def test_series_view_route(self):
series = factories.SeriesFactory.create()
self.assertEqual(self.client.get('/series/{}/'.format(series.pk)).status_code, 200)
self.assertEqual(self.client.get('/series/'.format(series.pk)).status_code, 404)
self.assertEqual(self.client.get('/series/0/').status_code, 404)
class StaticViewsTestCase(TestCase):
def test_about_view(self):
res = self.client.get('/about/')
self.assertEqual(res.status_code, 200)
def test_ethics_view(self):
res = self.client.get('/ethics/')
self.assertEqual(res.status_code, 200)
def test_subscribe_view(self):
res = self.client.get('/subscribe/')
self.assertEqual(res.status_code, 200)
def test_advertise_view(self):
res = self.client.get('/advertise/')
self.assertEqual(res.status_code, 200)
def test_contact_view(self):
res = self.client.get('/contact/')
self.assertEqual(res.status_code, 200)
| [
"bjacobel@localytics.com"
] | bjacobel@localytics.com |
d090e08a3de3c1ee882e0a3704be94198a57b77b | 70134d55728500641c6edc422bb34159c0816fb4 | /fi/migrations/0002_blog.py | 4ebecd9c47fff0dcc69bebaed2d594afd74790f5 | [] | no_license | leenamkyoung/forfor | 9f3df218b67f16eb68f0c928ec5566e9f94bc183 | bbc725941bb5543871f27cf4783b6f986d83c6a9 | refs/heads/master | 2022-12-11T19:31:23.561523 | 2019-10-19T11:44:44 | 2019-10-19T11:44:44 | 205,156,784 | 0 | 0 | null | 2022-11-22T04:13:04 | 2019-08-29T12:16:33 | HTML | UTF-8 | Python | false | false | 626 | py | # Generated by Django 2.2.3 on 2019-08-16 05:02
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('fi', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Blog',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('pub_date', models.DateTimeField(verbose_name='date published')),
('body', models.TextField()),
],
),
]
| [
"729497@likelion.org"
] | 729497@likelion.org |
78a12d8bff14792b00e4507e76858d1a178bc660 | c60ef27fe285c73fad4076122bb3d6f2fe05f111 | /fragscapy/modifications/ipv4_frag.py | 02fb8dd3eaf1d8ea14cfed0938fc1dfb0fb6c079 | [
"MIT"
] | permissive | daeon/Fragscapy | be88d8b3c6fc309515ecf5f06939f43ddf8022a5 | 3ee7f5c73fc6c7eb64858e197c0b8d2b313734e0 | refs/heads/master | 2023-08-31T09:27:31.931466 | 2021-05-27T20:01:11 | 2021-05-27T20:01:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,743 | py | """Fragments the IPv4 packets at the L3-layer."""
import scapy.layers.inet
import scapy.packet
from fragscapy.modifications.mod import Mod
from fragscapy.packetlist import PacketList
class Ipv4Frag(Mod):
"""Fragments the IPv4 packets at the L3-layer.
Fragment each IPv4 packet. the fragmentation size must be specified. It
represents the maximum size of each packet (including headers). It uses
the scapy's fragmentation function.
Args:
*args: The arguments of the mods.
Attributes:
fragsize: The fragmentation size (maximum length of a fragment).
Raises:
ValueError: Unrecognized or incorrect number of parameters.
Examples:
>>> Ipv4Frag(32).fragsize
32
"""
name = "Ipv4Frag"
doc = ("Fragments the IPv4 packets at the L3-layer\n"
"ipv4_frag <size>")
_nb_args = 1
def parse_args(self, *args):
"""See base class."""
try:
self.fragsize = int(args[0])
except ValueError:
raise ValueError("Parameter 1 unrecognized. "
"Got {}".format(args[0]))
def apply(self, pkt_list):
"""Fragment each IPv6 packet. See `Mod.apply` for more details."""
new_pl = PacketList()
for pkt in pkt_list:
if pkt.pkt.haslayer('IP'):
fragments = scapy.layers.inet.fragment(pkt.pkt, self.fragsize)
index = len(new_pl) - 1
for fragment in fragments:
new_pl.add_packet(fragment)
new_pl.edit_delay(index, pkt.delay)
else:
# Not IPv4 so no fragmentation
new_pl.add_packet(fragment, pkt.delay)
return new_pl
| [
"frederic.guihery@amossys.fr"
] | frederic.guihery@amossys.fr |
23125c25618090c23ad4cc628d0d9d5904b49c6f | ab2b6204ae7056f4aec2242b2256834eebf844a6 | /docs/source/conf.py | 3c33aacdb7df4d83003dd678f09ebd86982a36da | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | vickielee333/scarplet | e671bc071e80fc2c4a4457e7f93f8fcb39e157cf | 8cf8cee2c9b808c550c0645f7836cda6d809872e | refs/heads/master | 2020-12-23T23:50:14.408885 | 2019-10-11T23:31:41 | 2019-10-11T23:31:41 | 237,314,286 | 0 | 0 | null | 2020-01-30T22:01:23 | 2020-01-30T22:01:22 | null | UTF-8 | Python | false | false | 6,445 | py | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath("../.."))
# sys.path.insert(0, os.path.abspath('../../scarplet'))
sys.path.insert(0, "/home/rmsare/.local/lib/python3.4/site-packages")
# -- Project information -----------------------------------------------------
project = u"Scarplet"
copyright = u"2018, Robert Sare, George Hilley"
author = u"Robert Sare, George Hilley"
# The short X.Y version
version = u""
# The full version, including alpha/beta/rc tags
release = u"0.1.0"
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.coverage",
"sphinx.ext.imgmath",
"sphinx.ext.viewcode",
"nbsphinx",
"numpydoc",
"IPython.sphinxext.ipython_console_highlighting",
]
exclude_patterns = ["_build", "**.ipynb_checkpoints"]
nbsphinx_timeout = -1
# Build API docs
def run_apidoc(_):
ignore_paths = [os.path.join("../..", "scarplet", "tests/*")]
argv = [
"-f",
"-e",
"-M",
"-o",
".",
os.path.join("../..", "scarplet"),
] + ignore_paths
try:
# Sphinx 1.7+
from sphinx.ext import apidoc
apidoc.main(argv)
except ImportError:
# Sphinx 1.6 (and earlier)
from sphinx import apidoc
argv.insert(0, apidoc.__file__)
apidoc.main(argv)
def setup(app):
app.connect("builder-inited", run_apidoc)
# Mock imports
import sys
from unittest.mock import MagicMock
class Mock(MagicMock):
@classmethod
def __getattr__(cls, name):
return MagicMock()
MOCK_MODULES = [
"matplotlib",
"matplotlib.pyplot",
"numexpr",
"numpy",
"numpy.ma",
"osgeo",
"pyfftw",
"pyfftw.interfaces.numpy_fft",
"rasterio",
"rasterio.fill",
"scipy.special",
]
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
# Add any paths that contain templates here, relative to this directory.
templates_path = [".templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"
# The master toctree document.
master_doc = "index"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
import msmb_theme
html_theme = "msmb_theme"
html_theme_path = [msmb_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {"style_nav_header_background": "#2980B9"}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = [".static"]
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "Scarpletdoc"
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
master_doc,
"Scarplet.tex",
u"Scarplet Documentation",
u"Robert Sare, George Hilley",
"manual",
)
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "scarplet", u"Scarplet Documentation", [author], 1)]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"Scarplet",
u"Scarplet Documentation",
author,
"Scarplet",
"One line description of project.",
"Miscellaneous",
)
]
# -- Extension configuration -------------------------------------------------
| [
"robertmsare@gmail.com"
] | robertmsare@gmail.com |
558d7cf4a36cfe878990521818b11026bcc8b7b2 | bba60e8fdde48cdcd07b0fef80e85f931b9377af | /labs/week5/perspective_lab.py | 8dd0c967dd763a09d6bcf4c90c1ea411892f1af2 | [] | no_license | atomminder/Coursera_Brown_Coding_the_matrix | dc233e2ecfc7025a15f7c5b72c2c2b3501e13249 | 5e5e26cff4db4b39e63acf003c00350c1d83a5d7 | refs/heads/master | 2016-09-06T02:59:04.266312 | 2013-08-26T04:52:42 | 2013-08-26T04:52:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,462 | py | from image_mat_util import *
from mat import Mat
from matutil import *
from vec import Vec
from solver import solve
## Task 1
def move2board(v):
'''
Input:
- v: a vector with domain {'y1','y2','y3'}, the coordinate representation of a point q.
Output:
- A {'y1','y2','y3'}-vector z, the coordinate representation
in whiteboard coordinates of the point p such that the line through the
origin and q intersects the whiteboard plane at p.
'''
result_vec = Vec({'y1','y2','y3'}, {})
result_vec['y1'] = v['y1'] / v['y3'];
result_vec['y2'] = v['y2'] / v['y3'];
result_vec['y3'] = 1;
return result_vec
## Task 2
def make_equations(x1, x2, w1, w2):
'''
Input:
- x1 & x2: photo coordinates of a point on the board
- y1 & y2: whiteboard coordinates of a point on the board
Output:
- List [u,v] where u*h = 0 and v*h = 0
'''
domain = {(a, b) for a in {'y1', 'y2', 'y3'} for b in {'x1', 'x2', 'x3'}}
u = Vec(domain, {})
u[('y3','x1')] = w1 * x1
u[('y3','x2')] = w1 * x2
u[('y3','x3')] = w1
u[('y1','x1')] = -x1
u[('y1','x2')] = -x2
u[('y1','x3')] = -1
v = Vec(domain, {})
v[('y3','x1')] = w2 * x1
v[('y3','x2')] = w2 * x2
v[('y3','x3')] = w2
v[('y2','x1')] = -x1
v[('y2','x2')] = -x2
v[('y2','x3')] = -1
return [u, v]
## Task 3
# calculate
u1,v1 = make_equations(329,597,0,1)
u2,v2 = make_equations(358,36,0,0)
u3,v3 = make_equations(592,157,1,0)
u4,v4 = make_equations(580,483,1,1)
domain = {(a, b) for a in {'y1', 'y2', 'y3'} for b in {'x1', 'x2', 'x3'}}
last_vec = Vec(domain, {})
last_vec[('y1','x1')] = 1
vector_list = [u1,v1,u2,v2,u3,v3,u4,v4,last_vec]
L = rowdict2mat(vector_list)
#print(L)
b = Vec({0,1,2,3,4,5,6,7,8},{8:1})
#print(b)
h = solve(L,b)
#residual = b - L*h
#if residual * residual < 10e-14:
# print(True)
#else:
# print(False)
#print(h)
#H = Mat(({'y1', 'y3', 'y2'}, {'x2', 'x3', 'x1'}),{})
#H[('y1','x1')] = 1
#H[('y1','x2')] = 0.0517
#H[('y1','x3')] = -360
#H[('y2','x1')] = -0.382
#H[('y2','x2')] = 0.738
#H[('y2','x3')] = 110
#H[('y3','x1')] = -0.722
#H[('y3','x2')] = -0.0117
#H[('y3','x3')] = 669
#H = Mat(({'y1', 'y3', 'y2'}, {'x2', 'x3', 'x1'}),
# {('y1','x1'):1,('y1','x2'):0.0517,('y1','x3'):-360,
# ('y2','x1'):-0.382,('y2','x2'):0.738,('y2','x3'):110,
# ('y3','x1'):-0.722,('y3','x2'):-0.0117,('y3','x3'):669})
H = Mat(({'y1', 'y3', 'y2'}, {'x2', 'x3', 'x1'}),
h.f)
## Task 4
def mat_move2board(Y):
'''
Input:
- Y: Mat instance, each column of which is a 'y1', 'y2', 'y3' vector
giving the whiteboard coordinates of a point q.
Output:
- Mat instance, each column of which is the corresponding point in the
whiteboard plane (the point of intersection with the whiteboard plane
of the line through the origin and q).
'''
for i in Y.D[1]:
Y['y1',i] = Y['y1',i] / Y['y3',i]
Y['y2',i] = Y['y2',i] / Y['y3',i]
Y['y3',i] = 1
return Y
# test
#(X_pts, colors) = file2mat('board.png', ('x1','x2','x3'))
#Y_pts = H * X_pts
#print(Y_pts.D[0])
# print(leY_pts.D[1])
#Y_in = Mat(({'y1', 'y2', 'y3'}, {0,1,2,3}),
#{('y1',0):2, ('y2',0):4, ('y3',0):8,
#('y1',1):10, ('y2',1):5, ('y3',1):5,
#('y1',2):4, ('y2',2):25, ('y3',2):2,
#('y1',3):5, ('y2',3):10, ('y3',3):4})
#print(Y_in)
#print(mat_move2board(Y_in))
#print(Y) | [
"ice.sagittarius@gmail.com"
] | ice.sagittarius@gmail.com |
c6d8420a5dbe972ad01f97e4030bb4c992a72670 | 3b1c209057f5a692846396590b963d7cf7302e5a | /source/inputters/field.py | 62a013e16d3e5f39d84a4abcb1677033a14ac8ca | [] | no_license | Cli212/Knowledge_Driven_Dialogue | 53be06728acb9301dc3b0df43680865720e7d906 | e9817b59fce30585ae2715a4d8748b8814b337a3 | refs/heads/master | 2020-09-24T13:09:45.081795 | 2020-02-04T11:28:03 | 2020-02-04T11:28:03 | 225,765,447 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,456 | py | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
import re
import nltk
import torch
from tqdm import tqdm
from collections import Counter
from bert_serving.client import BertClient
PAD="[PAD]"
UNK="[UNK]"
BOS="[BOS]"
EOS="[EOS]"
NUM="[NUM]"
def tokenize(s):
"""
tokenize
"""
s = re.sub('\d+', NUM, s).lower()
# tokens = nltk.RegexpTokenizer(r'\w+|<sil>|[^\w\s]+').tokenize(s)
tokens = s.split(' ')
return tokens
class Field(object):
def __init__(self,
sequential=False,
dtype=None):
self.sequential = sequential
self.dtype = dtype if dtype is not None else int
def str2num(self, string):
raise NotImplementedError
def num2str(self, number):
raise NotImplementedError
def numericalize(self, strings):
if isinstance(strings, str):
return self.str2num(strings)
else:
return [self.numericalize(s) for s in strings]
def denumericalize(self, numbers):
if isinstance(numbers, torch.Tensor):
with torch.cuda.device_of(numbers):
numbers = numbers.tolist()
if self.sequential:
if not isinstance(numbers[0], list):
return self.num2str(numbers)
else:
return [self.denumericalize(x) for x in numbers]
else:
if not isinstance(numbers, list):
return self.num2str(numbers)
else:
return [self.denumericalize(x) for x in numbers]
class NumberField(Field):
def __init__(self,
sequential=False,
dtype=None):
super(NumberField, self).__init__(sequential=sequential,
dtype=dtype)
def str2num(self, string):
if self.sequential:
return [self.dtype(s) for s in string.split(" ")]
else:
return self.dtype(string)
def num2str(self, number):
if self.sequential:
return " ".join([str(x) for x in number])
else:
return str(number)
class TextField(Field):
def __init__(self,
tokenize_fn=None,
pad_token=PAD,
unk_token=UNK,
bos_token=BOS,
eos_token=EOS,
special_tokens=None,
embed_file=None):
super(TextField, self).__init__(sequential=True,
dtype=int)
self.tokenize_fn = tokenize_fn if tokenize_fn is not None else str.split
self.pad_token = pad_token
self.unk_token = unk_token
self.bos_token = bos_token
self.eos_token = eos_token
self.embed_file = embed_file
specials = [self.pad_token, self.unk_token,
self.bos_token, self.eos_token]
self.specials = [x for x in specials if x is not None]
if special_tokens is not None:
for token in special_tokens:
if token not in self.specials:
self.specials.append(token)
self.itos = []
self.stoi = {}
self.vocab_size = 0
self.embeddings = None
def build_vocab(self, texts, min_freq=0, max_size=None):
def flatten(xs):
flat_xs = []
for x in xs:
if isinstance(x, str):
flat_xs.append(x)
elif isinstance(x[0], str):
flat_xs += x
else:
flat_xs += flatten(x)
return flat_xs
# flatten texts
texts = flatten(texts)
counter = Counter()
for string in tqdm(texts):
tokens = self.tokenize_fn(string)
counter.update(tokens)
# frequencies of special tokens are not counted when building vocabulary
# in frequency order
for tok in self.specials:
del counter[tok]
self.itos = list(self.specials)
if max_size is not None:
max_size = max_size + len(self.itos)
# sort by frequency, then alphabetically
words_and_frequencies = sorted(counter.items(), key=lambda tup: tup[0])
words_and_frequencies.sort(key=lambda tup: tup[1], reverse=True)
cover = 0
for word, freq in words_and_frequencies:
if word=='' or word=='\u3000':
print(f'่ทณ่ฟ{word}')
continue
if freq < min_freq or len(self.itos) == max_size:
break
self.itos.append(word)
cover += freq
cover = cover / sum(freq for _, freq in words_and_frequencies)
print(
"Built vocabulary of size {} (coverage: {:.3f})".format(len(self.itos), cover))
self.stoi = {tok: i for i, tok in enumerate(self.itos)}
self.vocab_size = len(self.itos)
#if self.embed_file is not None:
self.embeddings = self.build_word_embeddings(self.embed_file)
def build_word_embeddings(self, embed_file):
bc = BertClient(ip='34.84.105.174')
try:
embeds=bc.encode(self.itos).tolist()
print('buillding embedding succeed')
except:
raise('building embedding fail')
return embeds
def dump_vocab(self):
vocab = {"itos": self.itos,
"embeddings": self.embeddings}
return vocab
def load_vocab(self, vocab):
self.itos = vocab["itos"]
self.stoi = {tok: i for i, tok in enumerate(self.itos)}
self.vocab_size = len(self.itos)
self.embeddings = vocab["embeddings"]
def str2num(self, string):
tokens = []
unk_idx = self.stoi[self.unk_token]
if self.bos_token:
tokens.append(self.bos_token)
tokens += self.tokenize_fn(string)
if self.eos_token:
tokens.append(self.eos_token)
indices = [self.stoi.get(tok, unk_idx) for tok in tokens]
return indices
def num2str(self, number):
tokens = [self.itos[x] for x in number]
if tokens[0] == self.bos_token:
tokens = tokens[1:]
text = []
for w in tokens:
if w != self.eos_token:
text.append(w)
else:
break
text = [w for w in text if w not in (self.pad_token, )]
text = " ".join(text)
return text
| [
"hehaoyuh212@gmail.com"
] | hehaoyuh212@gmail.com |
a40210a0d39a519003150aaa7cb19f9db7fddaac | f65c805853692cdbd283aef7c7b4617dc31c5f22 | /users/migrations/0001_initial.py | 86ae316edc72588df34c44d74bbd55914600185c | [] | no_license | Akshara21/webapp_django | 0d4b202d213669daa70d84413729efb7f728743c | 3f299730325cc8a1b19301e02c13089a1e090455 | refs/heads/master | 2022-11-28T10:22:09.793661 | 2020-08-09T17:44:41 | 2020-08-09T17:44:41 | 286,065,223 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 777 | py | # Generated by Django 3.0.8 on 2020-07-30 16:48
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Profile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', models.ImageField(default='default.jpg', upload_to='profile_pics')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"64507018+Akshara21@users.noreply.github.com"
] | 64507018+Akshara21@users.noreply.github.com |
fe2e973dabfdf530953f5bf46341be8fd3a0d225 | f0d028b3bdd676c68bef1b4653e9219b42fcd13f | /soko/tracom/models.py | a208ce41a4032c2e1e8d0478a02882f7f36cb47c | [
"BSD-3-Clause"
] | permissive | PYTHON-Techy-Work/soko | ed432638fc23760f26ee87f38d6fb09264eb584e | 0cebe6e4a71ca12be7674e7a6fa579ff53a6773b | refs/heads/master | 2021-10-26T00:53:50.231735 | 2019-04-09T08:48:24 | 2019-04-09T08:48:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,033 | py | # -*- coding: utf-8 -*-
"""Tracom models."""
import datetime as dt
from soko.database import Column, Model, SurrogatePK, db, reference_col, relationship
class Tracom(SurrogatePK, Model):
__tablename__ = 'transporters'
user_id = reference_col('users', nullable=True)
user = relationship('User', backref='transporters')
vehicle_id = reference_col('vehicles', nullable=False)
vehicle = relationship('Vehicle', backref='transporters')
photo = Column(db.String(80), nullable=False)
licence = Column(db.String(80), unique=True, nullable=False)
location = Column(db.String(30), nullable=True)
created_at = Column(db.DateTime, nullable=False, default=dt.datetime.utcnow)
def __init__(self, user, vehicle, photo, licence, location):
self.user = user
self.vehicle = vehicle
self.photo = photo
self.licence = licence
self.location = location
def __repr__(self):
return '<Transporter %r>' % self.user + self.vehicle + self.licence + self.location
| [
"james.njuguna@tracom.co.ke"
] | james.njuguna@tracom.co.ke |
11c66006ed6d3c4e596b603f51d2fe4f6fefa7eb | de7b894f683cce849992b0df0f92bc06795665f1 | /Bicycles/models.py | 335bccc4a3e0a8f8e8be68fa47aa25dc063c1879 | [] | no_license | ursaminor27169/RentCarBcle | d709cd3277663b043e0fa5d04c8a4927e82b11b0 | 5265b558567fb49a800bfe47881cf046911fcc21 | refs/heads/master | 2023-06-17T00:10:18.926196 | 2021-07-11T03:28:34 | 2021-07-11T03:28:34 | 384,797,663 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,912 | py | from django.db import models
class Marks(models.Model):
id = models.AutoField(primary_key=True)
mark = models.CharField(max_length=20, db_index=True, verbose_name='ะะฐัะบะฐ')
model = models.CharField(max_length=20, db_index=True, verbose_name='ะะพะดะตะปั')
def __str__(self):
return self.mark + ' ' + self.model
class Meta:
verbose_name_plural = 'ะะฐัะบะธ ะธ ะผะพะดะตะปะธ'
verbose_name = 'ะะฐัะบะฐ ะธ ะผะพะดะตะปั'
ordering = ['mark']
class Type(models.Model):
id = models.AutoField(primary_key=True)
name = models.CharField(max_length=20, db_index=True, verbose_name='ะขะธะฟั')
def __str__(self):
return self.name
class Meta:
verbose_name_plural = 'ะขะธะฟั'
verbose_name = 'ะขะธะฟ'
ordering = ['name']
class Frame(models.Model):
id = models.AutoField(primary_key=True)
name = models.CharField(max_length=20, db_index=True, verbose_name='ะ ะฐะผั')
abb = models.CharField(max_length=20, db_index=True, verbose_name='ะะฑะฑัะธะฒะธะฐัััะฐ')
def __str__(self):
return self.name
class Meta:
verbose_name_plural = 'ะ ะฐะผั'
verbose_name = 'ะ ะฐะผะฐ'
ordering = ['name']
class Wheels(models.Model):
id = models.AutoField(primary_key=True)
mark = models.CharField(max_length=20, db_index=True, verbose_name='ะะฐัะบะฐ')
model = models.CharField(max_length=20, db_index=True, verbose_name='ะะพะดะตะปั')
diameter = models.FloatField(null=True, blank=True, verbose_name='ะะธะฐะผะตัั')
def __str__(self):
return self.mark + ' ' + self.model + ' ' + str(self.diameter)
class Meta:
verbose_name_plural = 'ะะพะปััะฐ'
verbose_name = 'ะะพะปััะฐ'
ordering = ['mark']
class Bsles(models.Model):
id = models.AutoField(primary_key=True)
stamp = models.ForeignKey('Marks', null=True, on_delete=models.PROTECT, verbose_name='ะะฐัะบะฐ - ะะพะดะตะปั')
type = models.ForeignKey('Type', null=True, on_delete=models.PROTECT, verbose_name='ะขะธะฟ')
frame = models.ForeignKey('Frame', null=True, on_delete=models.PROTECT, verbose_name='ะ ะฐะผะฐ')
wheels = models.ForeignKey('Wheels', null=True, on_delete=models.PROTECT, verbose_name='ะะพะปะตัะฐ')
description = models.TextField(null=True, blank=True, verbose_name='ะะฟะธัะฐะฝะธะต')
weight = models.FloatField(null=True, blank=True, verbose_name='ะะตั')
price = models.FloatField(null=True, blank=True, verbose_name='ะฆะตะฝะฐ')
reservation = models.BooleanField(null=False, blank=True, verbose_name='ะ ะตะทะตัะฒ')
image = models.ImageField(upload_to='images/', null=True)
class Meta:
verbose_name_plural = 'ะะตะปะพัะธะฟะตะดั'
verbose_name = 'ะะตะปะพัะธะฟะตะด'
ordering = ['stamp'] | [
"katya-polyakova-2429@mail.ru"
] | katya-polyakova-2429@mail.ru |
0f06f71767bd30c22c6e18ec01e9a0c89ee695ce | 3dfaf9d28b59c23ee3432c26dbf499589645a0b7 | /timed_io.py | 7326af92a0c9a56229bd514e916cbf0b46495fc0 | [] | no_license | wielgusm/timed_new | c9b82575c2cc77a83fd94b1ee7a085748f8b726f | d0017af0985bad0fd5cb98f675382f05824650d8 | refs/heads/master | 2020-03-29T01:46:21.353351 | 2018-11-05T17:14:15 | 2018-11-05T17:14:15 | 149,405,524 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 28,272 | py | import sys, os, itertools
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
#import qmetric
from timed_new import qmetric
from astropy.time import Time
import datetime as datetime
try:
import ehtim as eh
except ModuleNotFoundError:
sys.path.append('/Volumes/DATAPEN/Shared/EHT/EHTIM/eht-imaging_polrep/eht-imaging/')
import ehtim as eh
nam2lett = {'ALMA':'A','AA':'A','A':'A',
'APEX':'X','AP':'X','X':'X',
'LMT':'L','LM':'L','L':'L',
'PICOVEL':'P','PICO':'P','PV':'P','P':'P','IRAM30':'P',
'SMTO':'Z','SMT':'Z','AZ':'Z','Z':'Z',
'SPT':'Y','SP':'Y','Y':'Y',
'JCMT':'J','JC':'J','J':'J',
'SMAP':'S','SMA':'S','SM':'S','S':'S',
'SMAR':'R','R':'R','SR':'R',
'B':'B','C':'C','D':'D'}
pol_dic={'LL':'ll','ll':'ll','L':'ll',
'RR':'rr','rr':'rr','R':'rr',
'RL':'rl','rl':'rl',
'LR':'lr','lr':'lr'}
def load_uvfits(path_to_data,tcoh=-1,single_letter=True,polrep='circ',polar=None):
if polar=='LL':polar='L'
if polar=='RR':polar='R'
try: obs = eh.obsdata.load_uvfits(path_to_data,polrep=polrep,force_singlepol=polar)
except TypeError: obs = eh.obsdata.load_uvfits(path_to_data,force_singlepol=polar)
#if full_polar: obs.df = make_df_full_cp(obs)
#else: obs.df = eh.statistics.dataframes.make_df(obs)
obs.df = eh.statistics.dataframes.make_df(obs)
if (type(tcoh)!=str):
if (tcoh > 0):
obs = obs.avg_coherent(inttime=tcoh)
else:
if tcoh=='scan':
try:
foo = len(obs.scan)
except:
print('Adding scans automatically')
obs.add_scans()
obs = obs.avg_coherent(inttime=1,scan_avg=True)
tobs=tobsdata(obs,single_letter=single_letter)
return tobs
#def load_csv(path_to_data, product, columns=None):
class tobsdata:
def __init__(self,obs,single_letter=True):
try: self.df=obs.df
except AttributeError:
obs.df = eh.statistics.dataframes.make_df(obs)
if single_letter:
if np.mean([len(x) for x in np.asarray(obs.df['baseline'])]) > 2.5:
obs.df['baseline'] = [nam2lett[x.split('-')[0]]+nam2lett[x.split('-')[1]] for x in list(obs.df['baseline'])]
self.source = obs.source
self.df=obs.df
self.ra=obs.ra
self.dec=obs.dec
self.data=obs.data
self.mjd=obs.mjd
try: self.polrep=obs.polrep
except AttributeError: pass
try: self.scans=obs.scans
except: pass
def get_tseries(self,ident,product='',polar='none'):
return tseries(self,ident,product=product,polar=polar)
class fake_tobs:
def __init__(self,**kwargs):
for key in kwargs:
setattr(self, key, kwargs[key])
try: foo = self.source
except AttributeError: self.source='source'
try: foo = self.ra
except AttributeError: self.ra=0
try: foo = self.dec
except AttributeError: self.dec=0
class tseries:
def __init__(self,tobs,ident,product='',polar='none',csv_path='',csv_columns=None,csv_product=None,**kwargs):
if product=='csv':
tobs = fake_tobs(**kwargs)
foo = pd.read_csv(csv_path,names=csv_columns)
if product=='':
if len(ident)==2: product='amp'
elif len(ident)==3: product='cphase'
elif len(ident)==4: product='lcamp'
self.product=product
if product=='csv': self.type=csv_product
else: self.type=product
self.ident = ident
self.polarization = polar
self.source = tobs.source
self.ra=tobs.ra
self.dec=tobs.dec
if product=='amp':
foo = tobs.df[(tobs.df.baseline==ident) | (tobs.df.baseline==ident[1]+ident[0])]
if polar != 'none':
polamp=pol_dic[polar]+'amp'
polsigma=pol_dic[polar]+'sigma'
else: polamp='amp'; polsigma='sigma'
foo=foo[foo[polamp]==foo[polamp]].copy()
self.mjd = np.asarray(foo.mjd)
self.time = np.asarray(foo.time)
self.amp = np.asarray(foo[polamp])
self.sigma = np.asarray(foo[polsigma])
self.data = foo
elif product=='cphase':
foo = get_cphase(tobs,ident,polar=polar)
foo=foo[foo.cphase==foo.cphase].copy()
self.mjd = np.asarray(foo.mjd)
self.time = np.asarray(foo.time)
self.cphase = np.asarray(foo.cphase)
self.sigmaCP = np.asarray(foo.sigmaCP)
self.data = foo
elif product=='lcamp':
foo = get_lcamp(tobs,ident,polar=polar)
#if polar!='none': foo = foo.dropna(subset=[polamp])
foo=foo[foo.lcamp==foo.lcamp].copy()
self.mjd = np.asarray(foo.mjd)
self.time = np.asarray(foo.time)
self.lcamp = np.asarray(foo.lcamp)
self.sigmaLCA = np.asarray(foo.sigmaLCA)
self.data = foo
elif product=='lcfrac':
foo = get_lcfrac(tobs,ident)
#if polar!='none': foo = foo.dropna(subset=[polamp])
foo=foo[foo.lcfrac==foo.lcfrac].copy()
self.mjd = np.asarray(foo.mjd)
self.time = np.asarray(foo.time)
self.lcfrac = np.asarray(foo.lcfrac)
self.sigmaLCF = np.asarray(foo.sigmaLCF)
self.data = foo
elif product=='cfrac':
foo = get_cfrac(tobs,ident)
#if polar!='none': foo = foo.dropna(subset=[polamp])
foo=foo[foo.cfrac==foo.cfrac].copy()
self.mjd = np.asarray(foo.mjd)
self.time = np.asarray(foo.time)
self.cfrac = np.asarray(foo.cfrac)
self.sigmaCF = np.asarray(foo.sigmaCF)
self.data = foo
elif product=='csv':
for col in csv_columns:
setattr(self, col, foo[col])
self.data = foo
try: goo=self.time
except AttributeError: self.time=self.mjd
    def plot(self,line=False,figsize='',errorscale=1.,add_title=''):
        """Plot this time series with error bars.

        Args:
            line: if True, join consecutive points with a line.
            figsize: matplotlib figure size tuple; '' selects the (10, 5) default.
            errorscale: multiplier applied to the plotted error bars.
            add_title: extra text appended to the plot title.
        """
        if figsize=='':
            plt.figure(figsize=(10,5))
        else:
            plt.figure(figsize=figsize)
        if line: fmt='o-'
        else: fmt='o'
        plt.title(self.ident+' '+self.type+' '+add_title)
        # Each product type stores its values/errors under different attributes.
        if self.type=='cphase':
            plt.errorbar(self.time,self.cphase,errorscale*self.sigmaCP,fmt=fmt,capsize=5)
            plt.ylabel('cphase [deg]')
        elif self.type=='amp':
            plt.errorbar(self.time,self.amp,errorscale*self.sigma,fmt=fmt,capsize=5)
            plt.ylabel('amp')
        elif self.type=='lcamp':
            plt.errorbar(self.time,self.lcamp,errorscale*self.sigmaLCA,fmt=fmt,capsize=5)
            plt.ylabel('log camp')
        elif self.type=='lcfrac':
            plt.errorbar(self.time,self.lcfrac,errorscale*self.sigmaLCF,fmt=fmt,capsize=5)
            plt.ylabel('log cfracpol')
        elif self.type=='cfrac':
            plt.errorbar(self.time,self.cfrac,errorscale*self.sigmaCF,fmt=fmt,capsize=5)
            plt.ylabel('cfracpol')
        plt.grid()
        plt.xlabel('time [h]')
        plt.show()
    def plot_compare(self,tser,line=False,figsize='',errorscale=1.,add_title=''):
        """Plot this time series together with another tseries on shared axes.

        Args:
            tser: second tseries to overplot.  NOTE(review): it is plotted
                using self's product attributes, so it is expected to hold the
                same product type as self -- confirm at call sites.
            line: if True, join consecutive points with a line.
            figsize: matplotlib figure size tuple; '' selects the (10, 5) default.
            errorscale: multiplier applied to the plotted error bars.
            add_title: extra text appended to the plot title.
        """
        if figsize=='':
            plt.figure(figsize=(10,5))
        else:
            plt.figure(figsize=figsize)
        if line: fmt='o-'
        else: fmt='o'
        plt.title(self.ident+' '+self.type+' '+add_title)
        if self.type=='cphase':
            plt.errorbar(self.time,self.cphase,errorscale*self.sigmaCP,fmt=fmt,capsize=5,label=self.ident)
            plt.errorbar(tser.time,tser.cphase,errorscale*tser.sigmaCP,fmt=fmt,capsize=5,label=tser.ident)
            plt.ylabel('cphase [deg]')
        elif self.type=='amp':
            plt.errorbar(self.time,self.amp,errorscale*self.sigma,fmt=fmt,capsize=5,label=self.ident)
            plt.errorbar(tser.time,tser.amp,errorscale*tser.sigma,fmt=fmt,capsize=5,label=tser.ident)
            plt.ylabel('amp')
        elif self.type=='lcamp':
            plt.errorbar(self.time,self.lcamp,errorscale*self.sigmaLCA,fmt=fmt,capsize=5,label=self.ident)
            plt.errorbar(tser.time,tser.lcamp,errorscale*tser.sigmaLCA,fmt=fmt,capsize=5,label=tser.ident)
            plt.ylabel('log camp')
        elif self.type=='lcfrac':
            plt.errorbar(self.time,self.lcfrac,errorscale*self.sigmaLCF,fmt=fmt,capsize=5,label=self.ident)
            plt.errorbar(tser.time,tser.lcfrac,errorscale*tser.sigmaLCF,fmt=fmt,capsize=5,label=tser.ident)
            plt.ylabel('log cfracpol')
        elif self.type=='cfrac':
            plt.errorbar(self.time,self.cfrac,errorscale*self.sigmaCF,fmt=fmt,capsize=5,label=self.ident)
            plt.errorbar(tser.time,tser.cfrac,errorscale*tser.sigmaCF,fmt=fmt,capsize=5,label=tser.ident)
            plt.ylabel('cfracpol')
        plt.grid()
        plt.xlabel('time [h]')
        plt.legend()
        plt.show()
def plot_compare_list(self,tserL,line=False,figsize='',errorscale=1.,add_title='',labelsL=None,err_cut=1e5,xrange=None,yrange=None):
if figsize=='':
plt.figure(figsize=(10,5))
else:
plt.figure(figsize=figsize)
if line: fmt='o-'
else: fmt='o'
if labelsL==None:
labelsL0 = [tser.ident for tser in tserL]
labelsL=[self.ident]+labelsL0
plt.title(self.ident+' '+self.type+' '+add_title)
if self.type=='cphase':
plt.errorbar(self.time[self.sigmaCP<err_cut],self.cphase[self.sigmaCP<err_cut],errorscale*self.sigmaCP[self.sigmaCP<err_cut],fmt=fmt,capsize=5,label=labelsL[0])
for cou,tser in enumerate(tserL):
plt.errorbar(tser.time[tser.sigmaCP<err_cut],tser.cphase[tser.sigmaCP<err_cut],errorscale*tser.sigmaCP[tser.sigmaCP<err_cut],fmt=fmt,capsize=5,label=labelsL[cou+1])
plt.ylabel('cphase [deg]')
elif self.type=='amp':
plt.errorbar(self.time[self.sigma<err_cut],self.amp[self.sigma<err_cut],errorscale*self.sigma[self.sigma<err_cut],fmt=fmt,capsize=5,label=labelsL[0])
for cou,tser in enumerate(tserL):
plt.errorbar(tser.time[tser.sigma<err_cut],tser.lcamp[tser.sigma<err_cut],errorscale*tser.sigma[tser.sigma<err_cut],fmt=fmt,capsize=5,label=labelsL[cou+1])
plt.ylabel('amp')
elif self.type=='lcamp':
plt.errorbar(self.time[self.sigmaLCA<err_cut],self.lcamp[self.sigmaLCA<err_cut],errorscale*self.sigmaLCA[self.sigmaLCA<err_cut],fmt=fmt,capsize=5,label=labelsL[0])
for cou,tser in enumerate(tserL):
plt.errorbar(tser.time[tser.sigmaLCA<err_cut],tser.lcamp[tser.sigmaLCA<err_cut],errorscale*tser.sigmaLCA[tser.sigmaLCA<err_cut],fmt=fmt,capsize=5,label=labelsL[cou+1])
plt.ylabel('log camp')
elif self.type=='lcfrac':
plt.errorbar(self.time[self.sigmaLCF<err_cut],self.lcfrac[self.sigmaLCF<err_cut],errorscale*self.sigmaLCF[self.sigmaLCF<err_cut],fmt=fmt,capsize=5,label=labelsL[0])
for cou,tser in enumerate(tserL):
plt.errorbar(tser.time[tser.sigmaLCF<err_cut],tser.lcfrac[tser.sigmaLCF<err_cut],errorscale*tser.sigmaLCF[tser.sigmaLCF<err_cut],fmt=fmt,capsize=5,label=labelsL[cou+1])
plt.ylabel('log cfracpol')
elif self.type=='cfrac':
plt.errorbar(self.time[self.sigmaCF<err_cut],self.cfrac[self.sigmaCF<err_cut],errorscale*self.sigmaCF[self.sigmaCF<err_cut],fmt=fmt,capsize=5,label=labelsL[0])
for cou,tser in enumerate(tserL):
plt.errorbar(tser.time[tser.sigmaCF<err_cut],tser.cfrac[tser.sigmaCF<err_cut],errorscale*tser.sigmaCF[tser.sigmaCF<err_cut],fmt=fmt,capsize=5,label=labelsL[cou+1])
plt.ylabel('cfracpol')
plt.grid()
plt.xlabel('time [h]')
if yrange!=None:
plt.ylim(yrange)
if xrange!=None:
plt.xlim(xrange)
plt.legend()
plt.show()
def hist(self,figsize='',perc=2.,show_normal=True):
if figsize=='':
plt.figure(figsize=(10,5))
else:
plt.figure(figsize=figsize)
if self.type=='cphase':
x=self.cphase
err=self.sigmaCP
rel_cl = self.cphase/self.sigmaCP
plt.xlabel('(closure phase) / (estimated error)')
elif self.type=='lcamp':
x=self.lcamp
err=self.sigmaLCA
rel_cl = self.lcamp/self.sigmaLCA
plt.xlabel('(log closure amp) / (estimated error)')
elif self.type=='amp':
x=(self.amp-np.mean(self.amp))
err=self.sigma
rel_cl = (self.amp-np.mean(self.amp))/self.sigma
plt.xlabel('(amp - mean amp) / (estimated error)')
binL = np.percentile(rel_cl,perc)
binR = np.percentile(rel_cl,100.-perc)
binDist = np.abs(binR-binL)
binR = binR + 0.1*binDist
binL = binL - 0.1*binDist
bins = np.linspace(binL,binR,int(1.2*np.sqrt(len(rel_cl))))
plt.hist(rel_cl,bins=bins,normed=True)
if show_normal:
plt.axvline(0,color='k',linestyle='--')
xg = np.linspace(binL, binR,128)
plt.plot(xg,1/np.sqrt(2.*np.pi)*np.exp(-xg**2/2.),'k--')
plt.title(self.ident)
plt.grid()
plt.show()
print('MAD0: ', 1.4826*np.median(np.abs(rel_cl)))
print('MEDIAN ABSOLUTE: ',np.median(np.abs(x)))
print('MEDIAN NORMALIZED: ', np.median(rel_cl))
print('MEDIAN ABSOLUTE:',np.median(x))
print('MEDIAN THERMAL ERROR: ', np.median(err))
print('VARIATION: ',np.std(x) )
    def qmetric(self):
        """Compute the q variability metric of this time series.

        Delegates to the module-level ``qmetric`` module.  Inside this method
        the name 'qmetric' resolves to that module (a global), not to the
        method itself, so the call below is well defined.

        Returns:
            (q, dq): the metric value and its uncertainty.
        """
        # Select the value/error arrays matching the product type.
        if self.type=='amp':
            x = self.amp
            err_x = self.sigma
        if self.type=='cphase':
            x = self.cphase
            err_x = self.sigmaCP
        if self.type=='lcamp':
            x = self.lcamp
            err_x = self.sigmaLCA
        # NOTE(review): for any other product type x/err_x are never assigned
        # and this call raises NameError.
        q,dq = qmetric.qmetric(self.time,x,err_x,product=self.type)
        return q,dq
def save_csv(self,name_out,columns='default',sep=',',header=False):
if columns=='default':
if self.type=='amp':
columns=['mjd','amp','sigma']
elif self.type=='cphase':
columns=['mjd','cphase','sigmaCP']
elif self.type=='lcamp':
columns=['mjd','lcamp','sigmaLCA']
elif self.type=='lcfrac':
columns=['mjd','lcfrac','sigmaLCF']
elif self.type=='cfrac':
columns=['mjd','cfrac','sigmaCF']
self.data[columns].to_csv(name_out,index=False,header=header,sep=sep)
def get_cphase(tobs,triangle,polar='none'):
    """Compute the closure phase time series on a station triangle.

    Args:
        tobs: observation object holding a visibility DataFrame in tobs.df.
        triangle: three-letter station triangle code (one letter per station).
        polar: polarization key into pol_dic, or 'none' to use the generic
            'vis'/'snr' columns.
    Returns:
        DataFrame with per-scan (u, v) and visibilities of the three
        baselines, the closure phase 'cphase' [deg] and its error 'sigmaCP'.
    """
    if polar != 'none':
        polvis=pol_dic[polar]+'vis'
        polsnr=pol_dic[polar]+'snr'
    else: polvis='vis'; polsnr='snr'
    baseL=list(tobs.df.baseline.unique())
    # Determine station ordering: for each triangle edge use the baseline code
    # actually present in the data; sign=-1 marks edges stored with reversed
    # stations, whose visibilities must be conjugated below.
    b=[triangle[0]+triangle[1],triangle[1]+triangle[2],triangle[2]+triangle[0]]
    sign=[0,0,0]
    baseT=b
    # NOTE(review): baseT=b aliases the same list, so baseT[cou]=... also
    # rewrites b -- harmless here since b is not reused afterwards.
    for cou in range(3):
        if (b[cou] in baseL)&(b[cou][::-1] not in baseL):
            sign[cou]=1
        elif (b[cou] not in baseL)&(b[cou][::-1] in baseL):
            sign[cou]=-1
            baseT[cou]= b[cou][::-1]
    foo=tobs.df[list(map(lambda x: x in baseT, tobs.df.baseline))]
    # Keep only scans (mjd groups) where all three baselines are present.
    foo=foo.groupby('mjd').filter(lambda x: len(x)==3)
    fooB0=foo[foo.baseline==baseT[0]].sort_values('mjd').copy()
    fooB1=foo[foo.baseline==baseT[1]].sort_values('mjd').copy()
    fooB2=foo[foo.baseline==baseT[2]].sort_values('mjd').copy()
    foo_out=fooB0[['time','datetime','mjd']].copy()
    foo_out['u1'] = np.asarray(fooB0['u'])
    foo_out['v1'] = np.asarray(fooB0['v'])
    foo_out['vis1'] = np.asarray(fooB0[polvis])
    if sign[0]==-1:
        # Reversed baseline: conjugate the visibility.
        foo_out['vis1'] = np.asarray(foo_out['vis1']).conj()
    foo_out['snr1'] = np.asarray(fooB0[polsnr])
    foo_out['u2'] = np.asarray(fooB1['u'])
    foo_out['v2'] = np.asarray(fooB1['v'])
    foo_out['vis2'] = np.asarray(fooB1[polvis])
    if sign[1]==-1:
        foo_out['vis2'] = np.asarray(foo_out['vis2']).conj()
    foo_out['snr2'] = np.asarray(fooB1[polsnr])
    foo_out['u3'] = np.asarray(fooB2['u'])
    foo_out['v3'] = np.asarray(fooB2['v'])
    foo_out['vis3'] = np.asarray(fooB2[polvis])
    if sign[2]==-1:
        foo_out['vis3'] = np.asarray(foo_out['vis3']).conj()
    foo_out['snr3'] = np.asarray(fooB2[polsnr])
    # Closure phase = argument of the bispectrum; error from per-baseline SNRs.
    foo_out['cphase'] = (180./np.pi)*np.angle( foo_out['vis1']* foo_out['vis2']*foo_out['vis3'])
    foo_out['sigmaCP'] = (180./np.pi)*np.sqrt(1./foo_out['snr1']**2 + 1./foo_out['snr2']**2 + 1./foo_out['snr3']**2)
    return foo_out
def get_lcamp(tobs,quadrangle,polar='none'):
    """Compute the log closure amplitude time series on a station quadrangle.

    Args:
        tobs: observation object holding a visibility DataFrame in tobs.df.
        quadrangle: four-letter station code (one letter per station).
        polar: polarization key into pol_dic, or 'none' to use the generic
            'vis'/'snr' columns.
    Returns:
        DataFrame with per-scan (u, v) and visibilities of the four baselines,
        the log closure amplitude 'lcamp' and its error 'sigmaLCA'.
    """
    if polar != 'none':
        polvis=pol_dic[polar]+'vis'
        polsnr=pol_dic[polar]+'snr'
    else: polvis='vis'; polsnr='snr'
    baseL=list(tobs.df.baseline.unique())
    # The two 'numerator' baselines (01, 23) and two 'denominator' ones (02, 13);
    # reversed codes are substituted when only those appear in the data (no
    # conjugation is needed since only amplitudes are used).
    b=[quadrangle[0]+quadrangle[1],quadrangle[2]+quadrangle[3],quadrangle[0]+quadrangle[2],quadrangle[1]+quadrangle[3]]
    baseQ=b
    for cou in range(4):
        if (b[cou] not in baseL)&(b[cou][::-1] in baseL):
            baseQ[cou]= b[cou][::-1]
    foo=tobs.df[list(map(lambda x: (x in baseQ), tobs.df.baseline))]
    # Keep only scans (mjd groups) where all four baselines are present.
    foo=foo.groupby('mjd').filter(lambda x: len(x)==4)
    fooB0=foo[foo.baseline==baseQ[0]].sort_values('mjd').copy()
    fooB1=foo[foo.baseline==baseQ[1]].sort_values('mjd').copy()
    fooB2=foo[foo.baseline==baseQ[2]].sort_values('mjd').copy()
    fooB3=foo[foo.baseline==baseQ[3]].sort_values('mjd').copy()
    foo_out=fooB0[['time','datetime','mjd']].copy()
    foo_out['u1'] = np.asarray(fooB0['u'])
    foo_out['v1'] = np.asarray(fooB0['v'])
    foo_out['vis1'] = np.asarray(fooB0[polvis])
    foo_out['snr1'] = np.asarray(fooB0[polsnr])
    foo_out['u2'] = np.asarray(fooB1['u'])
    foo_out['v2'] = np.asarray(fooB1['v'])
    foo_out['vis2'] = np.asarray(fooB1[polvis])
    foo_out['snr2'] = np.asarray(fooB1[polsnr])
    foo_out['u3'] = np.asarray(fooB2['u'])
    foo_out['v3'] = np.asarray(fooB2['v'])
    foo_out['vis3'] = np.asarray(fooB2[polvis])
    foo_out['snr3'] = np.asarray(fooB2[polsnr])
    foo_out['u4'] = np.asarray(fooB3['u'])
    foo_out['v4'] = np.asarray(fooB3['v'])
    foo_out['vis4'] = np.asarray(fooB3[polvis])
    foo_out['snr4'] = np.asarray(fooB3[polsnr])
    # log closure amplitude = log|V1| + log|V2| - log|V3| - log|V4|.
    foo_out['lcamp'] = np.log(np.abs(foo_out['vis1'])) + np.log(np.abs(foo_out['vis2'])) - np.log(np.abs(foo_out['vis3'])) - np.log(np.abs(foo_out['vis4']))
    foo_out['sigmaLCA'] = np.sqrt(1./foo_out['snr1']**2 + 1./foo_out['snr2']**2 + 1./foo_out['snr3']**2 + 1./foo_out['snr4']**2)
    return foo_out
def get_lcfrac(tobs,baseline):
    """Compute the log closure fractional polarization time series on a baseline.

    Args:
        tobs: observation object holding a visibility DataFrame in tobs.df,
            with circular-polarization columns (rr/ll/rl/lr vis, sigma, snr).
        baseline: two-station baseline code; the transposed code is used if
            only that one is present in the data.
    Returns:
        DataFrame with time columns, (u, v), 'lcfrac' and its error 'sigmaLCF'.
    """
    baseL=list(tobs.df.baseline.unique())
    if baseline not in baseL:
        if baseline[1]+baseline[0] in baseL:
            print('Using transposed baseline')
            baseline=baseline[1]+baseline[0]
        else: print('No such baseline')
    # BUG FIX: operate on a copy -- the original filtered without .copy() and
    # then mutated the slice in place (dropna/debias), triggering
    # SettingWithCopyWarning and risking modification of the caller's tobs.df.
    foo = tobs.df[tobs.df.baseline==baseline].copy()
    if tobs.polrep=='circ':
        foo.dropna(axis=0, subset=['rrvis','rlvis','llvis','lrvis','rrsigma','llsigma','lrsigma','rlsigma','rrsnr','llsnr','lrsnr','rlsnr'], inplace=True)
    foo_out=foo[['time','datetime','mjd']].copy()
    foo_out['u'] = np.asarray(foo['u'])
    foo_out['v'] = np.asarray(foo['v'])
    debias=True
    if debias:
        # Debias amplitudes: |V_debiased| = sqrt(|V|^2 - sigma^2), phase kept.
        foo['rlvis'] = foo['rlvis']*np.sqrt(np.abs(foo['rlvis'])**2 - np.abs(foo['rlsigma'])**2)/np.abs(foo['rlvis'])
        foo['lrvis'] = foo['lrvis']*np.sqrt(np.abs(foo['lrvis'])**2 - np.abs(foo['lrsigma'])**2)/np.abs(foo['lrvis'])
        foo['rrvis'] = foo['rrvis']*np.sqrt(np.abs(foo['rrvis'])**2 - np.abs(foo['rrsigma'])**2)/np.abs(foo['rrvis'])
        foo['llvis'] = foo['llvis']*np.sqrt(np.abs(foo['llvis'])**2 - np.abs(foo['llsigma'])**2)/np.abs(foo['llvis'])
    # log closure fracpol = log|RL| + log|LR| - log|RR| - log|LL|.
    foo_out['lcfrac'] = np.log(np.abs(foo['rlvis'])) + np.log(np.abs(foo['lrvis'])) - np.log(np.abs(foo['rrvis'])) - np.log(np.abs(foo['llvis']))
    foo_out['sigmaLCF'] = np.sqrt(1./foo['llsnr']**2 + 1./foo['rrsnr']**2 + 1./foo['lrsnr']**2 + 1./foo['rlsnr']**2)
    return foo_out
def get_cfrac(tobs,baseline):
    """Compute the closure fractional polarization time series on a baseline.

    Args:
        tobs: observation object holding a visibility DataFrame in tobs.df,
            with circular-polarization columns (rr/ll/rl/lr vis, sigma, snr).
        baseline: two-station baseline code; the transposed code is used if
            only that one is present in the data.
    Returns:
        DataFrame with time columns, (u, v), 'cfrac' and its error 'sigmaCF'.
    """
    baseL=list(tobs.df.baseline.unique())
    if baseline not in baseL:
        if baseline[1]+baseline[0] in baseL:
            print('Using transposed baseline')
            baseline=baseline[1]+baseline[0]
        else: print('No such baseline')
    # BUG FIX: operate on a copy -- the original filtered without .copy() and
    # then mutated the slice in place (dropna/debias), triggering
    # SettingWithCopyWarning and risking modification of the caller's tobs.df.
    foo = tobs.df[tobs.df.baseline==baseline].copy()
    if tobs.polrep=='circ':
        foo.dropna(axis=0, subset=['rrvis','rlvis','llvis','lrvis','rrsigma','llsigma','lrsigma','rlsigma','rrsnr','llsnr','lrsnr','rlsnr'], inplace=True)
    foo_out=foo[['time','datetime','mjd']].copy()
    foo_out['u'] = np.asarray(foo['u'])
    foo_out['v'] = np.asarray(foo['v'])
    debias=True
    if debias:
        # Debias amplitudes: |V_debiased| = sqrt(|V|^2 - sigma^2), phase kept.
        foo['rlvis'] = foo['rlvis']*np.sqrt(np.abs(foo['rlvis'])**2 - np.abs(foo['rlsigma'])**2)/np.abs(foo['rlvis'])
        foo['lrvis'] = foo['lrvis']*np.sqrt(np.abs(foo['lrvis'])**2 - np.abs(foo['lrsigma'])**2)/np.abs(foo['lrvis'])
        foo['rrvis'] = foo['rrvis']*np.sqrt(np.abs(foo['rrvis'])**2 - np.abs(foo['rrsigma'])**2)/np.abs(foo['rrvis'])
        foo['llvis'] = foo['llvis']*np.sqrt(np.abs(foo['llvis'])**2 - np.abs(foo['llsigma'])**2)/np.abs(foo['llvis'])
    # closure fracpol = sqrt(|RL||LR| / (|RR||LL|)).
    foo_out['cfrac'] = np.sqrt((np.abs(foo['rlvis']))*(np.abs(foo['lrvis']))/(np.abs(foo['rrvis']))/(np.abs(foo['llvis'])))
    foo_out['sigmaCF'] = 0.5*(foo_out['cfrac'])*np.sqrt(1./foo['llsnr']**2 + 1./foo['rrsnr']**2 + 1./foo['lrsnr']**2 + 1./foo['rlsnr']**2)
    return foo_out
def make_df_full_cp(obs,round_s=0.1):
    """Convert visibilities from obs.data to DataFrame format, splitting the
    four circular-polarization products into separate rows.

    Args:
        obs: ObsData object
        round_s: accuracy of the datetime column in seconds
    Returns:
        df: observation visibility data in DataFrame format, one row per
            (visibility, polarization) pair with a 'polarization' column in
            {'RR', 'LL', 'LR', 'RL'} and the product's amp/phase/sigma copied
            into the generic 'amp'/'phase'/'sigma' columns.
    """
    sour=obs.source
    df = pd.DataFrame(data=obs.data)
    df['fmjd'] = df['time']/24.
    df['mjd'] = obs.mjd + df['fmjd']
    telescopes = list(zip(df['t1'],df['t2']))
    telescopes = [(x[0],x[1]) for x in telescopes]
    df['baseline'] = [x[0]+'-'+x[1] for x in telescopes]
    df['amp'] = list(map(np.abs,df['vis']))
    df['phase'] = list(map(lambda x: (180./np.pi)*np.angle(x),df['vis']))
    df['datetime'] = Time(df['mjd'], format='mjd').datetime
    df['datetime'] =list(map(lambda x: round_time(x,round_s=round_s),df['datetime']))
    df['jd'] = Time(df['mjd'], format='mjd').jd
    # Per-polarization quantities unpacked from the observation object.
    quantities=['llamp','rramp','rlamp','lramp','llsigma','rrsigma','rlsigma','lrsigma','rrphase','llphase','rlphase','lrphase']
    for quantity in quantities:
        df[quantity] = [x[0] for x in obs.unpack(quantity)]
    df['source'] = sour
    df['baselength'] = np.sqrt(np.asarray(df.u)**2+np.asarray(df.v)**2)
    basic_columns = list(set(df.columns)-set(quantities))
    def _pol_frame(prefix, label):
        # One sub-frame per polarization product, with that product's values
        # copied into the generic amp/phase/sigma columns.
        sub = df[basic_columns+[prefix+'amp',prefix+'phase',prefix+'sigma']].copy()
        sub['amp'] = sub[prefix+'amp']
        sub['phase'] = sub[prefix+'phase']
        sub['sigma'] = sub[prefix+'sigma']
        sub = sub[basic_columns]
        sub['polarization'] = label
        return sub
    # BUG FIX: the original called pd.concat() with no arguments, which raises
    # a TypeError; the four per-polarization frames must be concatenated.
    df = pd.concat([_pol_frame('rr','RR'), _pol_frame('ll','LL'),
                    _pol_frame('lr','LR'), _pol_frame('rl','RL')])
    return df
def round_time(t,round_s=0.1):
    """rounding time to given accuracy

    Args:
        t: time (datetime.datetime)
        round_s: delta time to round to in seconds

    Returns:
        round_t: rounded time
    """
    # Work with seconds elapsed since the start of t's year.
    year_start = datetime.datetime(t.year,1,1)
    elapsed = t - year_start
    elapsed_s = elapsed.days*24*3600 + elapsed.seconds + elapsed.microseconds*(1e-6)
    # Snap to the nearest multiple of round_s.
    elapsed_s = np.round(elapsed_s/round_s)*round_s
    # Decompose back into whole days / seconds / microseconds.
    whole_days = np.floor(elapsed_s/24/3600)
    whole_seconds = np.floor(elapsed_s - 24*3600*whole_days)
    whole_micros = int(1e6*(elapsed_s - whole_days*3600*24 - whole_seconds))
    return year_start + datetime.timedelta(whole_days, whole_seconds, whole_micros)
def save_all_products(pathf,path_out,special_name,get_what=['AMP','CP','LCA','CF'],get_pol=['LL','RR'],min_elem=100.,cadence=-1,polrep='circ',columns='default'):
    """Load a uvfits file and save CSV time series for the requested products.

    Args:
        pathf: path to the input uvfits file (passed to load_uvfits).
        path_out: output directory prefix; per-product subfolders are created.
        special_name: tag prepended to every output file name.
        get_what: products to save -- any of 'AMP', 'CP', 'LCA', 'LCF', 'CF'
            (note: 'LCF' has a branch below but is not in the default list).
        get_pol: polarizations to process; None processes a single
            unspecified polarization.
        min_elem: minimum number of points required to save a series.
        cadence: coherent averaging time passed to load_uvfits (tcoh).
        polrep: polarization representation passed to load_uvfits.
        columns: column selection forwarded to tseries.save_csv.
    """
    if get_pol==None: get_pol=[None]
    for pol in get_pol:
        tobs = load_uvfits(pathf,tcoh=cadence,polar=pol,polrep=polrep)
        if pol==None: pol=''
        # Station letters are recovered from the baseline codes; 'R' is
        # excluded (NOTE(review): presumably a reference/dummy station code --
        # confirm against the loader's conventions).
        stations = list(set(''.join(tobs.df.baseline)))
        stations = [x for x in stations if x!='R']
        if 'AMP' in get_what:
            print('Saving visibility amplitudes time series...')
            if not os.path.exists(path_out+'AMP'):
                os.makedirs(path_out+'AMP')
            baseL = tobs.df.baseline.unique()
            for base in baseL:
                tser = tseries(tobs,base,product='amp')
                if len(tser.mjd)>min_elem:
                    tser.save_csv(path_out+'AMP/'+special_name+'_'+tser.source+'_'+base+'_'+pol+'.csv',columns=columns)
        if 'CP' in get_what:
            print('Saving closure phase time series...')
            if not os.path.exists(path_out+'CP'):
                os.makedirs(path_out+'CP')
            # All station triangles, in sorted order.
            triangleL=sorted([x[0]+x[1]+x[2] for x in itertools.combinations(stations,3)])
            for tri in triangleL:
                tser = tseries(tobs,tri,product='cphase')
                if len(tser.mjd)>min_elem:
                    tser.save_csv(path_out+'CP/'+special_name+'_'+tser.source+'_'+tri+'_'+pol+'.csv',columns=columns)
        if 'LCA' in get_what:
            print('Saving log closure amplitude time series...')
            if not os.path.exists(path_out+'LCA'):
                os.makedirs(path_out+'LCA')
            # Two inequivalent station orderings per 4-station combination.
            quadrangleL1=sorted([x[0]+x[1]+x[2]+x[3] for x in itertools.combinations(stations,4)])
            quadrangleL2=sorted([x[0]+x[3]+x[1]+x[2] for x in itertools.combinations(stations,4)])
            quadrangleL=quadrangleL1+quadrangleL2
            for quad in quadrangleL:
                tser = tseries(tobs,quad,product='lcamp')
                if len(tser.mjd)>min_elem:
                    tser.save_csv(path_out+'LCA/'+special_name+'_'+tser.source+'_'+quad+'_'+pol+'.csv',columns=columns)
        if 'LCF' in get_what:
            print('Saving log closure fracpol time series...')
            if not os.path.exists(path_out+'LCF'):
                os.makedirs(path_out+'LCF')
            baseL = tobs.df.baseline.unique()
            baseL = [base for base in baseL if 'R' not in base]
            for base in baseL:
                tser = tseries(tobs,base,product='lcfrac')
                if len(tser.mjd)>min_elem:
                    tser.save_csv(path_out+'LCF/'+special_name+'_'+tser.source+'_'+base+'.csv',columns=columns)
        if 'CF' in get_what:
            print('Saving closure fracpol time series...')
            if not os.path.exists(path_out+'CF'):
                os.makedirs(path_out+'CF')
            baseL = tobs.df.baseline.unique()
            baseL = [base for base in baseL if 'R' not in base]
            for base in baseL:
                tser = tseries(tobs,base,product='cfrac')
                if len(tser.mjd)>min_elem:
                    tser.save_csv(path_out+'CF/'+special_name+'_'+tser.source+'_'+base+'.csv',columns=columns)
"maciek.wielgus@gmail.com"
] | maciek.wielgus@gmail.com |
b041d27ad67048f098504f32a777272c2c0183c7 | 99218b477267dafe4b9e37a17df7f1cd7af28c78 | /fynd_test/wsgi.py | 23222bda9e292bc12fb0264613d4a79352997fdc | [] | no_license | nattesharan/fynd_test | da3367641cdd9d46220ba881680c7b809539d006 | 45d21e509379bb643630e374f9a31a9454f9f746 | refs/heads/master | 2022-12-01T17:49:38.299113 | 2020-08-16T16:19:23 | 2020-08-16T16:19:23 | 287,949,046 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 395 | py | """
WSGI config for fynd_test project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings before building the application object.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'fynd_test.settings')
# Module-level WSGI callable discovered by WSGI servers (gunicorn, mod_wsgi, ...).
application = get_wsgi_application()
| [
"nattesharan@gmail.com"
] | nattesharan@gmail.com |
43523188442bee7be5573d5bf87f3806b4f092da | 2d57b2c9849ba79889dfda2ba7a219c594f78331 | /pyalgos/algorithms/sorting/heap.py | 3839037a8e820383945d12e32b6a82db76eeedd7 | [] | no_license | olibrook/data-structures-algos-python | a96fcc49ca212aaa6dda256469dc18f22abcde9d | be9f058c9b8876a2c61a55d47a86b4e4f2cb8077 | refs/heads/master | 2021-07-13T08:30:57.794216 | 2020-06-01T00:45:06 | 2020-06-01T00:45:06 | 138,454,453 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 141 | py | import pyalgos.data_structures.heap as h
def sort(arr):
    """Heap sort via the pyalgos MinHeap: push every element, then materialize
    the heap as a list.  Returns a new list; *arr* is not modified.

    NOTE(review): assumes iterating h.MinHeap yields its elements in ascending
    order -- confirm against the MinHeap implementation.
    """
    heap = h.MinHeap()
    for x in arr:
        heap.add(x)
    return list(heap)
| [
"olibrook@gmail.com"
] | olibrook@gmail.com |
78ea76695ffd009815497005787fb0cf035548fd | c2e1a41198ea5e7c18864848564ae68610969dd6 | /upload/migrations/0015_fscjob_uniquefolder.py | 33bff5e70f43264fa5061c2a13f87bacdab0a7db | [
"MIT"
] | permissive | carl9384/fscupload | cda0794ba56557d0cdeb0df1a573d1c09e9678a3 | b0dd73014294f1a6bb01ed946489b52141cadb76 | refs/heads/master | 2021-06-25T14:17:21.348312 | 2019-02-11T02:02:39 | 2019-02-11T02:02:39 | 98,359,279 | 0 | 1 | MIT | 2019-02-10T17:38:51 | 2017-07-25T23:46:58 | Python | UTF-8 | Python | false | false | 486 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-07-26 20:52
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration (see file header): adds the
    # 'uniquefolder' CharField to the 'fscjob' model with a pre-generated
    # default value for existing rows.
    dependencies = [
        ('upload', '0014_auto_20170725_2336'),
    ]
    operations = [
        migrations.AddField(
            model_name='fscjob',
            name='uniquefolder',
            field=models.CharField(default='69d88c23e083deaf1bb8', max_length=20),
        ),
    ]
| [
"cnegro@nysbc.org"
] | cnegro@nysbc.org |
77dd5abb723497730fd82ff12025e54f2471e189 | 6a3805c0e3a1f1a17dafde998b07dff724fa3432 | /fixedwing/migrations/0004_fixedwingpost_user_name.py | 4bb82ea7b542959155849a5839cc370334e570f7 | [
"MIT"
] | permissive | shivam675/avation | de85bd7bceb88f4567f2d549c914b6bee978d216 | 9f39a9fbeb2ad7d27d88816b0085cdd814b9ae35 | refs/heads/main | 2023-04-14T14:54:20.213381 | 2021-05-02T10:35:05 | 2021-05-02T10:35:05 | 358,865,969 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 634 | py | # Generated by Django 3.2 on 2021-04-21 07:34
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated Django migration (see file header): re-adds the
    # 'user_name' foreign key from 'fixedwingpost' to the configured user
    # model, cascading deletes.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('fixedwing', '0003_remove_fixedwingpost_user_name'),
    ]
    operations = [
        migrations.AddField(
            model_name='fixedwingpost',
            name='user_name',
            field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
    ]
| [
"samarkale.15@gmail.com"
] | samarkale.15@gmail.com |
249d4b83823c45d4de6a19805f21df7c0519a7d1 | 61c7984360e25ee54b02340ef81735529df520ba | /OnShift-PythonBootcamp-Session5/steps/fizzbuzz_steps.py | 364a3f1c5271bc8ef3af29a92b074f5a9ba8ca6e | [] | no_license | samsawan/onshift_python_bootcamp | 6711c8d84c9622ed9fd7a0afe9c68d567c45465f | b7ade8a8a0b571364eacabedb3bd7519130313d4 | refs/heads/master | 2021-01-25T09:43:34.370572 | 2018-03-06T15:12:10 | 2018-03-06T15:12:10 | 123,310,863 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 602 | py | from behave import *
use_step_matcher("re")
@given("I have a number (?P<input_num>.+)")
def step_impl(context, input_num):
    """Step stub for the '@given' clause -- not implemented yet.

    :type context: behave.runner.Context
    :type input_num: str
    """
    # TODO: implement (auto-generated behave step stub).
    raise NotImplementedError
@when("I call Fizzbuzz")
def step_impl(context):
    """Step stub for the '@when' clause -- not implemented yet.

    :type context: behave.runner.Context
    """
    # TODO: implement (auto-generated behave step stub).
    raise NotImplementedError
@then("I should get back (?P<output_exp>.+)")
def step_impl(context, output_exp):
    """Step stub for the '@then' clause -- not implemented yet.

    :type context: behave.runner.Context
    :type output_exp: str
    """
    # TODO: implement (auto-generated behave step stub).
    raise NotImplementedError
| [
"ssawan@onshift.com"
] | ssawan@onshift.com |
04b9b53630dcc36bb3cdb1b9f364d0194d7e120c | 1eb9f084434b7e8f5af5f262f36ad57abfa888e5 | /lib/ros_comm-1.12.0/utilities/message_filters/src/message_filters/__init__.py | 9cc21c48c886fca665feec34887a30117e564e26 | [
"BSD-3-Clause"
] | permissive | MangoMangoDevelopment/neptune | 2c30d641829cf473e74c5bd8b6fa6c7258d0e0a4 | 185f501fb227f0ec3700e29adcd7be0bac85f49e | refs/heads/master | 2020-05-29T18:08:08.728630 | 2017-04-13T15:10:42 | 2017-04-13T15:10:42 | 51,020,825 | 10 | 5 | null | 2017-10-25T16:01:32 | 2016-02-03T18:27:49 | Python | UTF-8 | Python | false | false | 8,509 | py | # Copyright (c) 2009, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Message Filter Objects
======================
"""
import itertools
import threading
import rospy
class SimpleFilter(object):
    """Base class for message filters: fans messages out to registered callbacks."""

    def __init__(self):
        # Maps connection id -> (callback, extra call-supplied args).
        self.callbacks = {}

    def registerCallback(self, cb, *args):
        """
        Register a callback function `cb` to be called when this filter
        has output.

        The filter invokes ``cb`` with a filter-dependent list of arguments,
        followed by the call-supplied arguments ``args``.  Returns the
        connection id.
        """
        connection_id = len(self.callbacks)
        self.callbacks[connection_id] = (cb, args)
        return connection_id

    def signalMessage(self, *msg):
        """Deliver ``msg`` to every registered callback."""
        for callback, extra_args in self.callbacks.values():
            callback(*(msg + extra_args))
class Subscriber(SimpleFilter):
    """
    ROS subscription filter. Identical arguments as :class:`rospy.Subscriber`.

    This class acts as a highest-level filter, simply passing messages
    from a ROS subscription through to the filters which have connected
    to it.
    """
    def __init__(self, *args, **kwargs):
        SimpleFilter.__init__(self)
        # By rospy.Subscriber convention the first positional argument is the
        # topic name.
        self.topic = args[0]
        # Route incoming messages through self.callback so they can be fanned
        # out to downstream filters.
        kwargs['callback'] = self.callback
        self.sub = rospy.Subscriber(*args, **kwargs)
    def callback(self, msg):
        # Forward every received message to the registered filters.
        self.signalMessage(msg)
    def getTopic(self):
        return self.topic
    def __getattr__(self, key):
        """Serve same API as rospy.Subscriber"""
        # Only invoked for attributes not found on this object; delegate to
        # the wrapped rospy.Subscriber.
        return self.sub.__getattribute__(key)
class Cache(SimpleFilter):
    """
    Stores a time history of messages.

    Given a stream of messages, the most recent ``cache_size`` messages
    are cached in a ring buffer, from which time intervals of the cache
    can then be retrieved by the client.
    """
    def __init__(self, f, cache_size = 1):
        SimpleFilter.__init__(self)
        self.connectInput(f)
        self.cache_size = cache_size
        # Ring buffer of messages, oldest first.
        self.cache_msgs = []
        # Parallel list of the messages' header stamps.
        # NOTE(review): despite the original "sorted insertion" comment,
        # stamps are appended in arrival order and never sorted here.
        self.cache_times = []
    def connectInput(self, f):
        # Subscribe to the upstream filter's output.
        self.incoming_connection = f.registerCallback(self.add)
    def add(self, msg):
        # Cannot use message filters with non-stamped messages.
        if not hasattr(msg, 'header') or not hasattr(msg.header, 'stamp'):
            rospy.logwarn("Cannot use message filters with non-stamped messages")
            return
        stamp = msg.header.stamp
        self.cache_times.append(stamp)
        self.cache_msgs.append(msg)
        # Ring buffer behavior: discard the oldest entry when oversized.
        if (len(self.cache_msgs) > self.cache_size):
            del self.cache_msgs[0]
            del self.cache_times[0]
        # Signal new input to downstream filters.
        self.signalMessage(msg)
    def getInterval(self, from_stamp, to_stamp):
        """Query the current cache content between from_stamp to to_stamp."""
        assert from_stamp <= to_stamp
        return [m for m in self.cache_msgs
                if m.header.stamp >= from_stamp and m.header.stamp <= to_stamp]
    def getElemAfterTime(self, stamp):
        """Return the oldest element after or equal the passed time stamp."""
        newer = [m for m in self.cache_msgs if m.header.stamp >= stamp]
        if not newer:
            return None
        return newer[0]
    def getElemBeforeTime(self, stamp):
        """Return the newest element before or equal the passed time stamp."""
        older = [m for m in self.cache_msgs if m.header.stamp <= stamp]
        if not older:
            return None
        return older[-1]
    def getLastestTime(self):
        """Return the newest recorded timestamp."""
        # (Method name typo preserved: it is part of the public API.)
        if not self.cache_times:
            return None
        return self.cache_times[-1]
    def getOldestTime(self):
        """Return the oldest recorded timestamp."""
        if not self.cache_times:
            return None
        return self.cache_times[0]
class TimeSynchronizer(SimpleFilter):
    """
    Synchronizes messages by their timestamps.

    :class:`TimeSynchronizer` synchronizes incoming message filters by the
    timestamps contained in their messages' headers. TimeSynchronizer
    listens on multiple input message filters ``fs``, and invokes the callback
    when it has a collection of messages with matching timestamps.

    The signature of the callback function is::

        def callback(msg1, ... msgN):

    where N is the number of input message filters, and each message is
    the output of the corresponding filter in ``fs``.

    The required ``queue size`` parameter specifies how many sets of
    messages it should store from each input filter (by timestamp)
    while waiting for messages to arrive and complete their "set".
    """
    def __init__(self, fs, queue_size):
        SimpleFilter.__init__(self)
        self.connectInput(fs)
        self.queue_size = queue_size
        self.lock = threading.Lock()
    def connectInput(self, fs):
        # One pending-message dict (stamp -> msg) per input filter.
        self.queues = [{} for f in fs]
        self.input_connections = [f.registerCallback(self.add, q) for (f, q) in zip(fs, self.queues)]
    def add(self, msg, my_queue):
        # BUG FIX: use the lock as a context manager so it is released even if
        # a downstream callback raises (the original bare acquire/release pair
        # leaked the lock on exceptions).
        with self.lock:
            my_queue[msg.header.stamp] = msg
            # Bound the queue: drop the oldest pending stamps first.
            while len(my_queue) > self.queue_size:
                del my_queue[min(my_queue)]
            # common is the set of timestamps that occur in all queues.
            # BUG FIX: the original used the bare builtin reduce(), which does
            # not exist in Python 3.
            common = set(self.queues[0]) if self.queues else set()
            for q in self.queues[1:]:
                common &= set(q)
            for t in sorted(common):
                # msgs is the list of msgs (one from each queue) with stamp t.
                msgs = [q[t] for q in self.queues]
                self.signalMessage(*msgs)
                for q in self.queues:
                    del q[t]
class ApproximateTimeSynchronizer(TimeSynchronizer):
    """
    Approximately synchronizes messages by their timestamps.

    :class:`ApproximateTimeSynchronizer` synchronizes incoming message filters
    by the timestamps contained in their messages' headers. The API is the
    same as TimeSynchronizer except for an extra `slop` parameter in the
    constructor that defines the delay (in seconds) with which messages can
    be synchronized.
    """
    def __init__(self, fs, queue_size, slop):
        TimeSynchronizer.__init__(self, fs, queue_size)
        self.slop = rospy.Duration.from_sec(slop)
    def add(self, msg, my_queue):
        # BUG FIX: use the lock as a context manager so it is released even if
        # a downstream callback raises (the original bare acquire/release pair
        # leaked the lock on exceptions).
        with self.lock:
            my_queue[msg.header.stamp] = msg
            while len(my_queue) > self.queue_size:
                del my_queue[min(my_queue)]
            # Try every combination of one pending stamp per queue; emit the
            # set when its time spread is below slop and all entries are still
            # present (earlier emissions in this loop may have consumed some).
            for vv in itertools.product(*[list(q.keys()) for q in self.queues]):
                qt = list(zip(self.queues, vv))
                if (((max(vv) - min(vv)) < self.slop) and
                        (len([1 for q, t in qt if t not in q]) == 0)):
                    msgs = [q[t] for q, t in qt]
                    self.signalMessage(*msgs)
                    for q, t in qt:
                        del q[t]
| [
"a.wareham@gmail.com"
] | a.wareham@gmail.com |
9902c8bd932c3cc78c49ecf1c4375db30bcea633 | 692c7bc11ca9718f58860fa0016571f63d928d55 | /src/generators/SemanticFusion/parsing.py | fcceeb73f53938b0e8cc0d4756cd7d66532d8d50 | [
"MIT"
] | permissive | bingoko/yinyang | 9548a8ed949c1519502acfcb734629f4d300c56b | ba709fa78f0f30954923f03eb2e15aa5b1311717 | refs/heads/master | 2023-01-08T22:50:05.746526 | 2020-11-15T03:13:46 | 2020-11-15T03:13:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,120 | py | import random
from src.generators.SemanticFusion.Symbol import Symbol, Symbols, MetamorphicTuple
def get_script_from_file(file):
with open(file,"r") as reader:
script = reader.read()
return script
def clean_bv(string):
string = string.replace("BV{","(_ BitVec ")
string = string.replace("}",")")
return string
def unsupported_file(fname):
    """Return True if `fname` is one of Trevor Hansen's edge-case parser
    tests, detected by a marker comment within the first four lines.

    The original returned None implicitly in the negative case; an explicit
    False is returned now (both are falsy, so callers are unaffected).
    """
    marker = "Constructed by Trevor Hansen to test edge case parsing"
    with open(fname) as fp:
        for _ in range(4):
            # readline() returns "" at EOF, so files shorter than four
            # lines are handled safely.
            if marker in fp.readline():
                return True
    return False
def get_num_variables(fname):
    """Count occurrences of "declare-fun" in the file — a proxy for the
    number of declared variables."""
    with open(fname) as fp:
        return fp.read().count("declare-fun")
def string2file(fn, string):
    """Write `string` to the file `fn`, truncating any existing content."""
    with open(fn, "w") as out_file:
        out_file.write(string)
def warning_or_error(stdout, stderr):
    """Heuristically decide whether a solver run crashed or misbehaved by
    scanning its combined stdout/stderr for known crash markers."""
    stdstream = stdout + " " + stderr
    # "Model unavailable" responses are expected and benign — never an error.
    if "model is not available" in stdstream or "Cannot get model" in stdstream:
        return False
    crash_markers = (
        "LEAKED",
        "Leaked",
        "Segmentation fault",
        "segmentation fault",
        "segfault",
        "ASSERTION VIOLATION",
        "(error",
        "Assertion failure",
        "Fatal failure",
        "Internal error detected",
        "an invalid model was generated",
        "Failed to verify",
        "failed to verify",
        "ERROR: AddressSanitizer:",
    )
    return any(marker in stdstream for marker in crash_markers)
def get_commands(script):
    """Split an SMT-LIB script into its top-level "(...)" commands.

    Tracks string literals so parentheses inside quotes are not counted as
    structure; `bracket_counter` is -1 while outside any command. Terminates
    the whole process on a stray top-level closing bracket.
    """
    commands = []
    bracket_counter = -1
    bracket_content = ""
    in_quote = False
    for c in script:
        if c == '"':
            # Toggle on every quote character; escaped quotes are not handled.
            in_quote = not in_quote
        if not in_quote:
            if c == '(':
                if bracket_counter > 0:
                    bracket_counter += 1
                elif bracket_counter == -1:
                    # First "(" opens a new top-level command.
                    bracket_counter = 1
                else:
                    # Defensive: bracket_counter should never be 0 here.
                    print("invalid formula")
                    exit(1)
            elif c == ")":
                bracket_counter -= 1
                if bracket_counter == 0:
                    # Command complete; its final ")" is appended explicitly
                    # because the collector below only runs inside a command.
                    commands.append(bracket_content+")")
                    bracket_content = ""
                    bracket_counter = -1
        # Collect characters (including quoted parens) while inside a command.
        if bracket_counter != -1: bracket_content += c
    return commands
def decompose(script):
    """Split an SMT-LIB script into its top-level command categories.

    Returns a 6-tuple: the "(set-logic ...)" command (or "" when absent) and
    lists of sort declarations, constant declarations, function declarations,
    function definitions, and assertions.

    Fixes the original's type juggling where `logic` was either a string or
    an empty list and relied on "".join() working for both; the redundant
    list() copies of freshly built lists are dropped as well.
    """
    commands = get_commands(script)
    # First (set-logic ...) command, or the empty string when none exists.
    logic = next((c for c in commands if c.startswith("(set-logic")), "")
    decl_sorts = [c for c in commands if c.startswith("(declare-sort")]
    decl_consts = [c for c in commands if c.startswith("(declare-const")]
    decl_funcs = [c for c in commands if c.startswith("(declare-fun")]
    def_funcs = [c for c in commands if c.startswith("(define-fun")]
    asserts = [c for c in commands if c.startswith("(assert")]
    return logic, decl_sorts, decl_consts, decl_funcs, def_funcs, asserts
def get_symbol(declaration):
    """Parse one declaration/definition command into a Symbol(name, sort).

    Walks the raw command text character by character: first the symbol name
    (honouring |quoted| SMT-LIB identifiers), then the sort, skipping the
    (possibly empty) argument list of declare-fun/define-fun commands.
    """
    #get symbol
    prefix = declaration.split(" ")[0]
    symbol = ""
    index = declaration.find(prefix)+len(prefix)
    while declaration[index] == " " or declaration[index] == "\n":
        index += 1
    if declaration[index] == "|":
        # |quoted| symbol: copy everything up to the closing pipe.
        bracket_counter = 1
        while bracket_counter != 0:
            index += 1
            if declaration[index] == "|": bracket_counter -= 1
            elif bracket_counter != 0:
                symbol = symbol + declaration[index]
        symbol = "|%s|" %symbol
    else:
        while declaration[index] != " " and declaration[index] != "\n":
            symbol = symbol + declaration[index]
            index += 1
    # Resume scanning right after the first occurrence of the symbol text.
    index = declaration.find(symbol)+len(symbol)
    s_type = ""
    while declaration[index] == " ":
        index += 1
    if declaration[index] == "(":
        # Skip the argument list "(...)" of a declare-fun/define-fun.
        bracket_counter = 1
        while bracket_counter != 0:
            index += 1
            if declaration[index] == "(": bracket_counter += 1
            elif declaration[index] == ")": bracket_counter -= 1
        if declaration[index] == ")": index += 1
        while declaration[index] == " ":
            index += 1
        if declaration[index] == "(":
            # Parenthesised sort, e.g. "(_ BitVec 32)" or "(Array ...)".
            bracket_counter = 1
            while bracket_counter != 0:
                index += 1
                if declaration[index] == "(":
                    bracket_counter += 1
                elif declaration[index] == ")":
                    bracket_counter -= 1
                s_type = s_type + declaration[index]
            s_type = "(%s" %s_type
        # NOTE(review): a declare-fun whose return sort is a plain token
        # (e.g. "(declare-fun f () Int)") appears to leave s_type empty on
        # this path — confirm against the Symbol consumer.
    else:
        while declaration[index] != " " and declaration[index] != "\n" and declaration[index] != ")":
            s_type = s_type + declaration[index]
            index += 1
    # #get type
    # index = -2
    # s_type = ""
    # while declaration[index] == " ":
    #     index -= 1
    # if declaration[index] == ")":
    #     bracket_counter = 1
    #     while bracket_counter != 0:
    #         index -= 1
    #         if declaration[index] == ")": bracket_counter += 1
    #         elif declaration[index] == "(": bracket_counter -= 1
    #         if bracket_counter != 0:
    #             s_type = declaration[index] + s_type
    #     s_type = "(%s)" %s_type
    # else:
    #     while declaration[index] != " ":
    #         s_type = declaration[index] + s_type
    #         index -= 1
    # print(s_type)
    return Symbol(symbol, s_type)
def fun_has_arguments(line):
    """True iff `line` declares/defines a function whose argument list is
    not the empty "()"."""
    is_fun = "(declare-fun" in line or "(define-fun" in line
    return is_fun and "()" not in line
def get_symbols(string, only_zero_valued_funcs=False):
    """Collect the zero-arity declared/defined symbols of an SMT-LIB script.

    Note: `only_zero_valued_funcs` is kept for interface compatibility but
    is currently unused.
    """
    prefixes = ("(declare-const", "(declare-fun", "(define-fun")
    collected = []
    for command in get_commands(string):
        if not command.startswith(prefixes):
            continue
        if fun_has_arguments(command):
            continue
        collected.append(get_symbol(command))
    return Symbols(collected)
def get_declared_symbols(string, only_zero_valued_funcs=False):
    """Collect every declared/defined symbol of an SMT-LIB script, including
    functions with arguments (unlike get_symbols).

    `only_zero_valued_funcs` is kept for interface compatibility; unused.
    """
    prefixes = ("(declare-const", "(declare-fun", "(define-fun")
    return Symbols([get_symbol(command)
                    for command in get_commands(string)
                    if command.startswith(prefixes)])
def disjunction(script1, script2):
    """
    Disjunction of two SMT scripts
    Assumption: script1 and script2 have no shared variables

    Bug fix: the original used assertion.strip("(assert"), but str.strip()
    removes any run of the characters "(asert" from both ends — for a command
    like "(assert(as ..." it also ate the start of the assertion body. The
    prefix is now removed by slicing.
    """
    _, decl_sorts1, decl_consts1, decl_funcs1, def_funcs1, asserts1 = decompose(script1)
    _, decl_sorts2, decl_consts2, decl_funcs2, def_funcs2, asserts2 = decompose(script2)
    # Sort declarations may overlap between the two scripts; emit each once.
    sorts = list(set(decl_sorts1).union(set(decl_sorts2)))

    def _conjunction(asserts):
        """Shuffle the assert commands and join their bodies into "(and ...)"."""
        random.shuffle(asserts)
        conj = " (and"
        for assertion in asserts:
            body = assertion[len("(assert"):]
            body = body[:body.rfind(")")]
            conj += body
        return conj + ")"

    result = "".join(sorts) + "".join(decl_consts1) + "".join(decl_consts2)\
        + "".join(decl_funcs1) + "".join(decl_funcs2) + "".join(def_funcs1) + "".join(def_funcs2)
    # Left-to-right evaluation keeps the original shuffle order (asserts1
    # shuffled before asserts2).
    result += "(assert (or %s %s))" % (_conjunction(asserts1), _conjunction(asserts2))
    return result
def random_map(symbols1, symbols2):
    """Randomly pair each symbol of symbols1 with a type-compatible symbol of
    symbols2 (rejection sampling; symbols2 entries may be reused)."""
    metamophic_tuples = []
    symbols2_type_list = []
    # Types available on the symbols2 side, used to skip unmatchable symbols.
    for symbol2 in symbols2.symbols:
        if symbol2.type not in symbols2_type_list:
            symbols2_type_list.append(symbol2.type)
    for symbol1 in symbols1.symbols:
        if symbol1.type not in symbols2_type_list:
            # No partner of the same type exists — skipping here also
            # guarantees the rejection loop below terminates.
            continue
        symbol2 = random.choice(symbols2.symbols)
        while symbol1.type != symbol2.type:
            symbol2 = random.choice(symbols2.symbols)
        metamophic_tuples.append(MetamorphicTuple(symbol1, symbol2))
    return metamophic_tuples
def ranking_map(symbol1, symbol2, script1, script2):
    """Pair the symbols of the two scripts by similar occurrence rank.

    Both collections are sorted by ascending occurrence count, then each
    symbol from script1 is greedily matched with the first remaining
    type-compatible symbol from script2. Symbols without a type match are
    silently dropped. Note the parameters `symbol1`/`symbol2` are shadowed
    by the loop variables below.
    """
    metamophic_tuples = []
    sorted_symbols1 = []
    sorted_symbols2 = []
    for symbol in symbol1.symbols:
        symbol.set_occurrences(script1)  # side effect: caches the count
        sorted_symbols1.append(symbol)
    for symbol in symbol2.symbols:
        symbol.set_occurrences(script2)
        sorted_symbols2.append(symbol)
    sorted_symbols1.sort(key=get_occurrences)
    sorted_symbols2.sort(key=get_occurrences)
    for symbol1 in sorted_symbols1:
        for symbol2 in sorted_symbols2:
            if symbol1.type == symbol2.type:
                # Consume the partner so it cannot be matched twice.
                sorted_symbols2.remove(symbol2)
                metamophic_tuples.append(MetamorphicTuple(symbol1, symbol2))
                break
    return metamophic_tuples
def get_occurrences(symbol):
    """Sort-key helper: a symbol's cached occurrence count."""
    return symbol.occurrences
def replace_variable(line, source, target, prob=100):
    """Replace whole-token occurrences of `source` by `target` in `line`.

    `prob` is a percentage: each candidate token is rewritten with
    probability prob/100 (default: always).
    """
    if "((%s " % source in line:
        return line # source is a quantifier
    l = []
    for token in line.split(" "):
        # Crude word-boundary test for s-expressions: the token is exactly
        # the symbol, or the symbol followed by ")", or preceded by "(".
        # NOTE(review): a fully parenthesised token such as "(x)" matches
        # neither test and is left untouched — confirm this is intended.
        if token == source or token.startswith(source+")") or token.endswith("("+source):
            weighted_random = [True] * prob + [False] * (100 - prob)
            if random.choice(weighted_random):
                # str.replace substitutes every occurrence inside the token.
                l.append(token.replace(source, target))
            else:
                l.append(token)
        else:
            l.append(token)
    return " ".join(l)
def shift_script(script, prefix):
    """Rename every declared symbol of `script` according to the shift map
    derived from `prefix`, dropping comment lines (those starting with ";").

    The result joins the surviving lines with single spaces (and therefore
    starts with a leading space when non-empty), exactly as before.
    """
    rename_table = get_declared_symbols(script).get_shiftmap(prefix)
    kept_lines = []
    for raw_line in script.split('\n'):
        renamed = raw_line
        for old_name in rename_table:
            renamed = replace_variable(renamed, old_name, rename_table[old_name])
        if not renamed.startswith(";"):
            kept_lines.append(renamed)
    return "".join(" " + line for line in kept_lines)
| [
"noreply@github.com"
] | bingoko.noreply@github.com |
46b7f3b5630906b2591578476fbf9b5df898667a | 628a0adb428411fac1e74abfbcfa150a47084916 | /Finale_Code/Approaches/Feature_Based/bm25_parameter_tuning.py | 0c15667d1e62cb22e5e522382e9e8f2f9b382b95 | [] | no_license | KaiBaeuerle/Information_Retrieval | a9abe8d55af3a107225a466ddc204a2a8f28acec | 494ea3d38960c7f79cf7b1fa5eeeadacad52f442 | refs/heads/main | 2023-05-29T00:48:11.324584 | 2021-06-13T19:02:52 | 2021-06-13T19:02:52 | 351,711,747 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,171 | py | import pandas as pd
import numpy as np
from sklearn.linear_model import LogisticRegressionCV, LogisticRegression
from sklearn import metrics
import sys
import pickle
from sklearn.experimental import enable_halving_search_cv
from sklearn.model_selection import HalvingGridSearchCV
sys.path.append('/media/Moritz/080FFDFF509A959E/BWsync_share/Master_BW/Information_Retrieval_Project/code')
from preprocess import preprocess # own preprocessing function
# Location of the MS-MARCO-style training data and pickled helper objects.
path = "/media/Moritz/Seagate Backup Plus Drive/University/Information_Retrieval_Project/data/"
# The triples file stores (query, positive passage, negative passage); load
# the positive and the negative pairs separately so they can be stacked with
# explicit relevance labels below.
_read_kwargs = dict(sep="\t", nrows=200000, header=None, skiprows=1)
train_pos = pd.read_csv(path + "triples.train.small.tsv", usecols=[0, 1], **_read_kwargs)
train_neg = pd.read_csv(path + "triples.train.small.tsv", usecols=[0, 2], **_read_kwargs)
def load_obj(name):
    """Unpickle `<path><name>.pkl` and return the stored object."""
    with open(path + name + '.pkl', 'rb') as f:
        return pickle.load(f)
# Precomputed inverse-document-frequency table: token -> idf weight.
idf = load_obj("idf")
#%% Data manipulation
train_pos.columns = ["query", "passage"]
train_neg.columns = ["query", "passage"]
train_pos["relevant"] = 1  # target label
train_neg["relevant"] = 0
# NOTE(review): DataFrame.append is deprecated in recent pandas — confirm
# the pinned pandas version before upgrading.
train = train_pos.append(train_neg)
#%% Feature computation
### define functions to extract features given a query and passage
def bm25(idf, query, passage, avg_len_passages, k = 1.5, b = 0.75):
    """Okapi BM25 score of `passage` for `query`.

    idf              -- mapping token -> inverse document frequency
    avg_len_passages -- mean passage length of the corpus
    k, b             -- the usual BM25 saturation / length-normalisation
                        hyperparameters (k is often called k1; 1.2-2.0 and
                        b=0.75 are common defaults)
    """
    query_bow = preprocess(query)
    passage_bow = preprocess(passage)
    common_words = list(set(query_bow) & set(passage_bow))
    # The length-normalisation term is identical for every word — compute it
    # once instead of inside the loop.
    len_norm = k * ((1 - b) + b * passage_bow.size / avg_len_passages)
    score = 0
    for word in common_words:
        # Term frequency in the passage; the original computed this twice.
        tf = np.count_nonzero(passage_bow == word)
        score += idf[word] * (k + 1) * tf / (tf + len_norm)
    return score
### add bm25 of query, passage pair as a feature to the dataset:
X = train.copy()
avg_len_passages = 35.4 # See file "estimate_avg_passage_len.py"
#%%
# define y
y = X["relevant"]
#%% Successive halving hyperparameter tuning:
# Evaluate each (b, k) configuration on a growing random sample; after every
# round, eliminate the configurations whose AUC falls below the median.
# `mat` holds the latest AUC per configuration, `mat_bool` marks the
# configurations still in the race. (The earlier coarse sequential search and
# its commented-out remnants were removed; its conclusion — k = 1, b = 0.8 —
# is re-confirmed below.)
range_b = np.arange(0.5,1,0.05) # allows for a finer grid
range_k = np.arange(1,2,0.05)
logistic = LogisticRegression()
# c = number of halving rounds needed to reduce the grid to one candidate.
a = len(range_b) * len(range_k)
c = 0
while a > 1:
    a = a/2
    c+=1
# means: c iterations needed.
mat = np.full((len(range_b), len(range_k)), np.inf)
mat_bool = np.full((len(range_b), len(range_k)), True)
n = 500
for i in range(8):
    sample = np.random.randint(400000, size=n)
    for j in range(len(range_b)):
        for l in range(len(range_k)):
            if mat_bool[j,l]:
                # AUC of a one-feature logistic regression on the bm25 score.
                x = X.iloc[sample][["query", "passage"]].apply(lambda x: bm25(idf, x[0], x[1], avg_len_passages, k = range_k[l], b = range_b[j]), axis=1)
                logistic.fit(x.values.reshape(-1,1), X.iloc[sample]["relevant"])
                pred = logistic.predict_proba(x.values.reshape(-1,1)).transpose()[1]
                auc = metrics.roc_auc_score(X.iloc[sample]["relevant"],pred)
                mat[j,l] = auc
    print(i)
    # Median AUC over the surviving configurations ...
    best = []
    for j in range(len(range_b)):
        for l in range(len(range_k)):
            if mat_bool[j][l]:
                best.append(mat[j][l])
    median = np.median(best)
    # ... and eliminate everything scoring below it.
    for j in range(len(range_b)):
        for l in range(len(range_k)):
            if mat[j][l] < median:
                mat_bool[j][l] = False
    n = n + 1000
    if i > 4: # simple way to increase resources for the best configurations
        n = n + 4000
# Remaining configuration(s) after all rounds.
j, l = np.where(mat_bool == True)
range_b[j] # 0.8
range_k[l] # 1
## results of previous parameter tuning confirmed!
| [
"noreply@github.com"
] | KaiBaeuerle.noreply@github.com |
686464eb605c12cdfb8384943d02fe21966f09f3 | 525c2454c1e865b1377f372653666988f900660e | /day01/login.py | b252de86985189b235db3547568146e5c5cc8792 | [] | no_license | ysyi212/Study | 52ede4e7a6d6bbc993136d91fa7bbd7754d9030a | 69f49a681a672542f01a726acb596598f72823fd | refs/heads/master | 2020-12-14T17:13:44.737534 | 2020-01-28T08:31:28 | 2020-01-28T08:31:28 | 234,821,114 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 86 | py | #!/root/ysy/python/bin/python3
# Minimal interactive script: ask for the user's name and greet them.
name = input('input your name: ')
print('welcome',name) | [
"ysyi212@aliyun.com"
] | ysyi212@aliyun.com |
315730b88d08ce329c85893b22e65920bcdac068 | 878671ee6e501972fbba9cbc22651735ca2fa2e6 | /main.py | 53d529ca6ae6ac45c021cdb30874cabd345527ed | [] | no_license | aklowell/build-a-blog | 014b80fd268e3e74b9a57e8a1bd333e82579fc3e | b416b1abdbf8680f0a278a61a5fc0e99d92aaf6f | refs/heads/master | 2021-07-10T10:04:39.999099 | 2017-10-09T03:55:56 | 2017-10-09T03:55:56 | 105,560,380 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,803 | py | from flask import Flask, request, redirect, render_template
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
app.config['DEBUG'] = True
# NOTE(review): database credentials are hard-coded in the connection URI —
# consider moving them to environment variables or a config kept out of VCS.
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://build-a-blog:build-a-blog@localhost:8889/build-a-blog'
app.config['SQLALCHEMY_ECHO'] = True
# SQLAlchemy handle bound to the Flask app; models derive from db.Model.
db = SQLAlchemy(app)
class Blog(db.Model):
    # Declarative ORM model: one row per blog post; title/body capped at 255.
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(255))
    body = db.Column(db.String(255))
    def __init__(self, title='', body=''):
        self.title = title
        self.body = body
@app.route('/blog', methods=['POST','GET'])
def index():
    """Show a single post when an ?id= query parameter is present,
    otherwise list all posts (newest first)."""
    if request.args:
        blog_id=request.args.get("id")
        blog=Blog.query.get(blog_id)
        if blog is None:
            # Missing or unknown id: fall back to the listing instead of
            # rendering the entry template with blog=None.
            return redirect('/blog')
        return render_template('blogentry.html',blog=blog)
    else:
        blogs=Blog.query.order_by(Blog.id.desc()).all()
        return render_template('blogs.html', blogs=blogs)
@app.route('/newpost',methods=["POST", "GET"])
def add_blog():
    """Render the new-post form on GET; validate and persist the post on POST."""
    if request.method=="GET":
        return render_template('newpost.html')
    # POST: pull the submitted fields and validate that both are non-empty.
    blog_title = request.form['title']
    blog_body = request.form['body']
    title_error = "" if blog_title else "Please enter a blog title."
    body_error = "" if blog_body else "Please enter a blog entry."
    if title_error or body_error:
        # Re-render the form, echoing the input back next to the errors.
        return render_template('newpost.html', title_error=title_error, body_error=body_error,blog_title=blog_title,blog_body=blog_body)
    new_post = Blog(blog_title, blog_body)
    db.session.add(new_post)
    db.session.commit()
    # Redirect to the permalink of the freshly created post.
    return redirect("/blog?id=" + str(new_post.id))
# Run the development server only when executed directly (not when imported).
if __name__ == '__main__':
    app.run() | [
"anneklowell@outlook.com"
] | anneklowell@outlook.com |
13173c0c90b40858d24a8dd1a49957abc36658df | db6cbfe3fc997417fe390e6fbf6c7fb073223653 | /Lab3-4/UI.py | e620ebda1387634d49ca67224ca8d756d8a5f6a0 | [] | no_license | ecaterinacatargiu/AI | cad44bb6c7345fc4e0675770eecca2fac7741a34 | d7384f00b3f571fd01dec92e5737687b071104ab | refs/heads/main | 2023-03-12T19:28:47.942802 | 2021-02-19T18:13:45 | 2021-02-19T18:13:45 | 340,451,541 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,759 | py | # -*- coding: utf-8 -*-
"""
Created on Sat Mar 28 18:44:05 2020
@author: Cati
"""
from Controller import Controller
from Population import Population
from hc import *
class UI:
    """Console menu driving the three search strategies (EA / HC / PSO).

    Bug fixes relative to the original:
      * start() called self.getEA()/self.getHC(), which do not exist
        (the methods are getEa/getHc) — an AttributeError at runtime;
      * getHc() referenced the non-existent attribute self.hc and the
        global name `ea`;
      * the constructor required a controller although main() below builds
        the UI with only a population — `controller` now defaults to None.
    """
    def __init__(self, ea:Population, controller: Controller = None):
        self.ea = ea
        self.controller = controller
    def printMenu(self):
        print()
        print("Choose one: ")
        print("1. EA")
        print("2. Hill Climbing")
        print("3. PSO")
    def getEa(self):
        """Run one evolutionary-algorithm iteration."""
        return self.ea.iteration()
    def getHc(self):
        # runHillClimbing is assumed to come from `from hc import *` —
        # TODO confirm its exact signature.
        return runHillClimbing(self.ea, self.ea.getPopSize())
    def getPso(self):
        return self.controller.runPSO()
    def start(self):
        """Prompt in a loop until the user enters 0."""
        self.printMenu()
        command = int(input("Enter your command: "))
        while command !=0:
            if command == 1:
                self.getEa()
            elif command == 2:
                self.getHc()
            elif command == 3:
                self.getPso()
            else:
                print("No command")
            self.printMenu()
            command = int(input("Enter your command: "))
def main():
    """Read the GA parameters from stdin and start the console UI."""
    individual_size = int(input("Enter the size of the individual: "))
    population_size = int(input("Enter the size of the population: "))
    mutation_probability = int(input("Enter the probability of mutation: "))
    crossover_probability = int(input("Enter the probability of crossover: "))
    population = Population(individual_size, population_size, mutation_probability, crossover_probability)
    UI(population).start()
# Module-level entry point: runs on import as well. Consider guarding with
# `if __name__ == "__main__":` (review note).
main()
| [
"georgianaecaterina.catargiu@microfocus.com"
] | georgianaecaterina.catargiu@microfocus.com |
98f76ec619a2e488aa99de17c4447d474c1cb2e1 | 3f6c16ea158a8fb4318b8f069156f1c8d5cff576 | /.PyCharm2019.1/system/python_stubs/-1046095393/atexit.py | 3b4fb40c097ce9444aa1ae283f0da5efbfc50ffd | [] | no_license | sarthak-patidar/dotfiles | 08494170d2c0fedc0bbe719cc7c60263ce6fd095 | b62cd46f3491fd3f50c704f0255730af682d1f80 | refs/heads/master | 2020-06-28T23:42:17.236273 | 2019-10-01T13:56:27 | 2019-10-01T13:56:27 | 200,369,900 | 0 | 0 | null | 2019-08-03T12:56:33 | 2019-08-03T11:53:29 | Shell | UTF-8 | Python | false | false | 4,738 | py | # encoding: utf-8
# module atexit
# from (built-in)
# by generator 1.147
"""
allow programmer to define multiple exit functions to be executedupon normal program termination.
Two public functions, register and unregister, are defined.
"""
# no imports
# functions
# IDE stub: signature/doc recovered from the C builtin's __doc__; no real body.
def register(func, *args, **kwargs): # real signature unknown; restored from __doc__
    """
    register(func, *args, **kwargs) -> func
    Register a function to be executed upon normal program termination
    func - function to be called at exit
    args - optional arguments to pass to func
    kwargs - optional keyword arguments to pass to func
    func is returned to facilitate usage as a decorator.
    """
    pass
# IDE stub: signature/doc recovered from the C builtin's __doc__; no real body.
def unregister(func): # real signature unknown; restored from __doc__
    """
    unregister(func) -> None
    Unregister an exit function which was previously registered using
    atexit.register
    func - function to be unregistered
    """
    pass
# IDE stub for the private helper of the real C module.
def _clear(): # real signature unknown; restored from __doc__
    """
    _clear() -> None
    Clear the list of previously registered exit functions.
    """
    pass
# IDE stub: the constant 0 is a placeholder return, not real behavior.
def _ncallbacks(): # real signature unknown; restored from __doc__
    """
    _ncallbacks() -> int
    Return the number of registered exit functions.
    """
    return 0
# IDE stub for the private helper of the real C module.
def _run_exitfuncs(): # real signature unknown; restored from __doc__
    """
    _run_exitfuncs() -> None
    Run all registered exit functions.
    """
    pass
# classes
class __loader__(object):
    """
    Meta path import for built-in modules.
    All methods are either class or static methods to avoid the need to
    instantiate the class.
    """
    # Auto-generated IDE stub mirroring importlib's BuiltinImporter; the
    # method bodies below are placeholders, not real implementations.
    @classmethod
    def create_module(cls, *args, **kwargs): # real signature unknown
        """ Create a built-in module """
        pass
    @classmethod
    def exec_module(cls, *args, **kwargs): # real signature unknown
        """ Exec a built-in module """
        pass
    @classmethod
    def find_module(cls, *args, **kwargs): # real signature unknown
        """
        Find the built-in module.
        If 'path' is ever specified then the search is considered a failure.
        This method is deprecated. Use find_spec() instead.
        """
        pass
    @classmethod
    def find_spec(cls, *args, **kwargs): # real signature unknown
        pass
    @classmethod
    def get_code(cls, *args, **kwargs): # real signature unknown
        """ Return None as built-in modules do not have code objects. """
        pass
    @classmethod
    def get_source(cls, *args, **kwargs): # real signature unknown
        """ Return None as built-in modules do not have source code. """
        pass
    @classmethod
    def is_package(cls, *args, **kwargs): # real signature unknown
        """ Return False as built-in modules are never packages. """
        pass
    @classmethod
    def load_module(cls, *args, **kwargs): # real signature unknown
        """
        Load the specified module into sys.modules and return it.
        This method is deprecated. Use loader.exec_module instead.
        """
        pass
    def module_repr(module): # reliably restored by inspect
        """
        Return repr for the module.
        The method is deprecated. The import machinery does the job itself.
        """
        pass
    def __init__(self, *args, **kwargs): # real signature unknown
        pass
    __weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
    """list of weak references to the object (if defined)"""
    __dict__ = None # (!) real value is "mappingproxy({'__module__': '_frozen_importlib', '__doc__': 'Meta path import for built-in modules.\\n\\n    All methods are either class or static methods to avoid the need to\\n    instantiate the class.\\n\\n    ', 'module_repr': <staticmethod object at 0x7f1f2a7150f0>, 'find_spec': <classmethod object at 0x7f1f2a715128>, 'find_module': <classmethod object at 0x7f1f2a715160>, 'create_module': <classmethod object at 0x7f1f2a715198>, 'exec_module': <classmethod object at 0x7f1f2a7151d0>, 'get_code': <classmethod object at 0x7f1f2a715240>, 'get_source': <classmethod object at 0x7f1f2a7152b0>, 'is_package': <classmethod object at 0x7f1f2a715320>, 'load_module': <classmethod object at 0x7f1f2a715358>, '__dict__': <attribute '__dict__' of 'BuiltinImporter' objects>, '__weakref__': <attribute '__weakref__' of 'BuiltinImporter' objects>})"
# variables with complex values
# Stub placeholder: the real spec is created by the import machinery.
__spec__ = None # (!) real value is "ModuleSpec(name='atexit', loader=<class '_frozen_importlib.BuiltinImporter'>, origin='built-in')"
| [
"sarthakpatidar15@gmail.com"
] | sarthakpatidar15@gmail.com |
d59946de3156710a87637070b2375189e9f23961 | 499a24afc6fbd985353224cb09cb91b7b89a1c86 | /apps/base/urls.py | 4df70a5837f154fcbcfa983f52fa4bb1a6081089 | [
"MIT"
] | permissive | KenichiTanino/django_upload_with_mosaic-image | 68ed2d7d79cb5ead03ab8454f3bd98f0e688cde6 | 793fbb92e00e34ff091f7a5293bc610de36aa3eb | refs/heads/main | 2023-04-02T02:40:42.890557 | 2021-04-14T14:15:51 | 2021-04-14T14:15:51 | 356,816,981 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 243 | py | """urlconf for the base application"""
from django.conf.urls import url
from .views import home
from .views import upload_file
# URL routes for the base app: home page and the upload form.
# NOTE(review): django.conf.urls.url() is deprecated and removed in modern
# Django (re_path()/path() replace it) — confirm the project's Django version.
urlpatterns = [
    url(r'^$', home, name='home'),
    url(r'upload_form$', upload_file, name='upload_form'),
]
| [
"tanino@a2.mbn.or.jp"
] | tanino@a2.mbn.or.jp |
85ade40b3fd736b98b5d56953fdfbddf0b7fe7e1 | fdf26c1191bbe22a310124e5e7d8d7ec752f3050 | /Backup/20150107093039/OmniSharp/commands/code_actions.py | aff9bfd644a66f9faefa43a5006f75db1fbe2af2 | [] | no_license | ahmetabdi/sublime-text-3 | 56a814706ba1b9a3a635e4f9f0f6c537476b82b9 | 13fa68c8cacca335bbc2ac02a537ea06a0386b5a | refs/heads/master | 2020-12-24T19:17:27.114354 | 2016-03-10T12:06:20 | 2016-03-10T12:06:20 | 15,140,480 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 3,639 | py | import sublime
import sublime_plugin
from ..lib import omnisharp
from ..lib import helpers
class OmniSharpCodeActions(sublime_plugin.TextCommand):
    """Two-phase Sublime command for OmniSharp code actions.

    First invocation: captures the current selection and asynchronously asks
    the OmniSharp server for the available code actions. The response handler
    re-runs this command; the second invocation (data already cached) shows
    the quick panel. Picking an entry calls /runcodeaction and replaces the
    buffer with the returned text.

    Refactor: the selection-parameter fill and the state reset were each
    duplicated several times; both are now private helpers.
    """
    # Cached /getcodeactions response between the two invocations, plus the
    # 1-based selection bounds the request was made for.
    data = None
    selectionStartColumn = 0
    selectionStartLine = 0
    selectionEndColumn = 0
    selectionEndLine = 0

    def run(self, edit):
        if self.data is None:
            selection = self.view.sel()
            params = {}
            if len(selection) > 0:
                print('length is : ' + str(len(selection)))
                location = selection[0]
                # Sublime rows/cols are 0-based; the OmniSharp API is 1-based.
                cursor = self.view.rowcol(location.begin())
                self.selectionStartLine = cursor[0] + 1
                self.selectionStartColumn = cursor[1] + 1
                othercursor = self.view.rowcol(location.end())
                self.selectionEndLine = othercursor[0] + 1
                self.selectionEndColumn = othercursor[1] + 1
            self._fill_selection_params(params)
            omnisharp.get_response(
                self.view, '/getcodeactions', self._handle_codeactions, params)
        else:
            self._show_code_actions_view(edit)

    def _fill_selection_params(self, params):
        """Copy the cached selection bounds into a request parameter dict."""
        params['selectionStartColumn'] = self.selectionStartColumn
        params['selectionStartLine'] = self.selectionStartLine
        params['selectionEndColumn'] = self.selectionEndColumn
        params['selectionEndLine'] = self.selectionEndLine

    def _reset_state(self):
        """Forget the cached response and the selection bounds."""
        self.data = None
        self.selectionEndLine = 0
        self.selectionEndColumn = 0
        self.selectionStartLine = 0
        self.selectionStartColumn = 0

    def _handle_codeactions(self, data):
        print(data)
        if data is None:
            return
        self.data = data
        # Re-enter run() with the response cached.
        self.view.run_command('omni_sharp_code_actions')

    def _show_code_actions_view(self, edit):
        print('codeactions is :')
        print(self.data)
        self.quickitems = []
        if "CodeActions" in self.data and self.data["CodeActions"] != None:
            for i in self.data["CodeActions"]:
                print(i)
                self.quickitems.append(i.strip())
        if len(self.quickitems) > 0:
            self.view.window().show_quick_panel(self.quickitems, self.on_done)
        else:
            self._reset_state()

    def is_enabled(self):
        return helpers.is_csharp(self.view)

    def on_done(self, index):
        """Quick-panel callback; index is -1 when the panel was dismissed."""
        if index == -1:
            self._reset_state()
            return
        print("run index: " + str(index))
        params = {}
        params['codeAction'] = index
        self._fill_selection_params(params)
        omnisharp.get_response(self.view, '/runcodeaction', self._handle_runcodeaction, params)
        self._reset_state()

    def _handle_runcodeaction(self, data):
        print('runcodeaction is:')
        print(data)
        if data is None:
            return
        self.view.run_command("omni_sharp_run_code_action",{"args":{'text':data['Text']}})
class OmniSharpRunCodeAction(sublime_plugin.TextCommand):
    # Replaces the entire buffer with the text returned by /runcodeaction.
    def run(self, edit, args):
        # Region spanning the whole file.
        region = sublime.Region(0, self.view.size())
        self.view.replace(edit, region, args['text'])
        self.view.sel().clear() | [
"ahmetabdi@gmail.com"
] | ahmetabdi@gmail.com |
586d74151ef062c249063e4d5c269c511d557769 | 052a9999ddf26f98b19e5d9564eb4ded55c36009 | /src/model_generators/logistic_regression.py | 5bd50b12d6aadd1a76811974413312d9f7681ecb | [
"MIT"
] | permissive | Rosster/MLFinalProject | 6f0ae1ad8b93ecdf464796166c54e4417902ccbc | 521a739d4c5371db08179a54e22c76a9827136bb | refs/heads/master | 2020-04-26T09:54:33.734474 | 2019-03-23T01:39:04 | 2019-03-23T01:39:04 | 173,471,948 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,189 | py | import numpy as np
from sklearn.linear_model import LogisticRegressionCV
from sklearn import metrics
'''
Logistic Regression is a Machine Learning classification
algorithm that is used to predict the probability of a categorical dependent variable.
In logistic regression, the dependent variable is a binary variable that
contains data coded as 1 (yes, success, etc.) or 0 (no, failure, etc.).
In other words, the logistic regression model predicts P(Y=1) as a function of X.
Notes:
Binary logistic regression requires the dependent variable to be binary.
Only the meaningful variables should be included.
Logistic regression requires quite large sample sizes.
'''
# Name of the binary label column; all other columns are treated as features.
RESPONSE_VARIABLE = 'sentiment'

def construct(train_df, opts=None, remove_features=None):
    """Fit a cross-validated logistic regression on `train_df`.

    train_df        -- DataFrame whose 'sentiment' column is the binary label
    opts            -- reserved for future options (currently unused); the
                       mutable default `opts={}` was replaced by None
    remove_features -- optional iterable of feature columns to exclude
    Returns the fitted LogisticRegressionCV model.

    NOTE(review): `RESPONSE_VARIABLE not in col` is a substring test, so any
    column merely containing "sentiment" is excluded from the features.
    """
    feature_cols = [col for col in train_df.columns if RESPONSE_VARIABLE not in col]
    if remove_features:
        feature_cols = [col for col in feature_cols if col not in remove_features]
    X = train_df[feature_cols]
    y = train_df[RESPONSE_VARIABLE]
    model = LogisticRegressionCV(
        fit_intercept=True,  # add a bias term to the decision function
        cv=10,
        solver='lbfgs',  # handles multinomial loss; fine for binary too
        n_jobs=-1,
        random_state=27
    )
    model.fit(X, y)
    print('coeff, intercept -->', model.coef_, model.intercept_)
    in_sample_accuracy = model.score(X, y)
    print('in-sample accuracy --> ', in_sample_accuracy)
    return model
def get_predictions(model, test_df, remove_features=None):
    """Return model.predict() over the feature columns of `test_df`
    (every column not matching the response variable, minus any in
    `remove_features`)."""
    selected = [c for c in test_df.columns if RESPONSE_VARIABLE not in c]
    if remove_features:
        selected = [c for c in selected if c not in remove_features]
    return model.predict(test_df[selected])
def mean_accuracy(model, test_df, remove_features=None):
    # Mean accuracy of `model` on test_df (delegates to model.score).
    # NOTE(review): `RESPONSE_VARIABLE not in col` is a substring test, so any
    # column merely containing "sentiment" is dropped from the feature set.
    feature_cols = [col for col in test_df.columns if RESPONSE_VARIABLE not in col]
    if remove_features:
        feature_cols = [col for col in feature_cols if col not in remove_features]
    X = test_df[feature_cols]
    y = test_df[RESPONSE_VARIABLE]
    return model.score(X, y) | [
"rohan.jyoti@variantyx.com"
] | rohan.jyoti@variantyx.com |
17a0b25b7520802c0316a50b66f74a804df1a76e | caaf56727714f8c03be38710bc7d0434c3ec5b11 | /tests/components/abode/test_light.py | 6506746783c2c8bc154c57ee3317833d02c7ff28 | [
"Apache-2.0"
] | permissive | tchellomello/home-assistant | c8db86880619d7467901fd145f27e0f2f1a79acc | ed4ab403deaed9e8c95e0db728477fcb012bf4fa | refs/heads/dev | 2023-01-27T23:48:17.550374 | 2020-09-18T01:18:55 | 2020-09-18T01:18:55 | 62,690,461 | 8 | 1 | Apache-2.0 | 2023-01-13T06:02:03 | 2016-07-06T04:13:49 | Python | UTF-8 | Python | false | false | 4,040 | py | """Tests for the Abode light device."""
from homeassistant.components.abode import ATTR_DEVICE_ID
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_TEMP,
ATTR_RGB_COLOR,
DOMAIN as LIGHT_DOMAIN,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_FRIENDLY_NAME,
ATTR_SUPPORTED_FEATURES,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_ON,
)
from .common import setup_platform
from tests.async_mock import patch
# Entity id of the fixture light exercised by every test below.
DEVICE_ID = "light.living_room_lamp"
async def test_entity_registry(hass):
    """Tests that the devices are registered in the entity registry."""
    await setup_platform(hass, LIGHT_DOMAIN)
    entity_registry = await hass.helpers.entity_registry.async_get_registry()
    entry = entity_registry.async_get(DEVICE_ID)
    # Unique id comes from the Abode fixture data used by setup_platform.
    assert entry.unique_id == "741385f4388b2637df4c6b398fe50581"
async def test_attributes(hass):
    """Test the light attributes are correct."""
    await setup_platform(hass, LIGHT_DOMAIN)
    state = hass.states.get(DEVICE_ID)
    # Expected values mirror the Abode fixture for the RGB dimmer device.
    assert state.state == STATE_ON
    assert state.attributes.get(ATTR_BRIGHTNESS) == 204
    assert state.attributes.get(ATTR_RGB_COLOR) == (0, 63, 255)
    assert state.attributes.get(ATTR_COLOR_TEMP) == 280
    assert state.attributes.get(ATTR_DEVICE_ID) == "ZB:db5b1a"
    assert not state.attributes.get("battery_low")
    assert not state.attributes.get("no_response")
    assert state.attributes.get("device_type") == "RGB Dimmer"
    assert state.attributes.get(ATTR_FRIENDLY_NAME) == "Living Room Lamp"
    assert state.attributes.get(ATTR_SUPPORTED_FEATURES) == 19
async def test_switch_off(hass):
    """Test the light can be turned off."""
    await setup_platform(hass, LIGHT_DOMAIN)
    with patch("abodepy.AbodeLight.switch_off") as mock_switch_off:
        # NOTE(review): only this test asserts the async_call return value;
        # the sibling tests ignore it — consider making them consistent.
        assert await hass.services.async_call(
            LIGHT_DOMAIN, SERVICE_TURN_OFF, {ATTR_ENTITY_ID: DEVICE_ID}, blocking=True
        )
        await hass.async_block_till_done()
        mock_switch_off.assert_called_once()
async def test_switch_on(hass):
    """Ensure a turn-on service call reaches the Abode API."""
    await setup_platform(hass, LIGHT_DOMAIN)

    with patch("abodepy.AbodeLight.switch_on") as mock_switch_on:
        service_data = {ATTR_ENTITY_ID: DEVICE_ID}
        await hass.services.async_call(
            LIGHT_DOMAIN, SERVICE_TURN_ON, service_data, blocking=True
        )
        await hass.async_block_till_done()
        mock_switch_on.assert_called_once()
async def test_set_brightness(hass):
    """Check that HA brightness is forwarded to Abode as a level value."""
    await setup_platform(hass, LIGHT_DOMAIN)

    with patch("abodepy.AbodeLight.set_level") as mock_set_level:
        service_data = {ATTR_ENTITY_ID: DEVICE_ID, "brightness": 100}
        await hass.services.async_call(
            LIGHT_DOMAIN, SERVICE_TURN_ON, service_data, blocking=True
        )
        await hass.async_block_till_done()
        # Brightness 100 is converted to level 39 inside
        # abode.light.AbodeLight.turn_on.
        mock_set_level.assert_called_once_with(39)
async def test_set_color(hass):
    """Check that an hs_color service call is forwarded to Abode."""
    await setup_platform(hass, LIGHT_DOMAIN)

    with patch("abodepy.AbodeLight.set_color") as mock_set_color:
        service_data = {ATTR_ENTITY_ID: DEVICE_ID, "hs_color": [240, 100]}
        await hass.services.async_call(
            LIGHT_DOMAIN, SERVICE_TURN_ON, service_data, blocking=True
        )
        await hass.async_block_till_done()
        mock_set_color.assert_called_once_with((240.0, 100.0))
async def test_set_color_temp(hass):
    """Check that a color_temp service call is converted and forwarded."""
    await setup_platform(hass, LIGHT_DOMAIN)

    with patch("abodepy.AbodeLight.set_color_temp") as mock_set_color_temp:
        service_data = {ATTR_ENTITY_ID: DEVICE_ID, "color_temp": 309}
        await hass.services.async_call(
            LIGHT_DOMAIN, SERVICE_TURN_ON, service_data, blocking=True
        )
        await hass.async_block_till_done()
        # 309 mireds is converted to 3236 inside abode.light.AbodeLight.turn_on.
        mock_set_color_temp.assert_called_once_with(3236)
| [
"noreply@github.com"
] | tchellomello.noreply@github.com |
50d07982618790ca5eb53375fa7512fb22df7f49 | 289689eada7efe5f18a71b2026f1e8a7ffa5ec7b | /2015/DigitalHealthPlan/HealthEquity/connect2.py | f2e86683325c170bcfecfdf9e313c185ed1b726e | [] | no_license | bayindri/Work | f23ebcac1160acb0bed5a3c94901bc70a7114217 | 8e1c922db71fa28eede4fb39cd6a7212e95eb24a | refs/heads/master | 2021-01-01T20:04:57.665056 | 2020-06-26T17:02:10 | 2020-06-26T17:02:10 | 98,761,761 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,541 | py | from pysimplesoap.client import SoapClient
# Exploratory script against HealthEquity's MemberBalance SOAP service using
# pysimplesoap: build a client from the WSDL, dump the parsed service/port/
# operation metadata, patch the endpoint location, then attempt a
# GetMemberBalance call.  Much of the file is commented-out experimentation
# kept for reference.
#import logging
#logging.basicConfig(level=logging.DEBUG)

#client = SoapClient(wsdl="https://www.HealthEquity.com/Partner/MemberBalanceWebService.asmx?WSDL",soap_ns = 'mem', action ='') # worked!

# Build the client from the live WSDL; SOAPAction and namespace are supplied
# explicitly because the WSDL defaults did not match the endpoint.
client = SoapClient(
    wsdl="https://www.HealthEquity.com/Partner/MemberBalanceWebService.asmx?wsdl",
    #soap_ns = 'mem',
    action ='MemberBalanceWebService/GetMemberBalance',
    ns = 'mem')

print("Target Namespace", client.namespace)

import pprint
#pprint.pprint(client.services)

# Walk every service/port parsed from the WSDL, print its endpoint, and
# override the (erroneous) location advertised by the WSDL.
for service in client.services.values():
    for port in service['ports'].values():
        #print("PORT")
        #print(port)
        #print
        print(port['location'])
        # fix location (lhttps://www.HealthEquity.com:4000/Partner/MemberBalanceWebService.asmx is erroneous in the WSDL)
        port['location'] = "https://www.HealthEquity.com/Partner/MemberBalanceWebService.asmx"
        #print("AFTER PORT")
        #print(port['location'])
        #print
        # Dump each operation's metadata: SOAPAction, message parts, and the
        # declared input/output argument types.
        for op in port['operations'].values():
            print("****************************")
            print(port)
            print(port['location'])
            print(op)
            print('Name:', op['name'])
            print('Docs:', op['documentation'].strip())
            print('Parts:', op['parts'])
            print('Parts:input_header:', op['parts']['input_header'])
            print('Parts:input_header:message:-', op['parts']['input_header']['message'])
            print('Parts:input_header:part:', op['parts']['input_header']['part'])
            print('Parts:input_body:', op['parts']['input_body'])
            print('Parts:output_header:', op['parts']['output_header'])
            print('Parts:output_body:', op['parts']['output_body'])
            print('SOAPAction:', op['action'])
            print('Input:', op['input']['GetMemberBalance']) # args type declaration
            print('SessionId:', op['input']['GetMemberBalance']['SessionId']) # args type declaration
            print('MemberId:', op['input']['GetMemberBalance']['MemberId']) # args type declaration
            print('AccountTypeFlags:', op['input']['GetMemberBalance']['AccountTypeFlags']) # args type declaration
            print('BalanceTypeFlags:', op['input']['GetMemberBalance']['BalanceTypeFlags']) # args type declaration
            print('Output:', op['output']) # returns type declaration
            print ("***************************")

# fix location (localhost:9050 is erroneous in the WSDL)
#client.services['IWebServiceService']['ports']['IWebServicePort']['location'] = "https://186.153.145.2:9050/trazamed.WebService"
#print(client.services.ports)
#client.services['MemberBalanceWebService']['ports']['MemberBalanceWebServicePort']['location'] = "https://www.HealthEquity.com/Partner/MemberBalanceWebService.asmx"

# Confirm the patched endpoint locations took effect.
for service in client.services.values():
    for port in service['ports'].values():
        #print(port)
        print(port['location'])

#client = SoapClient(location="https://www.HealthEquity.com/Partner/MemberBalanceWebService.asmx", action="MemberBalanceWebService", namespace="", ns="mem")

# NOTE(review): credentials are hard-coded here (and again in the sample XML
# below); they should live in a secrets store and be rotated, not in source.
client['mem:MemberBalanceServiceAuthHeader'] = {'mem:Username': 'BCBSMA_websvc', 'mem:Password': 'MhGn031105K'}
#client['MemberBalanceServiceAuthHeader'] = {'Username': 'BCBSMA_websvc', 'Password': 'MhGn031105K'}

#print(client.GetMemberBalance(SessionId='9999',MemberId='981522383',AccountTypeFlags=4,BalanceTypeFlags=4))
#pprint.pprint(client.GetMemberBalance(SessionId='9999',MemberId='981522383',AccountTypeFlags=4,BalanceTypeFlags=4))
#pprint.pprint(client.GetMemberBalance(MemberId='981522383',AccountTypeFlags=4,BalanceTypeFlags=4))
#pprint.pprint(client.GetMemberBalance(MemberId='981522383'))
#client.xml_request()

# Fire the request and dump everything pysimplesoap knows about it.
print(client.as_xml())
pprint.pprint(client.GetMemberBalance())
pprint.pprint(client.GetMemberBalance.header())
pprint.pprint(client.GetMemberBalance('','981522383',4,4))
pprint.pprint(client.services)
#pprint.pprint(client.GetMemberBalance(SessionId='9999',MemberId='981522383',AccountTypeFlags=4,BalanceTypeFlags=4))
print("$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$")
#response = client.GetMemberBalance(SessionId='9999',MemberId='981522383',AccountTypeFlags='4',BalanceTypeFlags='4')

'''from pysimplesoap.client import SoapClient
from pysimplesoap.simplexml import SimpleXMLElement
from lxml import objectify
client = SoapClient(
    location = "https://www.HealthEquity.com/Partner/MemberBalanceWebService.asmx",
    #action = 'MemberBalanceWebService/GetMemberBalance', # SOAPAction
    #ns = 'mem'
    )
params = SimpleXMLElement("""
<?xml version="1.0" encoding="UTF-8"?>
<soapenv:Envelope xmlns:soapenv="http://schemas.xmlsoap.org/soap/envelope/" xmlns:mem="MemberBalanceWebService">
<soapenv:Header>
<mem:MemberBalanceServiceAuthHeader>
<!--Optional:-->
<mem:Username>BCBSMA_websvc</mem:Username>
<!--Optional:-->
<mem:Password>MhGn031105K</mem:Password>
</mem:MemberBalanceServiceAuthHeader>
</soapenv:Header>
<soapenv:Body>
<mem:GetMemberBalance>
<!--Optional:-->
<mem:SessionId>?</mem:SessionId>
<!--Optional:-->
<mem:MemberId>981522383</mem:MemberId>
<mem:AccountTypeFlags>4</mem:AccountTypeFlags>
<mem:BalanceTypeFlags>4</mem:BalanceTypeFlags>
</mem:GetMemberBalance>
</soapenv:Body>
</soapenv:Envelope>""") # manually make request msg
response = client.call('mem:GetMemberBalance',params)
result = response.GetMemberBalanceResult
print(result) # manully convert returned type'''
| [
"ayindri.banerjee@bcbsma.com"
] | ayindri.banerjee@bcbsma.com |
38488e71218e96a97bd3c3bb00261ae97ea6eb8c | 633701ef4d039d2cd0d4409bd8ad765b748f1b96 | /ZuheGesdatos/src/datostunnel/__init__.py | 60b4839076fb39f7710dc8e5171c086c49d23aa5 | [] | no_license | wahello/gesdatos | 4c991536f3265bf937ad117ed0c9c9b913182db5 | b7fa1939056baa01b48d310d981a5fb1493d6698 | refs/heads/master | 2020-03-11T12:25:37.275071 | 2015-12-14T04:25:35 | 2015-12-14T04:25:35 | null | 0 | 0 | null | null | null | null | ISO-8859-10 | Python | false | false | 2,203 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = "Daniel Romero"
__date__ = "$3/12/2015 10:24:29 AM$"
import base64
##-----------------------------------------------------------
class paramtunnel():
    """Connection parameters for the research-group server tunnel.

    Reads the base64-encoded file ``strkenc`` from the working directory and
    exposes one getter per field.  The decoded payload contains five
    CRLF-separated fields, in order: server IP, tunnel port, user password,
    user name, local (database server) IP.
    """

    def __init__(self):
        """Load and decode the parameter file into instance attributes."""
        txtfile = "strkenc"  # base64-obfuscated parameter file
        # base64.decodestring() was removed in Python 3.9; b64decode() works
        # everywhere.  Decode the payload to text so split("\r\n") is valid on
        # Python 3, and use a context manager so the handle is always closed.
        with open(txtfile, "rb") as handle:
            txt_text = base64.b64decode(handle.read()).decode("utf-8")
        fields = txt_text.split("\r\n")
        self.dirserver = str(fields[0])
        self.porttunnel = str(fields[1])
        self.pasuser = str(fields[2])
        self.user = str(fields[3])
        self.localip = str(fields[4])
        self.databaseserverport = "5432"  # fixed PostgreSQL port

    def getidirserver(self):
        """Return the server's IP address."""
        return self.dirserver

    def getporttunnel(self):
        """Return the server port used for the tunnel."""
        return self.porttunnel

    def getuser(self):
        """Return the user name used to create the tunnel."""
        return self.user

    def getpasuser(self):
        """Return the password of the server user."""
        return self.pasuser

    def getlocalip(self):
        """Return the database server's address."""
        return self.localip

    def getdatabaseserverport(self):
        """Return the database server port."""
        return self.databaseserverport
tunnel = paramtunnel() | [
"User@DANIELRPARRA77"
] | User@DANIELRPARRA77 |
5efc101cdbf8e412920f0ccebaf0c2a572e6f7ba | af6e7f0927517375cb4af833f4c52e301bad0af5 | /corpus_processor/topic_aware/filter_qa_corpus_by_topic_list.py | 90d3fa8fa6d532a86b504d45378701a28a47ca24 | [] | no_license | wolfhu/DialogPretraining | 470334fd815e1299981b827fdc933d237a489efd | eeeada92146d652d81ca6e961d1298924ac8435d | refs/heads/main | 2023-06-25T15:22:54.728187 | 2021-07-21T01:40:23 | 2021-07-21T01:40:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,834 | py | # encoding: utf-8
import sys
from util.trie import Trie
tag_file_path = '/home/t-yuniu/xiaoice/yuniu/dataset/processed/domain/sport/keywords'
# Tag ้ปๅๅ
tag_black_dict = {}
# tag_black_dict.setdefault('ๆธธๆ', True)
tag_trie = Trie()
def detect_tag(sentence):
    """
    Collect every tag from the global ``tag_trie`` that occurs in ``sentence``.

    Walks the trie from each start offset, extending one character at a time,
    and records a match whenever the current node is marked as a word end, so
    overlapping and nested tags are all reported.

    :param sentence: query or answer text to scan
    :return: list of matched tag substrings (empty when none match)
    """
    length = len(sentence)
    detected_tags = []
    for idx in range(length):
        # Restart the trie walk at every character offset.
        node = tag_trie.lookup
        idx_tmp = idx
        while True:
            if idx_tmp >= length:
                break
            if sentence[idx_tmp] in node:
                node = node[sentence[idx_tmp]]
                idx_tmp += 1
                if Trie.END in node:
                    detected_tags.append(sentence[idx:idx_tmp])
            else:
                break
    return detected_tags
if __name__ == '__main__':
    # Build the trie from the keyword file; skip single-character tags and
    # anything on the blacklist.
    with open(tag_file_path) as douban_tag_file:
        for line in douban_tag_file.readlines():
            tag = line.strip()
            if len(tag) == 1 or tag in tag_black_dict:
                continue
            tag_trie.insert(tag)
    # Filter the corpus from stdin: keep only QA pairs whose answer contains
    # at least one tag.
    while True:
        line = sys.stdin.readline().strip()
        if line:
            try:
                line = line.replace('#', '')
                query, answer = line.split('\t')[:2]
                # detected_tags = detect_tag(query)
                detected_tags = []
                detected_tags.extend(detect_tag(answer))
                if len(detected_tags) > 0:
                    # Output format: unique tags, query, answer (tab separated).
                    print('\t'.join([' '.join(set(detected_tags)), query, answer]))
            except ValueError:
                # NOTE(review): this diagnostic goes to stdout and so mixes
                # with the filtered corpus -- presumably stderr was intended;
                # confirm before changing.
                sys.stdout.write('Illegal line.\n')
        else:
            break
| [
"yuwu1@microsoft.com"
] | yuwu1@microsoft.com |
c2eab84e232f590469f2bb0cea19a803ec121d0f | 2fabc9255adbe1cc055eb4b2402f8526f389f257 | /model/modules.py | 86464633b715d37b344f74882941fce2b5d70ab8 | [
"MIT"
] | permissive | asr2021/WaveGrad2 | 657323be12d16667fc0a3b7f2a168101e6e913cb | ba7715d760999093dd99283f48971c5115210b51 | refs/heads/main | 2023-06-02T18:48:56.830462 | 2021-06-23T07:22:10 | 2021-06-23T08:10:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,959 | py | import os
import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
from .blocks import (
ZoneOutBiLSTM,
LinearNorm,
ConvBlock,
)
from text.symbols import symbols
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class TextEncoder(nn.Module):
    """Text encoder: token embedding -> conv stack -> zoneout bi-LSTM."""

    def __init__(self, config):
        super(TextEncoder, self).__init__()
        n_src_vocab = len(symbols) + 1  # +1 so index 0 can be the padding id
        d_word_vec = config["transformer"]["encoder_hidden"]
        n_layers = config["transformer"]["encoder_layer"]
        d_model = config["transformer"]["encoder_hidden"]
        kernel_size = config["transformer"]["encoder_kernel_size"]
        dropout = config["transformer"]["encoder_dropout"]
        zoneout = config["transformer"]["encoder_zoneout"]

        self.d_model = d_model
        # Token embedding; index 0 is reserved for padding.
        self.src_word_emb = nn.Embedding(
            n_src_vocab, d_word_vec, padding_idx=0
        )
        # Stack of identical convolution blocks applied in sequence.
        self.conv_stack = nn.ModuleList(
            [
                ConvBlock(
                    d_model, d_model, kernel_size=kernel_size, dropout=dropout
                )
                for _ in range(n_layers)
            ]
        )
        # Bidirectional LSTM with zoneout regularisation.
        self.lstm = ZoneOutBiLSTM(
            d_model, zoneout_rate=zoneout
        )

    def forward(self, src_seq, mask=None):
        """Encode token ids [B, L] into features [B, L, d_model].

        ``mask`` is optional; positions where it is True are zeroed in the
        output and it is also forwarded to every convolution block.
        """
        enc_output = self.src_word_emb(src_seq)
        for conv in self.conv_stack:
            enc_output = conv(enc_output, mask=mask)
        enc_output = self.lstm(enc_output)

        if mask is not None:
            enc_output = enc_output.masked_fill(mask.unsqueeze(-1), 0.)

        return enc_output
class VarianceAdaptor(nn.Module):
    """Predicts per-position durations and stretches the input in time.

    When ``duration_target`` is given (training) it is used directly for the
    Gaussian upsampling; otherwise (inference) durations are decoded from the
    log-domain predictor and scaled by ``d_control``.
    """

    def __init__(self, preprocess_config, model_config):
        super(VarianceAdaptor, self).__init__()
        self.duration_predictor = DurationPredictor(model_config)
        self.gaussian_upsampling = GaussianUpsampling(model_config)

    def forward(
        self,
        x,
        src_mask,
        duration_target=None,
        d_control=1.0,
    ):
        log_duration_prediction = self.duration_predictor(x, src_mask)

        if duration_target is None:
            # Invert the log-domain prediction and apply the speed control.
            duration_rounded = torch.clamp(
                torch.round(torch.exp(log_duration_prediction) - 1) * d_control,
                min=0,
            )
        else:
            duration_rounded = duration_target

        upsampled, attn = self.gaussian_upsampling(x, duration_rounded, src_mask)
        return upsampled, log_duration_prediction, duration_rounded, attn
class GaussianUpsampling(nn.Module):
    """Expands encoder frames in time with fixed-width Gaussian weights.

    Each input position contributes to every output frame according to a
    Gaussian centred at the middle of its duration span; weights are
    normalised over input positions per output frame.
    """

    def __init__(self, model_config):
        super(GaussianUpsampling, self).__init__()

    def forward(self, encoder_outputs, duration, mask):
        device = encoder_outputs.device

        total_frames = torch.sum(duration, dim=-1, keepdim=True)  # [B, 1]
        span_ends = torch.cumsum(duration, dim=-1).float()  # [B, L]
        centers = (span_ends - 0.5 * duration).unsqueeze(2)  # [B, L, 1]

        # Output-frame time axis: 1 .. T, shaped [1, 1, T].
        frame_idx = torch.arange(
            1, torch.max(total_frames).item() + 1, device=device
        ).unsqueeze(0).unsqueeze(1)

        # Unnormalised Gaussian weights, then normalise over input positions.
        energies = torch.exp(-0.1 * (frame_idx - centers) ** 2)  # [B, L, T]
        norm = torch.sum(energies, dim=1, keepdim=True)  # [B, 1, T]
        norm[norm == 0.] = 1.  # guard frames covered by no position
        attn = energies / norm

        upsampled = torch.matmul(attn.transpose(1, 2), encoder_outputs)
        return upsampled, attn
class DurationPredictor(nn.Module):
    """Predicts one non-negative (log-domain) duration per input position.

    A two-layer bidirectional LSTM followed by a linear projection and a
    ReLU; masked positions are forced to zero.
    """

    def __init__(self, model_config):
        super(DurationPredictor, self).__init__()
        in_dim = model_config["transformer"]["encoder_hidden"]
        hidden_dim = model_config["variance_predictor"]["variance_hidden"]

        self.duration_lstm = nn.LSTM(
            in_dim,
            int(hidden_dim / 2), 2,
            batch_first=True, bidirectional=True
        )
        self.duration_proj = nn.Sequential(
            LinearNorm(hidden_dim, 1),
            nn.ReLU(),
        )

    def forward(self, encoder_output, mask):
        features, _ = self.duration_lstm(encoder_output)
        prediction = self.duration_proj(features).squeeze(-1)  # [B, L]
        if mask is not None:
            prediction = prediction.masked_fill(mask, 0.0)
        return prediction
# class RangeParameterPredictor(nn.Module):
# """ Range Parameter Predictor """
# def __init__(self, model_config):
# super(RangeParameterPredictor, self).__init__()
# encoder_hidden = model_config["transformer"]["encoder_hidden"]
# variance_hidden = model_config["variance_predictor"]["variance_hidden"]
# self.range_param_lstm = nn.LSTM(
# encoder_hidden + 1,
# int(variance_hidden / 2), 2,
# batch_first=True, bidirectional=True
# )
# self.range_param_proj = nn.Sequential(
# LinearNorm(variance_hidden, 1),
# nn.Softplus(),
# )
# def forward(self, encoder_output, duration, mask):
# range_param_input = torch.cat([encoder_output, duration.unsqueeze(-1)], dim=-1)
# range_param_prediction, _ = self.range_param_lstm(range_param_input)
# range_param_prediction = self.range_param_proj(range_param_prediction)
# range_param_prediction = range_param_prediction.squeeze(-1) # [B, L]
# if mask is not None:
# range_param_prediction = range_param_prediction.masked_fill(mask, 0.0)
# return range_param_prediction
class SamplingWindow(nn.Module):
    """Cuts a fixed-length training window from encoder output and audio.

    ``segment_length`` is the window size in encoder frames; the paired audio
    window is ``upsampling_rate`` times longer (``segment_length_up``).
    """

    def __init__(self, model_config, train_config):
        super(SamplingWindow, self).__init__()
        self.upsampling_rate = model_config["wavegrad"]["upsampling_rate"]
        self.segment_length_up = train_config["window"]["segment_length"]
        self.segment_length = train_config["window"]["segment_length"] // self.upsampling_rate

    def pad_seq(self, seq, segment_length):
        """Right-pad ``seq`` along the time axis (dim 1) to ``segment_length``.

        Note: a sequence longer than ``segment_length`` is truncated instead,
        because F.pad with a negative pad removes trailing elements.
        """
        if len(seq.shape) > 2:
            # [B, T, C]: F.pad acts on the last dim, so swap T there and back.
            return torch.nn.functional.pad(
                seq.transpose(-2, -1), (0, segment_length - seq.shape[1]), 'constant'
            ).data.transpose(-2, -1)
        return torch.nn.functional.pad(
            seq, (0, segment_length - seq.shape[1]), 'constant'
        ).data

    def get_hidden_segment(self, hiddens, seq_starts):
        """Slice one ``segment_length``-frame window per batch element."""
        batch = list()
        for i, (hidden, seq_start) in enumerate(zip(hiddens, seq_starts)):
            batch.append(hidden[seq_start:seq_start + self.segment_length])
        return torch.stack(batch)

    def forward(self, encoder_output, audio, seq_starts=None, full_len=False):
        """Return (encoder window, audio window); pass-through when full_len.

        Bug fix: the original computed ``get_hidden_segment`` for long
        sequences and then unconditionally overwrote it with a (truncating)
        ``pad_seq`` of the full output, so ``seq_starts`` was ignored.  The
        extracted window is now actually used.
        """
        if full_len:
            return encoder_output, audio

        if encoder_output.shape[1] > self.segment_length:
            # Long sequence: take the window starting at seq_starts.
            encoder_segment = self.get_hidden_segment(encoder_output, seq_starts)
        else:
            # Short sequence: zero-pad up to the window length.
            encoder_segment = self.pad_seq(encoder_output, self.segment_length)
        audio_segment = self.pad_seq(audio, self.segment_length_up)
        return encoder_segment, audio_segment
| [
"keonlee9420@gmail.com"
] | keonlee9420@gmail.com |
4dad72ebc7956f2e83c677733d880dec2b2fd50f | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_10938.py | 978f85ef643f608646c73eface82e3ca6748bc7b | [] | no_license | AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 117 | py | # Installing psycopg2 on Mountain Lion (brew + pip)
defaults write com.apple.versioner.python Prefer-32-Bit -bool no
| [
"ubuntu@ip-172-31-7-228.us-west-2.compute.internal"
] | ubuntu@ip-172-31-7-228.us-west-2.compute.internal |
ee31424e1fa619b23210e9169fac7dda9b51a252 | b46e30188f8e132b012cc07af947aed5f07f9e97 | /find_cosine_simi.py | 9fd40ec226cd72ac79198a3b0f6e4f36e15d1cb1 | [] | no_license | mahmudulhasancsedu5/sent2vect | 84b8ab1b6640d88fbe698019eee4f5a12ed39322 | fe36bea17f6b888b5b8796021c135b8d9bf4041e | refs/heads/master | 2021-01-11T02:53:00.328335 | 2016-10-22T04:39:18 | 2016-10-22T04:39:18 | 70,888,705 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,755 | py | """
-------reade vectore file and find cosine similarity-------
"""
# Read a sent2vec-style vector file (first line: "<rows> <cols>", then one
# "sent_<i> v1 v2 ..." line per sentence) and, for every sentence, write its
# summed cosine similarity against all sentences to an output file.
# NOTE(review): Python 2 script; ``file`` shadows the builtin of that name.
file=open('news1.txt.vec','r')
outputFile=open('news1_sentence_cosine_value.txt','w')
#lines_list=file_id.readlines()
#print lines_list
line_list=file.readlines()
line_1=line_list[0].split(' ')
row_count=int(line_1[0])
colum_count=int(line_1[1])
data_arr=[]
mat=[]
i=0
"""
delete the first 2 length row , clm array and
create only a vectore array
"""
# Skip the header line; split each remaining line into string tokens.
for line in line_list:
    if i!=0:
        data_arr=line.replace('\n','').split(' ')
        mat.append(data_arr)
    i+=1
"""
1. remove sent_x string from front
2. replace string value by int(str_value) value
"""
# Drop the leading "sent_<i>" label and convert the rest to floats.
for i in range(0,row_count):
    line=mat[i]
    line.remove('sent_'+str(i))
    line=[float(x) for x in line]
    mat[i]=line
#print len(mat[1])
from scipy import spatial
#-------test
#cosine_val=1-spatial.distance.cosine(mat[1],mat[2])
#print cosine_val
"""
#test
for x in mat:
    print len(x)
"""
"""
1. calculate pair wise cosine(s1,s2) value
"""
sent_cosine_val_array=[]
i=0
# For each sentence u, accumulate cosine(u, v) over every sentence v
# (including u itself, which contributes 1.0).
for u in mat:
    j=0
    sent_u_val=0.0
    for v in mat:
        #print i,j
        cosine_val=1-spatial.distance.cosine(u,v)
        sent_u_val+=cosine_val
        #cosine_val=float(cosine_val)
        #print "cosine(u,v) = "+str(cosine_val)+"\n"
        j+=1
    print "sent_"+str(i)+" total cosine value = "+str(sent_u_val)
    out_str=str("sent_"+str(i)+" "+str(sent_u_val))
    # NOTE(review): the in-memory list stores the AVERAGE (total / sentence
    # count) while the file stores the raw total -- confirm which is intended.
    sent_cosine_val_array.append(sent_u_val/len(mat))
    outputFile.write(out_str+'\n')
    i+=1
print "len-----> "+str(len(sent_cosine_val_array))
file.close()
outputFile.close()
"""
for line in line_list:
    file1.write(str(line)+""+str(i))
    print str(line)+"------"+str(i)
    i=i+1
"""
| [
"hasancsedu5@gmail.com"
] | hasancsedu5@gmail.com |
65fb7d1f09f5025ac382844a2d9ef74135ec3e29 | d1367994cea3d3b08e48b66597a63fdb49c68dac | /photo/models.py | c583d7af321ee87ec39fe73677878c5c76e1d160 | [] | no_license | alasheep/pystagram | 8b44216604192a7db33323df350d0c994ab90f75 | 45d44af7f9cc5b9dab73ec570a568bc854dfa52b | refs/heads/master | 2016-08-12T07:01:41.724847 | 2016-01-01T23:52:36 | 2016-01-01T23:52:36 | 48,722,903 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 625 | py | #from __future__ import unicode_literals
# coding: utf-8
from django.db import models
# Create your models here.
class Photo(models.Model):
    """An uploaded photo plus its filtered copy and optional description."""

    #id = 'index value distinguishing each individual photo'
    # Original upload, stored under a date-based directory.
    image_file = models.ImageField(upload_to='%Y/%m/%d')
    # Copy with the filter applied, stored under the static files tree.
    filtered_image_file = models.ImageField(upload_to='static_files/uploaded/%Y/%m/%d')
    description = models.TextField(max_length=500, blank=True)
    created_at = models.DateTimeField(auto_now_add=True)

    def delete(self, *args, **kwargs):
        """Remove both image files from storage before deleting the row."""
        self.image_file.delete()
        self.filtered_image_file.delete()
        super(Photo, self).delete(*args, **kwargs)
| [
"alasheep@hanmail.net"
] | alasheep@hanmail.net |
08de5456e8af14a088ef40511b9b3774b8805421 | a7807e4a49a06b748cff273fe8c0dc79b5e64ca8 | /orby/Scripts/django-admin.py | e15c2e9f1d2447bd8344d9aa8ae6a8f207aaf426 | [] | no_license | orhunakar01/labotestalep | 0cb864522821f9d4f168996db15a38fc166d57b3 | 6c6958d49e65d30d5f80c09ee1618c8cc7dd8100 | refs/heads/master | 2023-03-23T00:32:49.474106 | 2021-03-16T18:46:08 | 2021-03-16T18:46:08 | 348,455,136 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 695 | py | #!c:\users\orhun\desktop\djangologin\orby\scripts\python.exe
# When the django-admin.py deprecation ends, remove this script.
# Virtualenv-generated compatibility shim: forwards to django-admin while
# emitting the Django 3.1 deprecation warning for the ``.py`` entry point.
import warnings

from django.core import management

try:
    from django.utils.deprecation import RemovedInDjango40Warning
except ImportError:
    # Django 4.0+ removed both the warning class and this script's purpose,
    # so fail loudly with an actionable message.
    raise ImportError(
        'django-admin.py was deprecated in Django 3.1 and removed in Django '
        '4.0. Please manually remove this script from your virtual environment '
        'and use django-admin instead.'
    )

if __name__ == "__main__":
    warnings.warn(
        'django-admin.py is deprecated in favor of django-admin.',
        RemovedInDjango40Warning,
    )
    management.execute_from_command_line()
| [
"orhunakar@yandex.com"
] | orhunakar@yandex.com |
2bb9e8941509720018e382a768800bf12e758dab | 46fb758e130547f8283df0d40ecfd89dd4b323c6 | /hooks/clang_tidy.py | 3b35ed0f7f975cd44f972a57d760cdb4fffb1faf | [
"Apache-2.0"
] | permissive | zackw/pre-commit-lint-c | c3db0041f9b8b673f012efc7dde4875eabc4b691 | 8e4e813fd8c8149d62d92b006b25cf53eadf38e1 | refs/heads/master | 2023-06-27T04:51:18.553095 | 2021-08-01T00:45:39 | 2021-08-01T01:01:12 | 391,479,933 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,731 | py | #!/usr/bin/env python3
"""Wrapper script for clang-tidy."""
#############################################################################
import re
import sys
from hooks.utils import ClangAnalyzerCmd
class ClangTidyCmd(ClangAnalyzerCmd):
    """Runner for the ``clang-tidy`` pre-commit hook.

    Invokes clang-tidy once per staged file, relays its output, and exits
    non-zero when any file produced errors.
    """

    command = "clang-tidy"
    # Text preceding the version number in ``clang-tidy --version`` output.
    lookbehind = "LLVM version "

    def __init__(self, args):
        super().__init__(self.command, self.lookbehind, args)
        self.parse_args(args)
        # clang-tidy rewrites files itself when -fix/--fix-errors is passed.
        self.edit_in_place = "-fix" in self.args or "--fix-errors" in self.args
        self.parse_ddash_args()

    def run(self):
        """Run clang-tidy on every file; exit non-zero if errors occurred."""
        for filename in self.files:
            self.run_command(filename)
            sys.stdout.buffer.write(self.stdout)
            # The warning count varies with errors in system files, so strip
            # it to keep output stable.  Bug fix: the pattern must be a raw
            # bytes literal -- in b"\d+..." the \d is an invalid escape
            # sequence (SyntaxWarning, and an error in future Pythons).
            self.stderr = re.sub(rb"\d+ warnings and ", b"", self.stderr)
            # Don't forward stderr when it only complains about system files.
            no_sysfile_warning = b"non-user code" not in self.stderr
            # On real findings clang-tidy writes its warnings to stderr.
            if len(self.stdout) > 0 and no_sysfile_warning:
                sys.stderr.buffer.write(self.stderr)
            else:
                self.stderr = b""
            has_errors = (
                b"error generated." in self.stderr
                or b"errors generated." in self.stderr
            )
            if has_errors:  # Change return code if errors are generated
                self.returncode = 1
        if self.returncode != 0:
            sys.exit(self.returncode)
def main(argv=None):
    """Build the clang-tidy command from CLI arguments and run it."""
    cmd = ClangTidyCmd(argv)
    cmd.run()


if __name__ == "__main__":
    main()
| [
"rj@swit.sh"
] | rj@swit.sh |
cee380e5080ef322e11d53fe0e88028403f92664 | 8176d7939cb6058f818196196168eeb4a9206766 | /crudApp/admin.py | 874c5a3309b3aebdb9efe0a824cb688b9095760d | [] | no_license | cs-fullstack-2019-spring/django-crud2-cw-tdude0175 | d8f0d2d587b8e4fc2c00890ee0be02cadef5e87f | fa0428c3e09c93022318003f226e0168b47ab5a5 | refs/heads/master | 2020-04-26T00:11:55.314615 | 2019-03-02T00:00:41 | 2019-03-02T00:00:41 | 173,169,790 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | from django.contrib import admin
from .models import ContactModel

# Register your models here.
# Expose ContactModel in the Django admin using the default ModelAdmin.
admin.site.register(ContactModel)
"tdude0175@gmail.com"
] | tdude0175@gmail.com |
8443c14c2499a5e15fc3179e5faf3e4f3d754aa7 | 76b15070c6cde9366a6133a6ecdf556715131497 | /custom_components/salus/const.py | 3072020f217f46de282a3b9a524d5a3878ab511b | [
"MIT"
] | permissive | MatthewAger/homeassistant_salus | f38c11f40f338597fb04771bd3986da13331e03e | b74f316e3254faadbf4ae919f8d5b9e8fa9349ba | refs/heads/master | 2023-06-24T00:26:46.325866 | 2021-04-24T09:53:35 | 2021-04-24T09:53:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 64 | py | """Constants of the Salus iT600 component."""
DOMAIN = "salus"
| [
"julius.vitkauskas@trafi.com"
] | julius.vitkauskas@trafi.com |
66aefdce6c1839e0f4b8dfbe62df72f1d60af25d | b15d2787a1eeb56dfa700480364337216d2b1eb9 | /accelbyte_py_sdk/api/match2/operations/backfill/accept_backfill.py | e35c0aa725d1ba4f04c748c7ad397734654b2930 | [
"MIT"
] | permissive | AccelByte/accelbyte-python-sdk | dedf3b8a592beef5fcf86b4245678ee3277f953d | 539c617c7e6938892fa49f95585b2a45c97a59e0 | refs/heads/main | 2023-08-24T14:38:04.370340 | 2023-08-22T01:08:03 | 2023-08-22T01:08:03 | 410,735,805 | 2 | 1 | MIT | 2022-08-02T03:54:11 | 2021-09-27T04:00:10 | Python | UTF-8 | Python | false | false | 8,605 | py | # Copyright (c) 2021 AccelByte Inc. All Rights Reserved.
# This is licensed software from AccelByte Inc, for limitations
# and restrictions contact your company contract manager.
#
# Code generated. DO NOT EDIT!
# template file: ags_py_codegen
# pylint: disable=duplicate-code
# pylint: disable=line-too-long
# pylint: disable=missing-function-docstring
# pylint: disable=missing-module-docstring
# pylint: disable=too-many-arguments
# pylint: disable=too-many-branches
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-statements
# pylint: disable=unused-import
# AccelByte Gaming Services Match Service V2 (2.8.4)
from __future__ import annotations
from typing import Any, Dict, List, Optional, Tuple, Union
from .....core import Operation
from .....core import HeaderStr
from .....core import HttpResponse
from ...models import ApiBackFillAcceptRequest
from ...models import ModelsGameSession
from ...models import ResponseError
class AcceptBackfill(Operation):
"""Accept a backfill proposal (AcceptBackfill)
Required Permission: NAMESPACE:{namespace}:MATCHMAKING:BACKFILL [UPDATE]
Required Scope: social
Accept backfill proposal
Required Permission(s):
- NAMESPACE:{namespace}:MATCHMAKING:BACKFILL [UPDATE]
Required Scope(s):
- social
Properties:
url: /match2/v1/namespaces/{namespace}/backfill/{backfillID}/proposal/accept
method: PUT
tags: ["Backfill", "public"]
consumes: ["application/json"]
produces: ["application/json"]
securities: [BEARER_AUTH]
body: (body) REQUIRED ApiBackFillAcceptRequest in body
backfill_id: (backfillID) REQUIRED str in path
namespace: (namespace) REQUIRED str in path
Responses:
200: OK - ModelsGameSession (OK)
400: Bad Request - ResponseError (Bad Request)
401: Unauthorized - ResponseError (Unauthorized)
403: Forbidden - ResponseError (Forbidden)
404: Not Found - ResponseError (Not Found)
500: Internal Server Error - ResponseError (Internal Server Error)
"""
# region fields
_url: str = (
"/match2/v1/namespaces/{namespace}/backfill/{backfillID}/proposal/accept"
)
_method: str = "PUT"
_consumes: List[str] = ["application/json"]
_produces: List[str] = ["application/json"]
_securities: List[List[str]] = [["BEARER_AUTH"]]
_location_query: str = None
body: ApiBackFillAcceptRequest # REQUIRED in [body]
backfill_id: str # REQUIRED in [path]
namespace: str # REQUIRED in [path]
# endregion fields
# region properties
@property
def url(self) -> str:
return self._url
@property
def method(self) -> str:
return self._method
@property
def consumes(self) -> List[str]:
return self._consumes
@property
def produces(self) -> List[str]:
return self._produces
@property
def securities(self) -> List[List[str]]:
return self._securities
@property
def location_query(self) -> str:
return self._location_query
# endregion properties
# region get methods
# endregion get methods
# region get_x_params methods
def get_all_params(self) -> dict:
return {
"body": self.get_body_params(),
"path": self.get_path_params(),
}
def get_body_params(self) -> Any:
if not hasattr(self, "body") or self.body is None:
return None
return self.body.to_dict()
def get_path_params(self) -> dict:
result = {}
if hasattr(self, "backfill_id"):
result["backfillID"] = self.backfill_id
if hasattr(self, "namespace"):
result["namespace"] = self.namespace
return result
# endregion get_x_params methods
# region is/has methods
# endregion is/has methods
# region with_x methods
def with_body(self, value: ApiBackFillAcceptRequest) -> AcceptBackfill:
self.body = value
return self
def with_backfill_id(self, value: str) -> AcceptBackfill:
self.backfill_id = value
return self
def with_namespace(self, value: str) -> AcceptBackfill:
self.namespace = value
return self
# endregion with_x methods
# region to methods
def to_dict(self, include_empty: bool = False) -> dict:
result: dict = {}
if hasattr(self, "body") and self.body:
result["body"] = self.body.to_dict(include_empty=include_empty)
elif include_empty:
result["body"] = ApiBackFillAcceptRequest()
if hasattr(self, "backfill_id") and self.backfill_id:
result["backfillID"] = str(self.backfill_id)
elif include_empty:
result["backfillID"] = ""
if hasattr(self, "namespace") and self.namespace:
result["namespace"] = str(self.namespace)
elif include_empty:
result["namespace"] = ""
return result
# endregion to methods
# region response methods
# noinspection PyMethodMayBeStatic
def parse_response(
self, code: int, content_type: str, content: Any
) -> Tuple[
Union[None, ModelsGameSession], Union[None, HttpResponse, ResponseError]
]:
"""Parse the given response.
200: OK - ModelsGameSession (OK)
400: Bad Request - ResponseError (Bad Request)
401: Unauthorized - ResponseError (Unauthorized)
403: Forbidden - ResponseError (Forbidden)
404: Not Found - ResponseError (Not Found)
500: Internal Server Error - ResponseError (Internal Server Error)
---: HttpResponse (Undocumented Response)
---: HttpResponse (Unexpected Content-Type Error)
---: HttpResponse (Unhandled Error)
"""
pre_processed_response, error = self.pre_process_response(
code=code, content_type=content_type, content=content
)
if error is not None:
return None, None if error.is_no_content() else error
code, content_type, content = pre_processed_response
if code == 200:
return ModelsGameSession.create_from_dict(content), None
if code == 400:
return None, ResponseError.create_from_dict(content)
if code == 401:
return None, ResponseError.create_from_dict(content)
if code == 403:
return None, ResponseError.create_from_dict(content)
if code == 404:
return None, ResponseError.create_from_dict(content)
if code == 500:
return None, ResponseError.create_from_dict(content)
return self.handle_undocumented_response(
code=code, content_type=content_type, content=content
)
# endregion response methods
# region static methods
    @classmethod
    def create(
        cls, body: ApiBackFillAcceptRequest, backfill_id: str, namespace: str, **kwargs
    ) -> AcceptBackfill:
        """Build a new instance populated with the given body, backfill id and namespace.

        Extra keyword arguments are accepted but ignored.
        """
        instance = cls()
        instance.body = body
        instance.backfill_id = backfill_id
        instance.namespace = namespace
        return instance
@classmethod
def create_from_dict(
cls, dict_: dict, include_empty: bool = False
) -> AcceptBackfill:
instance = cls()
if "body" in dict_ and dict_["body"] is not None:
instance.body = ApiBackFillAcceptRequest.create_from_dict(
dict_["body"], include_empty=include_empty
)
elif include_empty:
instance.body = ApiBackFillAcceptRequest()
if "backfillID" in dict_ and dict_["backfillID"] is not None:
instance.backfill_id = str(dict_["backfillID"])
elif include_empty:
instance.backfill_id = ""
if "namespace" in dict_ and dict_["namespace"] is not None:
instance.namespace = str(dict_["namespace"])
elif include_empty:
instance.namespace = ""
return instance
    @staticmethod
    def get_field_info() -> Dict[str, str]:
        """Map serialized (wire) field names to their python attribute names."""
        return {
            "body": "body",
            "backfillID": "backfill_id",
            "namespace": "namespace",
        }
    @staticmethod
    def get_required_map() -> Dict[str, bool]:
        """Flag each serialized field as required; all fields are required here."""
        return {
            "body": True,
            "backfillID": True,
            "namespace": True,
        }
# endregion static methods
| [
"elmernocon@gmail.com"
] | elmernocon@gmail.com |
d2086cfc57ff171d6201a1bd3fbc011282d61392 | 65e82f0f584711084f2cf2cee6f343c6693ca552 | /LeetCode_exercises/ex0050_pow.py | 7296795a0db57b1be33e5f2378bbb40174bf865e | [] | no_license | msjithin/LeetCode_exercises | 3fd7e9e3dbc75270c2cb497931cdbdc9135d113d | f1b466a5f2ffc9ff00a0d9895bda145eb3c7db54 | refs/heads/master | 2023-02-16T00:15:41.676725 | 2021-01-05T18:53:07 | 2021-01-05T18:53:07 | 305,853,604 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,478 | py | """
Implement pow(x, n), which calculates x raised to the power n (i.e. xn).
"""
class Solution:
    # NOTE(review): `myPow` is declared without `self`; it only works when
    # called through the class, e.g. Solution.myPow(2.0, 10) — confirm intended.
    def myPow( x: float, n: int) -> float:
        # Computes x ** n by repeatedly peeling off the largest power-of-two
        # chunk of the remaining exponent via repeated squaring.
        print(' Calcuating {} ^ {}'.format(x, n) )
        if x==0:
            # 0 ** anything handled as 0 (also swallows the undefined 0 ** negative case)
            return 0
        if n==0:
            return 1
        if abs(x) == 1.0:
            # +/-1 shortcut: result sign depends only on the parity of n
            return 1 if n%2==0 else x
        num = x
        npow = abs(n)  # work with the positive exponent; sign handled at the end
        f_out=1        # accumulated result
        counter=0      # iteration counter, only used by the debug print below
        while npow>0:
            if f_out==0:
                break
            # square `num` as long as doubling `tmp` stays below the remaining exponent
            tmp , tmpf_out=1, num
            while tmp+tmp < npow :
                counter+=1
                if tmp==0 or tmpf_out==1 :
                    break
                tmp+=tmp
                tmpf_out*=tmpf_out
            counter+=1
            npow-=tmp          # consume the exponent chunk just covered
            f_out *= tmpf_out
        print(' outer loop counter = ', counter , 'f_out=', f_out,'')
        if n < 0:
            # negative exponent: x ** n == 1 / (x ** |n|)
            return 1/f_out
        return f_out
class Solution2:
    # Iterative binary exponentiation; called through the class (no `self`),
    # e.g. Solution2.myPow(2.0, 10).
    def myPow(x: float, n: int) -> float:
        """Compute x ** n in O(log n) multiplications."""
        print('calculate {} ^{}'.format(x, n))
        if n < 0:
            # fold the negative exponent into the base
            x = 1 / x
            n = -n
        if abs(x) == 1:
            return 1 if n % 2 == 0 else x
        result = 1
        base = x
        while n > 0:
            if base in (0, 1):
                # further squaring cannot change anything
                result = base
                break
            n, odd_bit = divmod(n, 2)
            if odd_bit:
                result *= base
            base *= base
        return result
"ms@wisc.edu"
] | ms@wisc.edu |
058ad0c1afc34a157275f486480d9005b80911f4 | 51d94a83baff0adce5e2cafde488add67c23fe30 | /ipython/bin/jupyter-qtconsole | 327eb3bdaf3603c29ba9f2586a0d2bcb6d5c7bfe | [] | no_license | odulzaides/virtualenvs | 2add8aef6488c7903a7c24abef626da22aa192ad | 95a8a5fc08899f33a3ab478195916774db36d062 | refs/heads/master | 2020-05-30T07:13:12.803174 | 2016-10-06T20:34:13 | 2016-10-06T20:34:13 | 70,190,719 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 262 | #!/Users/Tuti/Documents/Virtualenv/ipython/bin/python2.7
# -*- coding: utf-8 -*-
# Console-script launcher for the Jupyter QtConsole (pip/setuptools-style
# generated entry point — presumably not hand-edited; confirm before changing).
import re
import sys
from qtconsole.qtconsoleapp import main
if __name__ == '__main__':
    # Strip the "-script.pyw"/".exe" suffix Windows launchers append to argv[0].
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"dulzaides.oscar@gmail.com"
] | dulzaides.oscar@gmail.com | |
869867848ac4fa8e57b44049e3616774b3f462a3 | 49327a8ea95879bd6cf1350bb50bfb3aaeb3337d | /rsc/snippets/recursion_for-loop.py | dbba0eb3a2b4c13fd16032dada22c9622a55ba8a | [] | no_license | paast/LC-challenge | 553676a5549617d221dfd943ba3bd7356f5e0bd8 | 28aed23f2243a10e24e9c059ebf109ae8679ad08 | refs/heads/master | 2020-04-07T15:04:56.660706 | 2019-01-24T22:59:35 | 2019-01-24T22:59:35 | 158,469,338 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 96 | py | my_list = [1, 2, 3, 4, 5]
# Emit every element followed by a space, all on one line.
print(' '.join(str(value) for value in my_list), end=' ')
# will print "1 2 3 4 5"
| [
"banana-man@snposaw.com"
] | banana-man@snposaw.com |
819d2477a179823c7162021849a499b3398f8026 | 824da02c8fb0929a91155031430939e0f565390e | /migrations/versions/6cf36da6705f_.py | 299ea394a3c738b3b6ac6015aaa85d2ddef4333a | [
"Apache-2.0"
] | permissive | alexmeigz/Nutriflix_Flask_Backend | 58247070ad525861b7fc66001424b2d18305a626 | 95c9981b9342b0509d5c58ea6e9f2a8b400d1444 | refs/heads/master | 2023-01-30T01:10:09.642554 | 2020-12-14T20:04:41 | 2020-12-14T20:04:41 | 301,246,429 | 0 | 0 | Apache-2.0 | 2020-12-14T20:04:42 | 2020-10-04T23:16:13 | Python | UTF-8 | Python | false | false | 657 | py | """empty message
Revision ID: 6cf36da6705f
Revises: 6561c4454534
Create Date: 2020-11-15 02:32:23.520188
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '6cf36da6705f'        # this migration's identifier
down_revision = '6561c4454534'   # migration this one applies on top of
branch_labels = None
depends_on = None
def upgrade():
    """Add the nullable ``image_url`` text column to the ``product`` table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('product', sa.Column('image_url', sa.Text(), nullable=True))
    # ### end Alembic commands ###
def downgrade():
    """Reverse the upgrade: drop the ``image_url`` column from ``product``."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('product', 'image_url')
    # ### end Alembic commands ###
| [
"alexmei@ucsb.edu"
] | alexmei@ucsb.edu |
2b53d40a285c9d031d96a39ef176c489d8ebce4f | a073d77d10fc502593caad1d7784e18595c07649 | /DataTraining.py | 0e2dd1fc3fdf5a7c0b270608ab4bba331ab4ca9a | [] | no_license | ChrisRRadford/Cyanobacteria-Image-Classifier | 6eb92edcb7ce97a647bde7368448aad56a6beb0a | 223d045080cc2f1df2b0b3ed7b4cbe6e2b9ca46b | refs/heads/master | 2021-01-03T15:05:46.020024 | 2020-02-12T22:23:27 | 2020-02-12T22:23:27 | 240,122,948 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,541 | py | #python3 DataTraining.py --image "/Volumes/EXTERNAL/ClassifierImageSets/Origional_2.png" --fileName "Training"
import cv2
import argparse
import numpy as np
import pandas as pd
import wx
# NOTE(review): the usage example at the top of the file says --image/--fileName,
# which does not match the actual --Image/--SaveName flags — confirm intended.
ap = argparse.ArgumentParser()
ap.add_argument("-1", "--Image", required=True, help="Image to be trained on")
ap.add_argument("-2", "--SaveName", required=True, help="Name of file to be saved")
args = vars(ap.parse_args())
image = cv2.imread(args["Image"])          # image annotated by the user (BGR as read by OpenCV)
fileName = (args["SaveName"])              # basename for the output CSV
currClass = -1                             # active class label (1..3); -1 means none chosen yet
MasterList = np.empty((0,6), dtype = int)  # collected rows: [x, y, b, g, r, class]
count = 0
MasterClassCount = [0,0,0]                 # number of clicks recorded per class
# onclick function
def click_and_crop(event, x,y,flags,param):
    """OpenCV mouse callback: on double-click, record the clicked pixel as a sample.

    Appends [x, y, b, g, r, currClass] to the global MasterList, draws a
    class-colored marker on the image, and updates the per-class counters.
    """
    global currClass, image, MasterList, MasterClassCount
    if event == cv2.EVENT_LBUTTONDBLCLK :
        if currClass not in range (1,4):
            print("Current Class not spceified Within Range (1,2,3)")
        else:
            refPt = [y,x]
            # pixel at the click position; OpenCV images are indexed [row, col]
            px = image[y,x]
            tempList = np.array([[refPt[1],refPt[0],px[0],px[1],px[2],currClass]])
            MasterList = np.append(MasterList,tempList,axis=0)
            # draw a filled 10x10 marker whose color encodes the class
            if currClass == 1:
                cv2.rectangle(image,(x-5,y-5),(x+5,y+5),(0,255,0),-1)
                MasterClassCount[0] +=1
            elif currClass == 2:
                cv2.rectangle(image,(x-5,y-5),(x+5,y+5),(0,0,255),-1)
                MasterClassCount[1] +=1
            else:
                cv2.rectangle(image,(x-5,y-5),(x+5,y+5),(122,122,122),-1)
                MasterClassCount[2] +=1
            print("Class count", MasterClassCount ,"To be added:", tempList, end="\r")
def main():
    """Run the annotation UI loop.

    Keys 1/2/3 select the active class, 's' writes the collected samples to
    "<fileName>.csv", 'q' quits. Samples are collected by the mouse callback.
    """
    # wx is only used to query the display size for the window dimensions
    app = wx.App(True)
    width, height = wx.GetDisplaySize()
    del(app)
    global currClass,image, MasterList
    cv2.namedWindow('img',cv2.WINDOW_NORMAL)
    cv2.resizeWindow('img', (int(width*0.9),int(height*0.9)))
    cv2.moveWindow("img", 20,20);
    cv2.setMouseCallback("img",click_and_crop)
    while(True):
        cv2.imshow("img",image)
        key = cv2.waitKey(1) & 0xFF
        # if the '1' key is pressed. Is Algae
        if key == ord("1"):
            print("Now selecting on Class 1 (Algae)")
            currClass = 1
        # if the '2' key is pressed. Isn't Algae
        if key == ord("2"):
            print("Now selecting on Class 2 (Non-Algae)")
            currClass = 2
        # if the '3' key is pressed. Is background
        if key == ord("3"):
            print("Now selecting on Class 3 (Background)")
            currClass = 3
        # if the 's' key is pressed. Save MasterList
        if key == ord("s"):
            print("Saving...")
            #SAVE HERE
            dataFrame = pd.DataFrame(data=MasterList, columns=["xCord","yCord","bBand","gBand","rBand","class"])
            path = fileName + ".csv"
            dataFrame.to_csv(path,index=False)
            print("Saved")
        # if the 'q' key is pressed. Quit
        if key == ord("q"):
            print("Exiting")
            break
main() | [
"cradford@bell.net"
] | cradford@bell.net |
a60e660a37291658a05b42e59b648eddeb9f74c9 | b8515727e6c30e2b606ef11e8f35073b787aa852 | /todolist_app/tests.py | 0ba75cc60bf05ef12f5f5f9aa841eb3e6dd38c11 | [] | no_license | aramis-eb/todolist | ac1d689d036d83f0be7f266c4f7e8250dfdeac4c | c6839a388573b460b274bf9376d12c153e961571 | refs/heads/master | 2021-09-24T14:52:21.294270 | 2021-03-30T13:03:54 | 2021-03-30T13:03:54 | 254,407,078 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,523 | py | from django.test import TestCase, Client
from django.contrib.auth.models import User
from .models import Priority, Todo
class TestTodoList(TestCase):
    """Login/authentication tests for the todo-list views."""
    def test_todo_list_login(self):
        """Valid credentials: POST /login redirects (302) to the index."""
        client = Client()
        user = User.objects.create(username='aramis')
        user.set_password('agustin20')
        user.save()
        response = client.post('/login', {
            'username': 'aramis', 'password': 'agustin20'
        })
        self.assertEqual(response.status_code, 302)
        self.assertRedirects(response, '/')
    def test_todo_list_invalid_login(self):
        """Wrong password: following the response ends on a 200 page (no redirect to index)."""
        client = Client()
        user = User.objects.create(username='aramis')
        user.set_password('agustin20')
        user.save()
        response = client.post(
            '/login',
            {
                'username': 'aramis',
                'password': 'passwordinvalid'
            },
            follow=True,
        )
        # NOTE(review): assertEquals is a deprecated alias of assertEqual.
        self.assertEquals(response.status_code, 200)
    def test_todo_list_incorrect_login(self):
        """client.login with credentials of a non-existent/wrong user returns False."""
        c = Client()
        result = c.login(
            username='aramis',
            password='invalidPassword'
        )
        self.assertFalse(result)
    def test_todo_list_user_logged(self):
        """An authenticated user can GET the root view (200)."""
        c = Client()
        user = User.objects.create(username='testuser')
        user.set_password('12345')
        user.save()
        c.login(username='testuser', password='12345')
        response = c.get('')
        self.assertEqual(response.status_code, 200)
class TestTodoCreate(TestCase):
    """Tests for the /new/ todo-creation view."""
    def setUp(self):
        # shared fixtures: a priority, a test client and a saved user
        self.priority = Priority.objects.create(name='Low', order=1)
        self.client = Client()
        self.user = User.objects.create(username='aramis')
        self.user.set_password('agustin20')
        self.user.save()
    def test_create_todo_status_code(self):
        """Successful creation responds with a redirect (302)."""
        self.client.force_login(self.user)
        response = self.client.post('/new/', {
            'tittle': 'Taskk 1',
            'description': 'Task 2',
            'done': False,
            'priority': self.priority.id
        })
        self.assertEquals(response.status_code, 302)
    def test_create_todo(self):
        """Creation redirects to the detail view of the new object."""
        # Alternative way to log in
        self.client.force_login(self.user)
        # self.client.login(
        #     username='aramis',
        #     password='agustin20'
        # )
        response = self.client.post('/new/', {
            'tittle': 'Taskk 1',
            'description': 'Task 2',
            'done': False,
            'priority': self.priority.id
        }, follow=True)
        id = response.context['object'].id
        self.assertRedirects(response, '/view/'+str(id))
        self.assertEquals(response.status_code, 200)
    def test_failed_create_todo(self):
        """Missing description: form is re-rendered with status 200 (no redirect)."""
        self.client.force_login(self.user)
        response = self.client.post('/new/', {
            'tittle': 'Taskk 1',
            'description': '',
            'done': False,
            'priority': self.priority.id
        })
        self.assertEquals(response.status_code, 200)
class TestTodoDelete(TestCase):
    """Placeholder: deletion flow is not covered yet."""
    def test_delete_todo(self):
        # TODO: implement — currently always passes.
        pass
class TestTodoView(TestCase):
    """Fixtures for viewing a Todo.

    Fix: the original setUp referenced self.user and self.priority without
    ever creating them, so every test in this class would crash with
    AttributeError before the Todo could be built. The fixtures are now
    created first, mirroring TestTodoCreate.setUp.
    """
    def setUp(self):
        self.user = User.objects.create(username='aramis')
        self.user.set_password('agustin20')
        self.user.save()
        self.priority = Priority.objects.create(name='Low', order=1)
        # keep a handle on the created todo so future tests can use it
        self.todo = Todo.objects.create(
            tittle='task1',
            description='Descripcion de tarea 1',
            assigned_user=self.user,
            done=False,
            created='2020-04-10',
            updated='2020-04-10',
            created_by=self.user,
            updated_by=self.user,
            priority=self.priority,
        )
| [
"aramis@eventbrite.com"
] | aramis@eventbrite.com |
94e3d38dd3a5674a0272aeb4ea010d9f7a9abfd2 | 7dcdd5de0640f07b01b1707c134ec0bd168f641d | /fedora_college/modules/content/views.py | b1019c221326d657588aa1b01f790aaa7115edba | [
"BSD-3-Clause"
] | permissive | MSheezan/fedora-college | 8e3e741f6ddac481c2bb7bbcde1e70e2b4b56774 | 07dbce3652c6c1796fb0f7b208a706c9e9d90dc1 | refs/heads/master | 2021-01-15T22:38:16.831830 | 2014-06-26T07:04:33 | 2014-06-26T07:04:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,439 | py | # -*- coding: utf-8 -*-
import re
#import time
from unicodedata import normalize
from flask import Blueprint, render_template
from flask import redirect, url_for, g
from sqlalchemy import desc
from fedora_college.core.database import db
from fedora_college.modules.content.forms import * # noqa
from fedora_college.core.models import * # noqa
from flask_fas_openid import fas_login_required
bundle = Blueprint('content', __name__, template_folder='templates')
from fedora_college.modules.content.media import * # noqa
# Characters treated as word separators when building a slug.
_punct_re = re.compile(r'[\t !"#$%&\'()*\-/<=>?@\[\\\]^_`{|},.]+')
def slugify(text, delim=u'-'):
    """Generate an ASCII-only slug from *text*.

    Lower-cases the input, splits on punctuation/whitespace, strips accents
    via NFKD normalization and joins the surviving words with *delim*.

    Fix: the previous version kept each word as ``bytes`` and joined them
    with a text delimiter inside the Python-2-only ``unicode()`` call, which
    breaks on Python 3 (TypeError/NameError). Decoding back to text keeps
    the Python 2 behaviour and works on Python 3 as well.
    """
    result = []
    for word in _punct_re.split(text.lower()):
        # drop accents, then discard fragments with no ASCII content left
        word = normalize('NFKD', word).encode('ascii', 'ignore').decode('ascii')
        if word:
            result.append(word)
    return delim.join(result)
def attach_tags(tags, content):
    """Replace the tag set of *content* with *tags*.

    Deletes all existing TagsMap rows for the content, then for each tag
    name creates the Tags row if missing and links it to the content.
    """
    # clear current tag links for this content
    rem = TagsMap.query.filter_by(content_id=content.content_id).all()
    for r in rem:
        db.session.delete(r)
    db.session.commit()
    for tag in tags:
        tag_db = Tags.query.filter_by(tag_text=tag).first()
        if tag_db is None:
            # first time this tag text is seen: persist it
            tag_db = Tags(tag)
            db.session.add(tag_db)
            db.session.commit()
        Map = TagsMap(tag_db.tag_id, content.content_id)
        db.session.add(Map)
        db.session.commit()
@bundle.route('/content/add/', methods=['GET', 'POST'])
@bundle.route('/content/add', methods=['GET', 'POST'])
@bundle.route('/content/edit/<posturl>/', methods=['GET', 'POST'])
@bundle.route('/content/edit/<posturl>', methods=['GET', 'POST'])
@fas_login_required
def addcontent(posturl=None):
    """Create new content, or edit the content whose slug is *posturl*.

    With a slug: loads the existing content, updates it from the submitted
    form and re-attaches its tags. Without: creates a new Content row with
    a slug derived from the title. Falls through to rendering the edit form.
    """
    form = CreateContent()
    form_action = url_for('content.addcontent')
    media = Media.query.order_by(desc(Media.timestamp)).limit(10).all()
    if posturl is not None:
        # edit flow: 404 when the slug is unknown
        content = Content.query.filter_by(slug=posturl).first_or_404()
        form = CreateContent(obj=content)
        if form.validate_on_submit():
            form.populate_obj(content)
            tags = str(form.tags.data).split(',')
            attach_tags(tags, content)
            content.rehtml()
            db.session.commit()
            return redirect(url_for('content.addcontent',
                                    posturl=posturl,
                                    updated="Successfully updated")
                            )
    else:
        if form.validate_on_submit():
            url_name = slugify(form.title.data)
            query = Content(form.title.data,
                            url_name,
                            form.description.data,
                            form.active.data,
                            form.tags.data,
                            g.fas_user['username'],
                            form.type_content.data
                            )
            tags = str(form.tags.data).split(',')
            try:
                db.session.add(query)
                db.session.commit()
                attach_tags(tags, query)
                return redirect(url_for('content.addcontent',
                                        posturl=url_name,
                                        updated="Successfully updated",
                                        media=media)
                                )
            # Duplicate entry
            except Exception as e:
                db.session.rollback()
                print e
                pass
    return render_template('content/edit_content.html', form=form,
                           form_action=form_action, title="Create Content",
                           media=media)
@bundle.route('/blog', methods=['GET', 'POST'])
@bundle.route('/blog/', methods=['GET', 'POST'])
@bundle.route('/blog/<slug>/', methods=['GET', 'POST'])
@bundle.route('/blog/<slug>', methods=['GET', 'POST'])
def blog(slug=None):
    """Render the blog index, or a single post when *slug* is given.

    On a query failure the template receives an explanatory string instead
    of a result list. Fix: the bare ``except:`` clauses also swallowed
    SystemExit/KeyboardInterrupt; narrowed to ``except Exception``.
    """
    if slug is not None:
        try:
            posts = Content.query. \
                filter_by(slug=slug).all()
        except Exception:
            posts = "No such posts in database."
    else:
        try:
            posts = Content.query. \
                filter_by(type_content="blog").all()
        except Exception:
            # NOTE(review): "Databse" typo is user-visible; kept to preserve behavior.
            posts = "Databse is empty"
    return render_template('blog/index.html',
                           title='Blog',
                           content=posts)
| [
"hammadhaleem@gmail.com"
] | hammadhaleem@gmail.com |
c44e87e1ece4e52e9eeed64c2ed07b4a9e9918b5 | f8e52e6f6dfb55f30272af4336255ff6fe978c44 | /tools/mcc.py | d4a5be082822d9696147872e5134903f7608da9a | [] | no_license | egdman/kaggle | 1c167c3075dbb161ed0d517b8256a70ba08ff7d1 | a2ce7baddaff742efd9f2fabbda55e7d2eed9e52 | refs/heads/master | 2021-01-11T11:19:51.768729 | 2016-11-03T17:30:31 | 2016-11-03T17:30:31 | 72,687,475 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,634 | py | from numba import jit
import numpy as np
from matplotlib import pyplot as plt
# @jit
def mcc(tp, tn, fp, fn):
    """Matthews correlation coefficient from confusion-matrix counts.

    Returns 0 when any marginal total is zero (undefined denominator).
    """
    numerator = tp * tn - fp * fn
    denominator = (tp + fp) * (tp + fn) * (tn + fp) * (tn + fn)
    if denominator == 0:
        return 0
    return numerator / np.sqrt(denominator)
# @jit
def eval_mcc(y_true, y_prob, show=False):
    """Find the probability threshold on *y_prob* that maximizes MCC vs *y_true*.

    Sweeps thresholds in ascending probability order, updating the confusion
    counts incrementally, and returns the best MCC (or, with show=True, a
    tuple (best_proba, best_mcc, y_pred) plus a plot).

    NOTE(review): with show=True this calls ``matthews_corrcoef``, which is
    never imported in this module (no sklearn import) — it will raise
    NameError; confirm intended usage.
    """
    idx = np.argsort(y_prob)
    y_true_sort = y_true[idx]
    n = y_true.shape[0]
    nump = 1.0 * np.sum(y_true) # number of positive
    numn = n - nump # number of negative
    # start with everything predicted positive (threshold below the minimum)
    tp = nump
    tn = 0.0
    fp = numn
    fn = 0.0
    best_mcc = 0.0
    best_id = -1
    prev_proba = -1
    best_proba = -1
    mccs = np.zeros(n)
    for i in range(n):
        # all items with idx < i are predicted negative while others are predicted positive
        # only evaluate mcc when probability changes
        proba = y_prob[idx[i]]
        if proba != prev_proba:
            prev_proba = proba
            new_mcc = mcc(tp, tn, fp, fn)
            if new_mcc >= best_mcc:
                best_mcc = new_mcc
                best_id = i
                best_proba = proba
            mccs[i] = new_mcc
        # move item i from the "predicted positive" to the "predicted negative" side
        if y_true_sort[i] == 1:
            tp -= 1.0
            fn += 1.0
        else:
            fp -= 1.0
            tn += 1.0
    if show:
        y_pred = (y_prob >= best_proba).astype(int)
        score = matthews_corrcoef(y_true, y_pred)
        print(score, best_mcc)
        plt.plot(mccs)
        return best_proba, best_mcc, y_pred
    else:
        return best_mcc
def mcc_eval(y_prob, dtrain):
    """Custom eval metric wrapper returning ('MCC', best_mcc).

    *dtrain* is presumably an xgboost DMatrix (it exposes get_label()) —
    TODO confirm against the training code.
    """
    y_true = dtrain.get_label()
    best_mcc = eval_mcc(y_true, y_prob)
    return 'MCC', best_mcc
| [
"egdman90@gmail.com"
] | egdman90@gmail.com |
9707a59e2633a3a8f14afee94806f2e665985bcb | 939328b01c66aa635e62fb0679e2e17a69e140d9 | /lrec18/experiments/significance_tests.py | 282fec45aa4d92b3afc6cb69b032874d7d2f42e7 | [
"MIT"
] | permissive | weiweitoo/EmoMap | a951995064f99f686f27f41f93f79fc97a0b5240 | 86e326bca58b89539b058a0a69a0f8f9e6726c4e | refs/heads/master | 2020-11-28T08:33:05.791696 | 2020-01-01T09:05:08 | 2020-01-01T09:05:08 | 229,756,998 | 0 | 0 | MIT | 2019-12-23T13:21:43 | 2019-12-23T13:21:42 | null | UTF-8 | Python | false | false | 6,552 | py | import pandas as pd
import scipy.stats as st
import numpy as np
import prepare_data
def ttest(xbar, mu, N, s, tails=1):
    """One-sample t-test from summary statistics (one-tailed only).

    Returns (t, p); raises NotImplementedError for tails != 1.
    """
    t = (mu-xbar)*np.sqrt(float(N))/s
    if tails != 1:
        raise NotImplementedError
    # one-tailed p-value; symmetric, so the sign of t does not matter
    p = 1 - st.t.cdf(x=abs(t), df=N)
    return t, p
def ztest(sample_mean, pop_mean, N, pop_sd, tails=1):
    """One-tailed z-test from summary statistics.

    Returns (z, p); raises NotImplementedError for tails != 1.
    """
    standard_error = float(pop_sd) / np.sqrt(float(N))
    z = (float(sample_mean) - float(pop_mean)) / standard_error
    if tails != 1:
        raise NotImplementedError
    # one-tailed p-value; symmetric, so the sign of z does not matter
    p = 1 - st.norm.cdf(x=abs(z))
    return z, p
def correlation_test(sample_corr, pop_corr, N, tails=1):
    """Test whether an empirical correlation exceeds a population value.

    Applies Fisher's r-to-z transform to both correlations, then runs a
    one-tailed z-test with standard error 1/sqrt(N - 3).
    Returns (z, p); raises NotImplementedError for tails != 1.
    """
    fisher_sample = .5 * np.log((1 + sample_corr) / (1 - sample_corr))
    fisher_pop = .5 * np.log((1 + pop_corr) / (1 - pop_corr))
    standard_error = 1. / (np.sqrt(float(N - 3)))
    z = (fisher_sample - fisher_pop) / standard_error
    if tails != 1:
        raise NotImplementedError
    # one-tailed p-value; symmetric, so the sign of z does not matter
    p = 1 - st.norm.cdf(x=abs(z))
    return z, p
def size_of_gold_data(outpath):
    """Write a TSV table of gold-data row counts per language to *outpath*.

    Loads each language's gold data via prepare_data and records its number
    of rows (shape[0]) in a one-column ('N') DataFrame indexed by language.
    """
    languages=['English', 'Spanish', 'German', 'Polish']
    data = {
        'English':prepare_data.get_english(),
        'Spanish':prepare_data.get_spanish(),
        'Polish':prepare_data.get_polish(),
        'German':prepare_data.get_german()
    }
    table=pd.DataFrame(columns=['N'], index=languages)
    for lang in languages:
        print(data[lang].shape[0])
        table['N'][lang]=data[lang].shape[0]
    table.to_csv(outpath, sep='\t')
# Shared float format spec for printing and CSV export (5 decimal places).
format_string = ':02.5f'
def FORMATTER(x):
    """Format *x* with the module-wide float format, e.g. 0.5 -> '0.50000'.

    Fix: ``format_string`` was previously defined but never used; the
    formatter now derives its format from it, so the two stay consistent.
    """
    return ('{' + format_string + '}').format(x)
def significance_tests_for_experiment_1(path_inter_study_reliability,
                                        path_prediction_mean,
                                        path_prediction_std,
                                        outpath_t_table,
                                        outpath_p_table):
    '''
    One-tailed one sample t-test in 10-fold cross validation setup.
    Tests if mean over correlation values is higher than lowest inter-study
    reliability (degrees of freedom: 9).
    Adapted from Dietterich, T. G. (1998). Approximate statistical tests for
    comparing supervised classification learning algorithms. Neural
    Computation, 10(7), 1895-1923.
    '''
    # the lowest reliability over studies is used as the "human floor" baseline
    inter_study_reliability=pd.read_csv(path_inter_study_reliability,
                                        index_col=0, sep='\t')
    human_floor=inter_study_reliability.min(axis=0)
    results_mean=pd.read_csv(path_prediction_mean,
                             sep='\t',
                             index_col=0)
    results_std=pd.read_csv(path_prediction_std,
                            sep='\t',
                            index_col=0)
    languages=['English', 'Spanish', 'German', 'Polish']
    dimensions=['Valence', 'Arousal', 'Dominance']
    t_table=pd.DataFrame(columns=dimensions, index=languages)
    p_table=pd.DataFrame(columns=dimensions, index=languages)
    for lang in languages:
        for dim in dimensions:
            pred=results_mean[dim][lang]
            human=human_floor[dim]
            # tests are only run where the prediction beats the human floor;
            # other cells stay NaN in the output tables
            if pred>human:
                t,p=ttest(xbar=pred,
                          mu=human,
                          N=9,
                          s=results_std[dim][lang])
                t_table[dim][lang]=t
                p_table[dim][lang]=p
    t_table=t_table.round(4)
    p_table=p_table.round(4)
    print(t_table.to_string(float_format=FORMATTER))
    print(p_table.to_string(float_format=FORMATTER))
    t_table.to_csv(outpath_t_table, sep='\t',float_format=FORMATTER)
    p_table.to_csv(outpath_p_table, sep='\t', float_format=FORMATTER)
def significance_tests_for_experiment_2(path_inter_study_reliability,
                                        path_gold_data_size,
                                        path_prediction,
                                        outpath_z_table,
                                        outpath_p_table):
    '''
    Computes significance tests whether the experimental results from
    experiment 2 are higher than the human ceiling. Uses one-tailed z-tests
    with fisher r to z transformation.
    '''
    # Setting everything up
    languages=['English', 'Spanish', 'German', 'Polish']
    dimensions=['Valence', 'Arousal', 'Dominance']
    langcodes={'en':'English', 'es':'Spanish', 'pl':'Polish', 'de':'German'}
    inter_study_reliability=pd.read_csv(path_inter_study_reliability,
                                        index_col=0, sep='\t')
    gold_data_size=pd.read_csv(path_gold_data_size, sep='\t', index_col=0)
    human_floor=inter_study_reliability.min(axis=0)
    experimental_results=pd.read_csv(path_prediction, sep='\t', index_col=0)
    z_table=pd.DataFrame(columns=dimensions,
                         index=experimental_results.index)
    p_table=pd.DataFrame(columns=dimensions,
                         index=experimental_results.index)
    # Perform tests
    for case in list(experimental_results.index):
        # row index looks like "en2de": source and target language codes
        # separated by the literal character "2"
        parts=case.split('2')
        source_language=langcodes[parts[0]]
        target_language=langcodes[parts[1]]
        for dim in dimensions:
            pred=experimental_results[dim][case]
            human=human_floor[dim]
            # sample size N comes from the *target* language's gold data
            n=gold_data_size['N'][target_language]
            # print(source_language, target_language, n, dim, pred, human)
            if pred > human:
                z,p=correlation_test(sample_corr=pred,
                                     pop_corr=human,
                                     N=n)
                z_table[dim][case]=z
                p_table[dim][case]=p
    # Output
    z_table=z_table.round(4)
    p_table=p_table.round(4)
    print(z_table.to_string(float_format=FORMATTER))
    z_table.to_csv(outpath_z_table, sep='\t', float_format=FORMATTER)
    print(p_table.to_string(float_format=FORMATTER))
    p_table.to_csv(outpath_p_table, sep='\t', float_format=FORMATTER)
def significance_tests_for_experiment_3(path_inter_study_reliability,
                                        path_gold_data_size,
                                        path_prediction,
                                        outpath_z_table,
                                        outpath_p_table):
    """Like experiment 2, but results are indexed by language directly.

    Runs one-tailed z-tests (Fisher r-to-z) checking whether each language's
    result exceeds the lowest inter-study reliability; writes rounded z and
    p tables as TSV and prints them.
    """
    # Setting everything up
    dimensions=['Valence', 'Arousal', 'Dominance']
    inter_study_reliability=pd.read_csv(path_inter_study_reliability,
                                        index_col=0, sep='\t')
    gold_data_size=pd.read_csv(path_gold_data_size, sep='\t', index_col=0)
    human_floor=inter_study_reliability.min(axis=0)
    experimental_results=pd.read_csv(path_prediction, sep='\t', index_col=0)
    z_table=pd.DataFrame(columns=dimensions,
                         index=experimental_results.index)
    p_table=pd.DataFrame(columns=dimensions,
                         index=experimental_results.index)
    # Performing tests
    for lang in list(experimental_results.index):
        for dim in dimensions:
            pred=experimental_results[dim][lang]
            human=human_floor[dim]
            n=gold_data_size['N'][lang]
            # only cells beating the human floor are tested; others stay NaN
            if pred>human:
                z,p=correlation_test(sample_corr=pred,
                                     pop_corr=human,
                                     N=n)
                z_table[dim][lang]=z
                p_table[dim][lang]=p
    # Output
    z_table=z_table.round(4)
    p_table=p_table.round(4)
    print(z_table.to_string(float_format=FORMATTER))
    z_table.to_csv(outpath_z_table, sep='\t', float_format=FORMATTER)
    print(p_table.to_string(float_format=FORMATTER))
    p_table.to_csv(outpath_p_table, sep='\t', float_format=FORMATTER)
| [
"sven-buechel@gmx.de"
] | sven-buechel@gmx.de |
648f3b696550c88369e2a3b934622074751216dc | accfbf16b85a532db73e2b30540d4f1a1f5f088f | /vscode-python3/workspace/init.py | 4806d926160f00b68c31c996ad5ebe01ee612c66 | [] | no_license | BhawickJain/docker-setups | 4e3cbf8e48ec7be15b264e94676fa2b8a84033ce | 48f417048fca27bab5ad982bf157f98dad0d92c0 | refs/heads/main | 2023-07-15T00:15:26.202658 | 2021-08-16T19:32:42 | 2021-08-16T19:32:42 | 396,942,136 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 71 | py | print("hello", "Bhawick")
print("you are bug!")  # demo output line (debug-playground script)
print("debug! AWAYAY!") | [
"bhawick@outlook.com"
] | bhawick@outlook.com |
82d8e508bea9d27e596ec5fd5f94d4d16fc0ca40 | 085406a6754c33957ca694878db9bbe37f84b970 | /็ฝ็ป็ผ็จ/08-ssh_socket_client.py | b91da548705606b59b6c0eb6b8d70cdbb3050767 | [] | no_license | dewlytg/Python-example | 82157958da198ce42014e678dfe507c72ed67ef0 | 1e179e4037eccd9fefabefd252b060564a2eafce | refs/heads/master | 2021-01-01T18:36:08.868861 | 2019-01-18T10:39:08 | 2019-01-18T10:39:08 | 98,375,528 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,041 | py | #!/usr/bin/env python
"""
socket client for ssh
"""
import socket

client = socket.socket()
client.connect(("localhost", 9999))
while True:
    # keep prompting so the client can send commands repeatedly
    cmd = input(">>:").strip()
    if len(cmd) == 0:
        continue
    # Python 3 sockets send bytes, so the command string must be encoded
    # (utf-8 by default) before transmission.
    client.send(cmd.encode())
    # server first reports the byte size of the coming result
    cmd_res_size = client.recv(1024)
    print("命令结果大小:", cmd_res_size)
    # send an ack so the size message and the payload are not coalesced
    # into one TCP segment (avoids the sticky-packet problem on Linux).
    client.send("please input somthing in order to packet splicing".encode())
    received_size = 0
    received_data = b''
    # cmd_res_size arrives as bytes and must be decoded before int()
    while received_size != int(cmd_res_size.decode()):
        data = client.recv(1024)
        received_size += len(data)
        received_data += data
    else:
        print("cmd res receive done...", received_size)
        print(received_data.decode())
client.close()
"gang.tang@cutt.com"
] | gang.tang@cutt.com |
166670300dc3fb39d4e1883bb546d056fe08ce1f | dd09f3ad02785935043b56ea3ef85ed603f4065d | /Sorting_Function/Selection_Sorting.py | 6f03147ffab2db72cf7d3f242eb1efd76270e240 | [] | no_license | RishavMishraRM/Data_Structure | ed70f5a04c2fa8153433e830ef54deb7b9c8bf21 | 0d31d16b48989359d5fef79b00aac1b9ca112a22 | refs/heads/main | 2023-06-27T02:40:18.031146 | 2021-07-25T19:01:51 | 2021-07-25T19:01:51 | 330,320,897 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 365 | py | def selection_sort(A):
    # In-place selection sort of list A (the def line precedes this body).
    n = len(A)
    for i in range(n-1):
        # find the index of the smallest element in the unsorted tail A[i:]
        position = i
        for j in range(i+1, n):
            if A[j] < A[position]:
                position = j
        # swap the smallest remaining element into slot i
        temp = A[i]
        A[i] = A[position]
        A[position] = temp
# Demo: sort a sample list in place and show before/after.
A = [3, 5, 8, 9, 6, 2]
print('Original Array:',A)
selection_sort(A)
print('Sorted Array:',A)
| [
"noreply@github.com"
] | RishavMishraRM.noreply@github.com |
a35795d31dc08e0b14b7e0fe34da7cd1f10a97c9 | 5c435eb62297101d80904957f857f1ceaa217071 | /3-distribution_training/py/args.py | bc8e1eb5322ac2d38c1589c880aac8194588f8ee | [] | no_license | chatflip/tutorial_pytorch_japanese | d1972cb342d4da6918a3c35120de7e45b1bed74e | 374251cfe1359cd5e305d3d33c1fdf3f03ad13dc | refs/heads/master | 2022-01-26T18:42:21.313784 | 2020-06-13T09:54:35 | 2020-06-13T09:54:35 | 165,519,038 | 10 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,543 | py | import argparse
def opt():
    """Build and parse the command-line arguments for AnimeFace training.

    Returns the parsed argparse.Namespace. Groups: data/training settings,
    optimizer hyper-parameters, misc runtime options, apex mixed precision,
    and distributed-training settings.
    """
    parser = argparse.ArgumentParser(description='PyTorch AnimeFace')
    parser.add_argument('--path2db', type=str, default='data',
                        help='path to database')
    # Train Validate settings
    parser.add_argument('--batch-size', type=int, default=256,
                        help='mini-batch size in train')
    parser.add_argument('--val-batch-size', type=int, default=512,
                        help='mini-batch size in validate')
    parser.add_argument('--epochs', type=int, default=10,
                        help='number of total epochs to run')
    parser.add_argument('--num_classes', type=int, default=176,
                        help='num of classes')
    # network parameters
    parser.add_argument('--lr', type=float, default=0.01,
                        help='initial learning rate')
    parser.add_argument('--momentum', type=float, default=0.9,
                        help='momentum')
    parser.add_argument('--weight_decay', type=float, default=1e-04,
                        help='weight_decay')
    # etc
    parser.add_argument('--evaluate', action='store_true',
                        help='evaluate model on validation set')
    parser.add_argument('--resume', type=str, default='weight/AnimeFace_resnet18_best.pth',
                        help='load weight')
    parser.add_argument('--img_size', type=int, default=256,
                        help='image size')
    parser.add_argument('--crop_size', type=int, default=224,
                        help='crop size')
    parser.add_argument('--workers', type=int, default=16,
                        help='number of data loading workers')
    parser.add_argument('--seed', type=int, default=1,
                        help='seed for initializing training. ')
    parser.add_argument('--print-freq', type=int, default=10,
                        help='print frequency (default: 10)')
    # Mixed precision training parameters
    parser.add_argument('--apex', action='store_true',
                        help='Use apex for mixed precision training')
    parser.add_argument('--apex-opt-level', default='O1', type=str,
                        help='For apex mixed precision training'
                             'O0 for FP32 training, O1 for mixed precision training.'
                             'For further detail, see https://github.com/NVIDIA/apex/tree/master/examples/imagenet'
                        )
    # distribution settings
    parser.add_argument('--world-size', default=-1, type=int,
                        help='number of nodes for distributed training')
    parser.add_argument('--rank', default=-1, type=int,
                        help='node rank for distributed training')
    parser.add_argument('--dist-url', default='env://', type=str,
                        help='url used to set up distributed training')
    parser.add_argument('--dist-backend', default='nccl', type=str,
                        help='distributed backend')
    parser.add_argument('--gpu', default=None, type=int,
                        help='GPU id to use.')
    parser.add_argument('--multiprocessing-distributed', action='store_true',
                        help='Use multi-processing distributed training to launch '
                             'N processes per node, which has N GPUs. This is the '
                             'fastest way to use PyTorch for either single node or '
                             'multi node data parallel training')
    args = parser.parse_args()
    return args
| [
"oo.chat.flip@gmail.com"
] | oo.chat.flip@gmail.com |
2fb93afe829de7491a458ced6b6568ea178817ff | 488e0934b8cd97e202ae05368c855a57b299bfd1 | /Django/advanced/change_admin/change_admin/settings.py | 52ac0975d8daac947ffc100a34d19c9282aa57ff | [] | no_license | didemertens/udemy_webdev | 4d96a5e7abeec1848ecedb97f0c440cd50eb27ac | 306215571be8e4dcb939e79b18ff6b302b75c952 | refs/heads/master | 2020-04-25T00:24:45.654136 | 2019-04-13T16:00:47 | 2019-04-13T16:00:47 | 172,377,429 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,184 | py | """
Django settings for change_admin project.
Generated by 'django-admin startproject' using Django 2.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
TEMPLATE_DIR = os.path.join(BASE_DIR,'templates')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '(w#6#!6oi75z@e2d&((yalznx95yk7exe5fbbx#f1l#0uc=(3w'  # NOTE(review): hard-coded dev key; load from an env var before any deployment
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True  # development default; must be False in production
ALLOWED_HOSTS = []  # list the served hostnames here once DEBUG is False
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'app_videos'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'change_admin.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [TEMPLATE_DIR],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'change_admin.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
| [
"d.g.j.mertens@gmail.com"
] | d.g.j.mertens@gmail.com |
6f5e3b1bf3eda3e6b1a3ce9914c9e63edc2037a9 | d5ea8c4b49ee0b9f6261313efd6fc948d01bd92c | /optimization_specialist_demo.py | 5999bb755ac06700a9dca790192efc8364f3a2ea | [] | no_license | sannedonker/Evolutionary_computing | a57ec6e56c287bf2e84992ae3fe1a0f3ea08f526 | 63ebf18cf52a5cff3f4bc25c72601efa8423bf1a | refs/heads/master | 2020-07-27T08:42:32.162266 | 2019-10-24T20:36:58 | 2019-10-24T20:36:58 | 209,033,665 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,597 | py | ###############################################################################
# EvoMan FrameWork - V1.0 2016 #
# DEMO : Neuroevolution - Genetic Algorithm neural network. #
# Author: Karine Miras #
# karine.smiras@gmail.com #
###############################################################################
# imports framework
import sys
sys.path.insert(0, 'evoman')
from environment import Environment
from demo_controller import player_controller
# imports other libs
import time
import numpy as np
from math import fabs,sqrt
import glob, os
experiment_name = 'individual_demo'
if not os.path.exists(experiment_name):
os.makedirs(experiment_name)
# initializes simulation in individual evolution mode, for single static enemy.
env = Environment(experiment_name=experiment_name,
enemies=[2],
playermode="ai",
player_controller=player_controller(),
enemymode="static",
level=2,
speed="fastest")
# default environment fitness is assumed for experiment
env.state_to_log() # checks environment state
#### Optimization for controller solution (best genotype-weights for phenotype-network): Ganetic Algorihm ###
ini = time.time() # sets time marker
# genetic algorithm params
run_mode = 'train' # train or test
n_hidden = 10
n_vars = (env.get_num_sensors()+1)*n_hidden + (n_hidden+1)*5 # multilayer with 10 hidden neurons
dom_u = 1
dom_l = -1
npop = 10
gens = 5
mutation = 0.2
last_best = 0
# runs simulation
def simulation(env,x):
    """Play one EvoMan episode with controller weights *x*; return its fitness.

    env.play also reports player energy (p), enemy energy (e) and time (t),
    but only the fitness component is used by this GA.
    """
    f,p,e,t = env.play(pcont=x)
    return f
# normalizes
def norm(x, pfit_pop):
    """Min-max normalize *x* against the population fitness values.

    Returns a value in (0, 1]; anything at or below zero (including the
    degenerate case where all fitnesses are equal) is clamped to a tiny
    positive constant so it stays usable as a selection probability.
    """
    lo, hi = min(pfit_pop), max(pfit_pop)
    spread = hi - lo
    scaled = (x - lo) / spread if spread > 0 else 0
    return scaled if scaled > 0 else 0.0000000001
# evaluation
def evaluate(x):
    """Return a numpy array with the fitness of every genome in population *x*.

    Runs one full simulation per genome via the module-level `env`.
    """
    return np.array(list(map(lambda y: simulation(env,y), x)))
# tournament
def tournament(pop):
    """Binary tournament selection: return the fitter of two random genomes.

    NOTE(review): reads the module-level `fit_pop` array rather than taking
    it as a parameter, so it assumes fit_pop is kept in sync with *pop*.
    """
    c1 = np.random.randint(0,pop.shape[0], 1)
    c2 = np.random.randint(0,pop.shape[0], 1)
    if fit_pop[c1] > fit_pop[c2]:
        return pop[c1][0]
    else:
        return pop[c2][0]
# limits
def limits(x):
    """Clamp a single gene value to the search domain [dom_l, dom_u]."""
    # The min/max pair is equivalent to the original if/elif chain:
    # values above the upper bound collapse to dom_u, values below the
    # lower bound to dom_l, everything in between passes through unchanged.
    return max(dom_l, min(dom_u, x))
# crossover
def crossover(pop):
    """Produce offspring via whole-arithmetic crossover plus Gaussian mutation.

    Pairs of parents are picked by tournament selection; each pair yields
    1-3 children, each a random convex blend of the parents. Every gene
    then mutates with probability `mutation` (module global) and is clamped
    to the search domain. Returns a (n_children, n_vars) array.
    """
    total_offspring = np.zeros((0,n_vars))
    # step of 2: one parent pair per two population slots
    for p in range(0,pop.shape[0], 2):
        p1 = tournament(pop)
        p2 = tournament(pop)
        n_offspring = np.random.randint(1,3+1, 1)[0]
        offspring = np.zeros( (n_offspring, n_vars) )
        for f in range(0,n_offspring):
            # convex combination of the two parents
            cross_prop = np.random.uniform(0,1)
            offspring[f] = p1*cross_prop+p2*(1-cross_prop)
            # mutation: per-gene additive Gaussian noise
            for i in range(0,len(offspring[f])):
                if np.random.uniform(0 ,1)<=mutation:
                    offspring[f][i] = offspring[f][i]+np.random.normal(0, 1)
            # clamp every gene back into [dom_l, dom_u]
            offspring[f] = np.array(list(map(lambda y: limits(y), offspring[f])))
            total_offspring = np.vstack((total_offspring, offspring[f]))
    return total_offspring
# kills the worst genomes, and replace with new best/random solutions
def doomsday(pop,fit_pop):
    """Replace the worst quarter of the population to escape stagnation.

    Each gene of a doomed genome is either resampled uniformly from the
    domain or copied from the current best genome (coin flip per gene).
    The replaced genomes are re-evaluated in place. Returns (pop, fit_pop).
    """
    worst = int(npop/4) # a quarter of the population
    order = np.argsort(fit_pop)
    orderasc = order[0:worst]
    for o in orderasc:
        for j in range(0,n_vars):
            pro = np.random.uniform(0,1)
            if np.random.uniform(0,1) <= pro:
                pop[o][j] = np.random.uniform(dom_l, dom_u) # random dna, uniform dist.
            else:
                pop[o][j] = pop[order[-1:]][0][j] # dna from best
        fit_pop[o]=evaluate([pop[o]])
    return pop,fit_pop
# loads file with the best solution for testing
if run_mode =='test':
bsol = np.loadtxt(experiment_name+'/best.txt')
print( '\n RUNNING SAVED BEST SOLUTION \n')
env.update_parameter('speed','normal')
evaluate([bsol])
sys.exit(0)
# initializes population loading old solutions or generating new ones
if not os.path.exists(experiment_name+'/evoman_solstate'):
print( '\nNEW EVOLUTION\n')
pop = np.random.uniform(dom_l, dom_u, (npop, n_vars))
fit_pop = evaluate(pop)
best = np.argmax(fit_pop)
mean = np.mean(fit_pop)
std = np.std(fit_pop)
ini_g = 0
solutions = [pop, fit_pop]
env.update_solutions(solutions)
else:
print( '\nCONTINUING EVOLUTION\n')
env.load_state()
pop = env.solutions[0]
fit_pop = env.solutions[1]
best = np.argmax(fit_pop)
mean = np.mean(fit_pop)
std = np.std(fit_pop)
# finds last generation number
file_aux = open(experiment_name+'/gen.txt','r')
ini_g = int(file_aux.readline())
file_aux.close()
# saves results for first pop
file_aux = open(experiment_name+'/results.txt','a')
file_aux.write('\n\ngen best mean std')
print( '\n GENERATION '+str(ini_g)+' '+str(round(fit_pop[best],6))+' '+str(round(mean,6))+' '+str(round(std,6)))
file_aux.write('\n'+str(ini_g)+' '+str(round(fit_pop[best],6))+' '+str(round(mean,6))+' '+str(round(std,6)) )
file_aux.close()
# evolution
last_sol = fit_pop[best]
notimproved = 0
for i in range(ini_g+1, gens):
offspring = crossover(pop) # crossover
fit_offspring = evaluate(offspring) # evaluation
pop = np.vstack((pop,offspring))
fit_pop = np.append(fit_pop,fit_offspring)
best = np.argmax(fit_pop) #best solution in generation
fit_pop[best] = float(evaluate(np.array([pop[best] ]))[0]) # repeats best eval, for stability issues
best_sol = fit_pop[best]
# selection
fit_pop_cp = fit_pop
fit_pop_norm = np.array(list(map(lambda y: norm(y,fit_pop_cp), fit_pop))) # avoiding negative probabilities, as fitness is ranges from negative numbers
probs = (fit_pop_norm)/(fit_pop_norm).sum()
chosen = np.random.choice(pop.shape[0], npop , p=probs, replace=False)
chosen = np.append(chosen[1:],best)
pop = pop[chosen]
fit_pop = fit_pop[chosen]
# searching new areas
if best_sol <= last_sol:
notimproved += 1
else:
last_sol = best_sol
notimproved = 0
if notimproved >= 15:
file_aux = open(experiment_name+'/results.txt','a')
file_aux.write('\ndoomsday')
file_aux.close()
pop, fit_pop = doomsday(pop,fit_pop)
notimproved = 0
best = np.argmax(fit_pop)
std = np.std(fit_pop)
mean = np.mean(fit_pop)
# saves results
file_aux = open(experiment_name+'/results.txt','a')
print( '\n GENERATION '+str(i)+' '+str(round(fit_pop[best],6))+' '+str(round(mean,6))+' '+str(round(std,6)))
file_aux.write('\n'+str(i)+' '+str(round(fit_pop[best],6))+' '+str(round(mean,6))+' '+str(round(std,6)) )
file_aux.close()
# saves generation number
file_aux = open(experiment_name+'/gen.txt','w')
file_aux.write(str(i))
file_aux.close()
# saves file with the best solution
np.savetxt(experiment_name+'/best.txt',pop[best])
# saves simulation state
solutions = [pop, fit_pop]
env.update_solutions(solutions)
env.save_state()
fim = time.time() # prints total execution time for experiment
print( '\nExecution time: '+str(round((fim-ini)/60))+' minutes \n')
file = open(experiment_name+'/neuroended', 'w') # saves control (simulation has ended) file for bash loop file
file.close()
env.state_to_log() # checks environment state
| [
"s.a.m.donker@hotmail.com"
] | s.a.m.donker@hotmail.com |
ee4ca603bda625183659e699f11fd7d710b1f6e2 | 0e1e643e864bcb96cf06f14f4cb559b034e114d0 | /Exps_7_v3/W_w_Mgt_to_C/pyramid_tight_crop_size256_pad60_jit15/pyr_2s/bce_s001_tv_s0p1_L4/step10_a.py | eb5df86c53b26c8ee94837558fc32d4559cdad0a | [] | no_license | KongBOy/kong_model2 | 33a94a9d2be5b0f28f9d479b3744e1d0e0ebd307 | 1af20b168ffccf0d5293a393a40a9fa9519410b2 | refs/heads/master | 2022-10-14T03:09:22.543998 | 2022-10-06T11:33:42 | 2022-10-06T11:33:42 | 242,080,692 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,105 | py | #############################################################################################################################################################################################################
#############################################################################################################################################################################################################
### ๆ kong_model2 ๅ ๅ
ฅ sys.path
import os
code_exe_path = os.path.realpath(__file__) ### ็ฎๅๅท่ก step10_b.py ็ path
code_exe_path_element = code_exe_path.split("\\") ### ๆ path ๅๅ ็ญ็ญ ่ฆๆพๅบ kong_model ๅจ็ฌฌๅนพๅฑค
code_dir = "\\".join(code_exe_path_element[:-1])
kong_layer = code_exe_path_element.index("kong_model2") ### ๆพๅบ kong_model2 ๅจ็ฌฌๅนพๅฑค
kong_model2_dir = "\\".join(code_exe_path_element[:kong_layer + 1]) ### ๅฎไฝๅบ kong_model2 ็ dir
import sys ### ๆ kong_model2 ๅ ๅ
ฅ sys.path
sys.path.append(kong_model2_dir)
sys.path.append(code_dir)
# print(__file__.split("\\")[-1])
# print(" code_exe_path:", code_exe_path)
# print(" code_exe_path_element:", code_exe_path_element)
# print(" code_dir:", code_dir)
# print(" kong_layer:", kong_layer)
# print(" kong_model2_dir:", kong_model2_dir)
#############################################################################################################################################################################################################
kong_to_py_layer = len(code_exe_path_element) - 1 - kong_layer ### ไธญ้ -1 ๆฏ็บไบ้ทๅบฆ่ฝindex
# print(" kong_to_py_layer:", kong_to_py_layer)
if (kong_to_py_layer == 0): template_dir = ""
elif(kong_to_py_layer == 2): template_dir = code_exe_path_element[kong_layer + 1][0:] ### [7:] ๆฏ็บไบๅปๆ step1x_๏ผ ๅพไพ่ฆบๅพๅฅฝๅๆนๆๆ็พฉ็ๅๅญไธๅปๆไน่กๆไปฅ ๆน 0
elif(kong_to_py_layer == 3): template_dir = code_exe_path_element[kong_layer + 1][0:] + "/" + code_exe_path_element[kong_layer + 2][0:] ### [5:] ๆฏ็บไบๅปๆ mask_ ๏ผๅ้ข็ mask_ ๆฏ็บไบpython ็ module ไธ่ฝ ๆธๅญ้้ ญ๏ผ ้จไพฟๅ ็้ๆจฃๅญ๏ผ ๅพไพ่ฆบๅพ ่ชๅๆ็้ ๅบไนๅฏไปฅๆฅๅ๏ผ ๆไปฅ ๆน0
elif(kong_to_py_layer > 3): template_dir = code_exe_path_element[kong_layer + 1][0:] + "/" + code_exe_path_element[kong_layer + 2][0:] + "/" + "/".join(code_exe_path_element[kong_layer + 3: -1])
# print(" template_dir:", template_dir) ### ่ไพ๏ผ template_dir: 7_mask_unet/5_os_book_and_paper_have_dtd_hdr_mix_bg_tv_s04_mae
#############################################################################################################################################################################################################
exp_dir = template_dir
#############################################################################################################################################################################################################
from step06_a_datas_obj import *
from step09_2side_L4 import *
from step10_a2_loss_info_obj import *
from step10_b2_exp_builder import Exp_builder
rm_paths = [path for path in sys.path if code_dir in path]
for rm_path in rm_paths: sys.path.remove(rm_path)
rm_moduless = [module for module in sys.modules if "step09" in module]
for rm_module in rm_moduless: del sys.modules[rm_module]
#############################################################################################################################################################################################################
'''
exp_dir ๆฏ ๆฑบๅฎ result_dir ็ "ไธไธๅฑค"่ณๆๅคพ ๅๅญๅ๏ผ exp_dir่ฆๅทข็ไนๆฒๅ้ก๏ฝ
ๆฏๅฆ๏ผexp_dir = "6_mask_unet/่ชๅทฑๅฝ็ๅๅญ"๏ผ้ฃ result_dir ๅฐฑ้ฝๅจ๏ผ
6_mask_unet/่ชๅทฑๅฝ็ๅๅญ/result_a
6_mask_unet/่ชๅทฑๅฝ็ๅๅญ/result_b
6_mask_unet/่ชๅทฑๅฝ็ๅๅญ/...
'''
use_db_obj = type8_blender_wc_flow
use_loss_obj = [G_mae_s001_loss_info_builder.set_loss_target("UNet_Cx").copy(), G_mae_s001_loss_info_builder.set_loss_target("UNet_Cy").copy()] ### x, y ้ ๅบๆฏ็ step07_b_0b_Multi_UNet ไพๅฐๆ็ๅ
#############################################################
### ็บไบresul_analyze็ซ็ฉบ็ฝ็ๅ๏ผๅปบไธๅempty็ Exp_builder
empty = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_1__2side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_1__2side_1.kong_model.model_describe) .set_train_args(epochs= 60).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="็บไบresul_analyze็ซ็ฉบ็ฝ็ๅ๏ผๅปบไธๅempty็ Exp_builder")
#############################################################
ch032_1side_1__2side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_1__2side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_1__2side_1.kong_model.model_describe) .set_train_args(epochs= 60).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_2__2side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_2__2side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_2__2side_1.kong_model.model_describe) .set_train_args(epochs= 60).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_2__2side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_2__2side_2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_2__2side_2.kong_model.model_describe) .set_train_args(epochs= 60).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_3__2side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_1.kong_model.model_describe) .set_train_args(epochs= 60).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_3__2side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_2.kong_model.model_describe) .set_train_args(epochs= 60).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_3__2side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_3__2side_3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_3__2side_3.kong_model.model_describe) .set_train_args(epochs= 60).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_1.kong_model.model_describe) .set_train_args(epochs= 60).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_2.kong_model.model_describe) .set_train_args(epochs= 60).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_3.kong_model.model_describe) .set_train_args(epochs= 60).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_4__2side_4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_4__2side_4, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_4__2side_4.kong_model.model_describe) .set_train_args(epochs= 60).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_1 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_1, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_1.kong_model.model_describe) .set_train_args(epochs= 60).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_2 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_2, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_2.kong_model.model_describe) .set_train_args(epochs= 60).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_3 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_3, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_3.kong_model.model_describe) .set_train_args(epochs= 60).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_4 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_4, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_4.kong_model.model_describe) .set_train_args(epochs= 60).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
ch032_1side_5__2side_5 = Exp_builder().set_basic("train", use_db_obj, ch032_pyramid_1side_5__2side_5, use_loss_obj, exp_dir=exp_dir, code_exe_path=code_exe_path, describe_end=ch032_pyramid_1side_5__2side_5.kong_model.model_describe) .set_train_args(epochs= 60).set_train_in_gt_use_range(use_in_range=Range(0, 1), use_gt_range=Range(0, 1)).set_result_name(result_name="")
#############################################################
if(__name__ == "__main__"):
print("build exps cost time:", time.time() - start_time)
if len(sys.argv) < 2:
############################################################################################################
### ็ดๆฅๆ F5 ๆๆ python step10_b1_exp_obj_load_and_train_and_test.py๏ผๅพ้ขๆฒๆๆฅๆฑ่ฅฟๅ๏ผๆไธๆ่ทๅฐไธ้ข็ตฆ step10_b_subprocss.py ็จ็็จๅผ็ขผ~~~
ch032_1side_1__2side_0.build().run()
# print('no argument')
sys.exit()
### ไปฅไธๆฏ็ตฆ step10_b_subprocess.py ็จ็๏ผ็ธ็ถๆผcmdๆ python step10_b1_exp_obj_load_and_train_and_test.py ๆๅexp.build().run()
eval(sys.argv[1])
| [
"s89334roy@yahoo.com.tw"
] | s89334roy@yahoo.com.tw |
9f3bc36cda5789eecda861f4e2fef3c900573070 | b6f5be56f502b113a03af54abf4b9790bfdd7545 | /gesteNemo.py | 68e936f943890ce6414635db964d2b0e29150cab | [] | no_license | PonteIneptique/geste-nemo | dc049e414ad23137ff54577b3157d1eb7c44fbac | c73069028f810f1b7d80420d72ab79602f477a2e | refs/heads/master | 2020-03-20T13:02:07.260577 | 2018-06-14T17:13:35 | 2018-06-14T17:13:35 | 137,446,875 | 0 | 0 | null | 2018-06-15T06:07:08 | 2018-06-15T05:58:41 | HTML | UTF-8 | Python | false | false | 3,378 | py | # Import Flask and Nemo
# This script can take a first argument giving a configuration from examples.py
from flask import Flask
from flask_nemo import Nemo
from flask_caching import Cache
from flask_nemo.chunker import level_grouper
from capitains_nautilus.cts.resolver import NautilusCTSResolver
from MyCapytain.resources.prototypes.cts.inventory import CtsTextInventoryCollection as TextInventoryCollection, CtsTextInventoryMetadata as PrototypeTextInventory
from MyCapytain.resolvers.utils import CollectionDispatcher
from capitains_nautilus.cts.resolver import NautilusCTSResolver
from capitains_nautilus.flask_ext import FlaskNautilus
import logging
# We import enough resources from MyCapytain to retrieve data
from MyCapytain.resolvers.cts.api import HttpCtsResolver
from MyCapytain.retrievers.cts5 import HttpCtsRetriever
# We create a Flask app
app = Flask(
__name__
)
tic = TextInventoryCollection()
fro = PrototypeTextInventory("urn:geste", parent=tic) #Rien ร voir avec les identifiants cts, c'est un identifiant de projet
fro.set_label("Corpus de chansons de geste", "fro")
dispatcher = CollectionDispatcher(tic)
@dispatcher.inventory("urn:geste")
def dispatchGeste(collection, path=None, **kwargs):
    """Route CTS collections in the froLit namespace to the 'urn:geste' inventory.

    Returns True when the collection's URN starts with the Old French
    literature prefix; everything else is left for other dispatchers.
    """
    if collection.id.startswith("urn:cts:froLit"): # this time it really is a CTS URN: we match on the start of the citation path
        return True
    return False
cache = Cache()
NautilusDummy = NautilusCTSResolver(
[
"."
],
dispatcher=dispatcher
)
NautilusDummy.logger.setLevel(logging.ERROR)
def scheme_grouper(text, getreffs):
    """Choose a passage-chunking (citation level, group size) per text scheme.

    Inspects the names of the text's citation levels and maps known schemes
    (e.g. book/line, chapter/verse, vers/mot) to a depth and a number of
    passages per chunk, then delegates to flask_nemo's level_grouper.
    NOTE(review): `level` is taken from the full citation depth *before*
    'word' levels are stripped from `types` -- confirm that is intended for
    schemes ending in 'word' that fall through to the default.
    """
    level = len(text.citation)
    groupby = 5
    types = [citation.name for citation in text.citation]
    # ignore word-level citations when matching the scheme
    if 'word' in types:
        types = types[:types.index("word")]
    # one hard-coded exception, then exact scheme matches in priority order
    if str(text.id) == "urn:cts:latinLit:stoa0040.stoa062.opp-lat1":
        level, groupby = 1, 2
    elif types == ["vers", "mot"]:
        level, groupby = 1, 100
    elif types == ["book", "poem", "line"]:
        level, groupby = 2, 1
    elif types == ["book", "line"]:
        level, groupby = 2, 30
    elif types == ["book", "chapter"]:
        level, groupby = 2, 1
    elif types == ["book"]:
        level, groupby = 1, 1
    elif types == ["line"]:
        level, groupby = 1, 30
    elif types == ["chapter", "section"]:
        level, groupby = 2, 2
    elif types == ["chapter", "mishnah"]:
        level, groupby = 2, 1
    elif types == ["chapter", "verse"]:
        level, groupby = 2, 1
    elif "line" in types:
        # fallback for any other line-bearing scheme: keep full depth, 30 per chunk
        groupby = 30
    return level_grouper(text, getreffs, level, groupby)
nautilus = FlaskNautilus(
app=app,
prefix="/api",
name="nautilus",
resolver=NautilusDummy
)
nemo = Nemo(
app=app,
base_url="/geste",
resolver=NautilusDummy,
chunker={"default": scheme_grouper},
plugins=None,
cache=cache,
transform={
"default": "./geste.xslt"
},
css=[
# USE Own CSS
"./styles/geste.css"
],
js=[
# use own js file to load a script to go from normalized edition to diplomatic one.
"./styles/geste.js"
],
templates={
"main": "./templates"
},
statics=["./images/logo-enc2.png","./fonts/Junicode-Regular.ttf","./fonts/Junicode-Regular.woff"]
#,
#additional_static=[
# "img/logo-enc2.jpg"
#]
)
cache.init_app(app)
if __name__ == "__main__":
app.run()
| [
"jbcamps@hotmail.com"
] | jbcamps@hotmail.com |
f011ab83e71888ec2f09ea886e39c022a750a85a | 82441f872cc36e5833e1378157c5c413141d736b | /hosts.py | 889dc7b144d2642724fb07f27d59f30d6bff207b | [] | no_license | tikalk/ft-deploy | 9baa035dc76d9a70d902dfe9e0240bbca77c3cfd | 143af780510ddd463591d00a44e9b755423d82b8 | refs/heads/master | 2020-12-27T09:34:51.224056 | 2015-12-13T22:47:29 | 2015-12-13T22:47:29 | 47,748,374 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,061 | py | #!/usr/bin/env python
import json
import subprocess
import sys
# Inventory variables applied to every host (machines ship Python 2.7).
BASE_PARAMS = {
    'ansible_python_interpreter': '/usr/bin/python2.7',
}
# Ask docker-machine for all known machines; the first line is the table header.
out = subprocess.check_output(['docker-machine', 'ls'])
dockers = {}
machines = out.splitlines()[1:]
for machine_info in machines:
    try:
        # Drop the '*' active-machine marker, then split the table row on whitespace.
        machine_info = machine_info.replace('*', '').split()
        machine_name = machine_info[0]
        machine_driver = machine_info[2]  # NOTE(review): parsed but never used
        # Skip rows whose URL column is not a tcp:// endpoint (stopped/errored machines).
        if not machine_info[3].startswith('tcp://'):
            continue
        # Strip scheme and port to keep just the host address.
        machine_ip = machine_info[3].replace('tcp://', '').split(':')[:1][0]
    except:  # NOTE(review): bare except also swallows real bugs; narrow to IndexError
        continue
    # SSH connection settings for this machine, keyed by its docker-machine name.
    dockers[machine_name] = {
        'ansible_ssh_private_key_file': '~/.docker/machine/machines/%s/id_rsa' % machine_name,
        'ansible_ssh_user': 'docker',
        'ansible_ssh_host': machine_ip,
    }
    dockers[machine_name].update(BASE_PARAMS)
# Ansible dynamic-inventory JSON: a 'dockers' group plus per-host variable blocks.
inventory = {
    'localhost': {'ansible_connection': 'local', },
    'dockers': dockers.keys(),
}
inventory['localhost'].update(BASE_PARAMS)
inventory.update(dockers)
json.dump(inventory, sys.stdout)
| [
"hagzag@hagzag.com"
] | hagzag@hagzag.com |
94469e411f69931b1aa7dec9d60e62e9d87a7eff | 3e917645a0e1375189c8ee8c1e93ed15348111ef | /projects/usxp/archive/parrallel/parallel_nibble_v2.py | 792bbb8be009b4feb157af5c7e2bf1c7bf54ad07 | [] | no_license | mbougie/gibbs | d4544e688ce2b63530535e1f5102328aece30e0d | 39d5dc0866fc0dd149d0cf1f22bfd20911a9d29e | refs/heads/master | 2021-01-12T06:59:27.214123 | 2020-01-07T15:48:12 | 2020-01-07T15:48:12 | 83,906,717 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,710 | py | import arcpy
from arcpy import env
from arcpy.sa import *
import multiprocessing
import os
import glob
import sys
import time
import logging
from multiprocessing import Process, Queue, Pool, cpu_count, current_process, Manager
import general as gen
# arcpy.env.overwriteOutput = True
arcpy.env.scratchWorkspace = "in_memory"
case=['Bougie','Gibbs']
#import extension
arcpy.CheckOutExtension("Spatial")
#establish root path for this the main project (i.e. usxp)
rootpath = 'C:/Users/Bougie/Desktop/Gibbs/data/usxp/'
# rootpath = 'D:/projects/ksu/v2/'
### establish gdb path ####
def defineGDBpath(arg_list):
    """Build the path of a file geodatabase under the project root.

    arg_list is a 3-item sequence (top-level dir, sub dir, gdb name);
    the returned path always ends with a trailing slash.
    """
    top, sub, gdb = arg_list[0], arg_list[1], arg_list[2]
    return '{}{}/{}/{}.gdb/'.format(rootpath, top, sub, gdb)
####### define raster and mask ####################
class ProcessingObject(object):
def __init__(self, series, res, mmu, years, name, subname, pixel_type, gdb_parent, parent_seq, gdb_child, mask_seq, outraster_seq):
self.series = series
self.res = str(res)
self.mmu =str(mmu)
self.years = years
self.name = name
self.subname = subname
self.parent_seq = parent_seq
self.mask_seq = mask_seq
self.outraster_seq = outraster_seq
self.datarange = str(self.years[0])+'to'+str(self.years[1])
print 'self.datarange:', self.datarange
self.dir_tiles = 'C:/Users/Bougie/Desktop/Gibbs/tiles/'
# s9_ytc30_2008to2016_mmu5_nbl_bfc
if self.name == 'mtr':
self.traj = self.series+'_traj_cdl'+self.res+'_b_'+self.datarange+'_rfnd'
self.gdb_parent = defineGDBpath(gdb_parent)
self.raster_parent = self.traj+self.parent_seq
self.path_parent = self.gdb_parent + self.raster_parent
print 'self.path_parent', self.path_parent
self.gdb_child = defineGDBpath(gdb_child)
self.raster_mask = self.raster_parent + self.mask_seq
self.path_mask = self.gdb_child + self.raster_mask
self.raster_nbl = self.raster_parent + self.outraster_seq
self.path_nbl = self.gdb_child + self.raster_nbl
print 'self.path_nbl', self.path_nbl
self.out_fishnet = defineGDBpath(['ancillary','vector', 'shapefiles']) + 'fishnet_mtr'
print self.out_fishnet
self.pixel_type = "16_BIT_UNSIGNED"
else:
self.gdb_parent = defineGDBpath(['s14', 'post', self.name])
self.yxc_foundation = self.series+'_'+self.name+self.res+'_'+self.datarange+'_mmu'+self.mmu
print 'self.yxc_foundation', self.yxc_foundation
self.path_parent = self.gdb_parent + self.yxc_foundation
print 'self.path_parent', self.path_parent
self.raster_mask = self.yxc_foundation + '_msk'
self.path_mask = self.gdb_parent + self.raster_mask
print 'self.path_mask', self.path_mask
self.out_fishnet = defineGDBpath(['ancillary','vector', 'shapefiles']) + 'fishnet_ytc'
self.pixel_type = "16_BIT_UNSIGNED"
self.raster_nbl = self.yxc_foundation + '_nbl'
print 'self.raster_nbl:', self.raster_nbl
self.path_nbl = self.gdb_parent + self.raster_nbl
print 'self.path_nbl', self.path_nbl
# def existsDataset(self):
# dataset = self.gdb_parent + self.raster_parent + '_nbl'
# if arcpy.Exists(dataset):
# print 'dataset already exists'
# return
# else:
# print 'dataset: ', dataset
# return self.raster_parent + '_nbl'
def create_fishnet():
    """Recreate the 7x7 fishnet polygon grid covering the parent raster extent.

    NOTE(review): reads the module-level name `nibble`, but run() only builds
    a local ProcessingObject -- confirm a global is bound before calling this
    (the call site in run() is currently commented out).
    NOTE(review): nibble.path_parent is a plain path string; `.extent` and
    `.spatialReference` require an arcpy Raster object -- verify before
    re-enabling this function.
    """
    #delete previous fishnet feature class
    arcpy.Delete_management(nibble.out_fishnet)
    #acquire parameters for creatfisnet function
    XMin = nibble.path_parent.extent.XMin
    YMin = nibble.path_parent.extent.YMin
    XMax = nibble.path_parent.extent.XMax
    YMax = nibble.path_parent.extent.YMax
    origCord = "{} {}".format(XMin, YMin)
    YAxisCord = "{} {}".format(XMin, YMax)
    cornerCord = "{} {}".format(XMax, YMax)
    cellSizeW = "0"
    cellSizeH = "0"
    numRows = 7
    numCols = 7
    geotype = "POLYGON"
    arcpy.env.outputCoordinateSystem = nibble.path_parent.spatialReference
    print nibble.path_parent.spatialReference.name
    #call CreateFishnet_management function
    arcpy.CreateFishnet_management(nibble.out_fishnet, origCord, YAxisCord, cellSizeW, cellSizeH, numRows, numCols, cornerCord, "NO_LABELS", "", geotype)
def execute_task(args):
    """Worker: nibble one fishnet tile of the parent raster and save it as a TIFF.

    *args* is a 2-tuple of ((tile_id, [XMin, YMin, XMax, YMax]), nibble).
    Setting arcpy.env.extent to the tile bounds lets Nibble read only that
    window of the full raster, so no explicit clipping is needed.
    """
    in_extentDict, nibble = args
    fc_count = in_extentDict[0]
    # print fc_count
    procExt = in_extentDict[1]
    # print procExt
    XMin = procExt[0]
    YMin = procExt[1]
    XMax = procExt[2]
    YMax = procExt[3]
    #set environments: snap/cellsize to the parent raster, extent to this tile
    arcpy.env.snapRaster = nibble.path_parent
    arcpy.env.cellsize = nibble.path_parent
    arcpy.env.extent = arcpy.Extent(XMin, YMin, XMax, YMax)
    ### Execute Nibble #####################
    ras_out = arcpy.sa.Nibble(nibble.path_parent, nibble.path_mask, "DATA_ONLY")
    #clear out the extent for next time
    arcpy.ClearEnvironment("extent")
    # print fc_count
    outname = "tile_" + str(fc_count) +'.tif'
    #save the tile into the shared tiles directory for the later mosaic step
    outpath = os.path.join("C:/Users/Bougie/Desktop/Gibbs/", r"tiles", outname)
    ras_out.save(outpath)
def mosiacRasters(nibble):
    """Mosaic all worker tiles back into the final nibbled raster.

    Collects every .tif in the tiles directory, mosaics them into
    nibble.raster_nbl inside the parent geodatabase, then rebuilds the
    attribute table and pyramids. (Name keeps the original 'mosiac'
    spelling to avoid breaking callers.)
    """
    tilelist = glob.glob(nibble.dir_tiles+'*.tif')
    print tilelist
    ######mosiac tiles together into a new raster
    arcpy.MosaicToNewRaster_management(tilelist, nibble.gdb_parent, nibble.raster_nbl, Raster(nibble.path_parent).spatialReference, nibble.pixel_type, nibble.res, "1", "LAST","FIRST")
    ##Overwrite the existing attribute table file
    arcpy.BuildRasterAttributeTable_management(nibble.path_nbl, "Overwrite")
    ## Overwrite pyramids
    gen.buildPyramids(nibble.path_nbl)
def run(series, res, mmu, years, name, subname, pixel_type, gdb_parent, parent_seq, gdb_child, mask_seq, outraster_seq):
    """Drive one parallel Nibble job.

    Builds the path object, clears the tile directory, reads the per-feature
    extents of the fishnet grid, nibbles each tile in a multiprocessing pool,
    then mosaics the tiles back into a single output raster.
    """
    #instantiate the class inside run() function
    nibble = ProcessingObject(series, res, mmu, years, name, subname, pixel_type, gdb_parent, parent_seq, gdb_child, mask_seq, outraster_seq)
    print nibble.res
    # need to create a unique fishnet for each dataset
    #create_fishnet()
    #remove a files in tiles directory
    tiles = glob.glob(nibble.dir_tiles+"*")
    for tile in tiles:
        os.remove(tile)
    #get extents of individual features and add it to a dictionary keyed by tile number
    extDict = {}
    count = 1
    for row in arcpy.da.SearchCursor(nibble.out_fishnet, ["SHAPE@"]):
        extent_curr = row[0].extent
        ls = []
        ls.append(extent_curr.XMin)
        ls.append(extent_curr.YMin)
        ls.append(extent_curr.XMax)
        ls.append(extent_curr.YMax)
        extDict[count] = ls
        count+=1
    # print 'extDict', extDict
    # print'extDict.items()', extDict.items()
    ######create a process per CPU and fan the tile extents out to execute_task
    pool = Pool(processes=cpu_count())
    # pool = Pool(processes=1)
    pool.map(execute_task, [(ed, nibble) for ed in extDict.items()])
    pool.close()
    pool.join
    mosiacRasters(nibble)
"mbougie@wisc.edu"
] | mbougie@wisc.edu |
f20a2e2ee965a0bd4785f21b4b185b1d3d82bb37 | fc7d552c915759ab20d9780acac3a1b460f3eb8f | /CourserSite/wsgi.py | 0b770fece5d5de0dc20753d87446e6d974fcdce7 | [] | no_license | adwojak/courser | de3337fd79ce28c91749edb73a8508d769b780f5 | ea94f156c776bbdf721401f989500ad71236c138 | refs/heads/master | 2020-04-01T07:59:33.222971 | 2018-11-14T15:01:24 | 2018-11-14T15:01:24 | 153,012,945 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 399 | py | """
WSGI config for CourserSite project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project's settings module before building the handler.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'CourserSite.settings')
# Module-level WSGI callable that servers (gunicorn, mod_wsgi, ...) import.
application = get_wsgi_application()
| [
"dwojaka207@gmail.com"
] | dwojaka207@gmail.com |
84df639b00d52689ce79b4e4ecbc6317750c2a9d | 5f71223206cd63d4ddac6578c80b223924a8d558 | /pyladies/PyLadies/bin/python-config | 1013e19bdd5af186474fcefd5aa2b45b68f760ad | [
"Zlib"
] | permissive | ankita0000/pyladies | bb08eb1337297525f1b94e110165d8967b53ce77 | ca15fad0736b06c4940c129d66d7cea9d51f461d | refs/heads/master | 2021-01-17T20:28:16.157085 | 2016-11-14T08:44:29 | 2016-11-14T08:44:29 | 66,218,590 | 0 | 0 | null | 2016-08-21T20:12:24 | 2016-08-21T20:12:24 | null | UTF-8 | Python | false | false | 2,353 | #!/home/yo/pyladies/pyladies/PyLadies/bin/python
import sys
import getopt
import sysconfig
# Vendored CPython `python-config` helper: prints compiler/linker flags for
# embedding or extending this interpreter.  Options understood by the script;
# newer interpreter versions add a few more below.
valid_opts = ['prefix', 'exec-prefix', 'includes', 'libs', 'cflags',
              'ldflags', 'help']
if sys.version_info >= (3, 2):
    valid_opts.insert(-1, 'extension-suffix')
    valid_opts.append('abiflags')
if sys.version_info >= (3, 3):
    valid_opts.append('configdir')
def exit_with_usage(code=1):
    # Print the accepted flags to stderr and terminate with `code`.
    sys.stderr.write("Usage: {0} [{1}]\n".format(
        sys.argv[0], '|'.join('--'+opt for opt in valid_opts)))
    sys.exit(code)
try:
    opts, args = getopt.getopt(sys.argv[1:], '', valid_opts)
except getopt.error:
    exit_with_usage()
if not opts:
    exit_with_usage()
# Shorthands for querying the interpreter's build configuration.
pyver = sysconfig.get_config_var('VERSION')
getvar = sysconfig.get_config_var
opt_flags = [flag for (flag, val) in opts]
if '--help' in opt_flags:
    exit_with_usage(code=0)
# Emit the requested pieces of build information, one option at a time.
for opt in opt_flags:
    if opt == '--prefix':
        print(sysconfig.get_config_var('prefix'))
    elif opt == '--exec-prefix':
        print(sysconfig.get_config_var('exec_prefix'))
    elif opt in ('--includes', '--cflags'):
        flags = ['-I' + sysconfig.get_path('include'),
                 '-I' + sysconfig.get_path('platinclude')]
        if opt == '--cflags':
            flags.extend(getvar('CFLAGS').split())
        print(' '.join(flags))
    elif opt in ('--libs', '--ldflags'):
        abiflags = getattr(sys, 'abiflags', '')
        libs = ['-lpython' + pyver + abiflags]
        libs += getvar('LIBS').split()
        libs += getvar('SYSLIBS').split()
        # add the prefix/lib/pythonX.Y/config dir, but only if there is no
        # shared library in prefix/lib/.
        if opt == '--ldflags':
            if not getvar('Py_ENABLE_SHARED'):
                libs.insert(0, '-L' + getvar('LIBPL'))
            if not getvar('PYTHONFRAMEWORK'):
                libs.extend(getvar('LINKFORSHARED').split())
        print(' '.join(libs))
    elif opt == '--extension-suffix':
        ext_suffix = sysconfig.get_config_var('EXT_SUFFIX')
        if ext_suffix is None:
            ext_suffix = sysconfig.get_config_var('SO')
        print(ext_suffix)
    elif opt == '--abiflags':
        if not getattr(sys, 'abiflags', None):
            exit_with_usage()
        print(sys.abiflags)
    elif opt == '--configdir':
        print(sysconfig.get_config_var('LIBPL'))
| [
"mukherjeeankita60@gmail.com"
] | mukherjeeankita60@gmail.com | |
fb121a9a3c4898a93a4dd644bb433b8bec45bae3 | 2ca1720c6fde441a3692fa77fd9d60e8fc9e3a31 | /programs/file_owners.py | 34c528d98b0364200a31e6931c7d52ae25579164 | [] | no_license | navinas33/python_programs | e024c6293a6ff355e98789481e3c23d80b921fb8 | 18da3c27c08f6a2be52498fe26574e353d0109b2 | refs/heads/main | 2023-08-24T18:15:10.299426 | 2021-10-28T13:36:57 | 2021-10-28T13:36:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 800 | py | # Implement a group_by_owners function that:
#
# Accepts a dictionary containing the file owner name for each file name.
# Returns a dictionary containing a list of file names for each owner name, in any order.
# For example, for dictionary {'Input.txt': 'Randy', 'Code.py': 'Stan', 'Output.txt': 'Randy'} the group_by_owners function should return {'Randy': ['Input.txt', 'Output.txt'], 'Stan': ['Code.py']}.
def group_by_owners(files):
    """Invert a {file_name: owner} mapping into {owner: [file_names]}.

    File names keep the order in which they appear in `files`.
    """
    grouped = {}
    for file_name, owner in files.items():
        grouped.setdefault(owner, []).append(file_name)
    return grouped
if __name__ == "__main__":
    # Demo run; expected output:
    # {'Randy': ['Input.txt', 'Output.txt'], 'Stan': ['Code.py']}
    files = {
        'Input.txt': 'Randy',
        'Code.py': 'Stan',
        'Output.txt': 'Randy'
    }
    print(group_by_owners(files))
"navinkumar.a_s@nokia.com"
] | navinkumar.a_s@nokia.com |
b89525a6a71b8cc792f2e810cdd2533d6a458e5a | efb5aa8a065d8087690e0302838c66db1e69b8c9 | /payments/payments/urls.py | b725c1d1cfb6d46b9458ae5ffc55dc886638f592 | [] | no_license | Harivj18/Payment-gateway-integration | 721ccfe46c7f91f21e60732d0853df80622b7702 | 47502f64ef497dbc4ad9a25797135c36fee615d6 | refs/heads/main | 2023-08-17T15:34:52.639766 | 2021-09-17T11:52:46 | 2021-09-17T11:52:46 | 407,517,861 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 791 | py | """payments URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
# Route table: the Django admin plus the app's own URLs mounted at the root.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('',include('app.urls'))
]
| [
"86662698+Harivj18@users.noreply.github.com"
] | 86662698+Harivj18@users.noreply.github.com |
c3192ecfb8c725f98b311d74c51bb7580f217753 | 0764ea1804860e5e373ced6c972342c3be32161b | /package1/enrich.py | 7b2a3052eaff10a29a3942d8b9116e6f4a11c4a7 | [] | no_license | sacses/Ironhack-Module-1-Project---Pipeline-Project | 5fe7d049edc74ef168c913c2a093b77236631e5f | a34ca92cbe2cafc2aa596445ccee27cf4a1f2912 | refs/heads/master | 2022-12-11T20:45:10.839028 | 2020-02-28T13:52:49 | 2020-02-28T13:52:49 | 226,491,438 | 0 | 0 | null | 2022-07-06T20:27:11 | 2019-12-07T10:13:10 | Jupyter Notebook | UTF-8 | Python | false | false | 3,014 | py | import json
import requests
import pandas as pd
def lowercase_feature(df, col):
    """Return column `col` of `df` lower-cased element-wise (as a Series)."""
    column = df[col]
    return column.str.lower()
def export_csv(path, df, deli):
    """Write `df` to `path` as delimited text (delimiter `deli`), no index."""
    return df.to_csv(path_or_buf=path, sep=deli, index=False)
def extract_float(df, col, regex):
    """Extract the first `regex` group from column `col`, cast to float64."""
    extracted = df[col].str.extract(regex)
    return extracted.astype('float64')
def enrich(df_cleaned):
    """Join cleaned billionaire data with Wikipedia R&D spending figures and
    aggregate per-country statistics.

    Scrapes the R&D table, normalises it, left-joins it onto `df_cleaned` by
    country, saves intermediate and final CSVs, and returns the per-country
    summary DataFrame.
    """
    # Scrape the R&D spending table from Wikipedia (first table on the page).
    url = 'https://en.wikipedia.org/wiki/List_of_countries_by_research_and_development_spending'
    rnd_spending = pd.read_html(url)[0]
    export_csv(f'data/raw/enrich_input.csv', rnd_spending, ';')
    print('Enrich input is found in raw folder')
    # The real header sits in row 1; promote it and drop the header rows.
    rnd_spending.columns = rnd_spending.iloc[1]
    rnd_spending.drop(rnd_spending.index[0:2], inplace=True)
    rnd_spending.rename(columns={'Country/Region': 'country',
                                 'Expenditures on R&D (billions of US$, PPP),': 'r&dExpense_billions',
                                 '% of GDP PPP': '%GDP',
                                 'Expenditures on R&D per capita (US$ PPP),': 'r&dExpensePerCapita'}, inplace=True)
    # Normalise types: pull the numeric part of %GDP, cast the spend columns.
    rnd_spending['%GDP'] = extract_float(rnd_spending, '%GDP', r"(\d+.?\d*)")
    rnd_spending[['r&dExpense_billions', 'r&dExpensePerCapita']] = rnd_spending[
        ['r&dExpense_billions', 'r&dExpensePerCapita']].astype('float64')
    rnd_spending['country'] = lowercase_feature(rnd_spending, 'country')
    # Left-join on country; countries missing from Wikipedia get 0s below.
    merged_df = pd.merge(df_cleaned, rnd_spending[['country', 'r&dExpense_billions', '%GDP', 'r&dExpensePerCapita']],
                        on='country', how='left')
    merged_df[['r&dExpense_billions', '%GDP', 'r&dExpensePerCapita']] = merged_df[
        ['r&dExpense_billions', '%GDP', 'r&dExpensePerCapita']].fillna(0)
    # Aggregate per country, sorted by billionaire count.  NOTE(review): the
    # trailing [1:] drops the top row (country with most billionaires) --
    # confirm this is intentional.
    table_analysis = merged_df.groupby('country').agg(billionaires=('id', 'count'),
                                                      age=('age', 'mean'),
                                                      gender=('gender', 'mean'),
                                                      mean_billionaire_pos=('position', 'mean'),
                                                      median_billionaire_pos=('position', 'median'),
                                                      total_billionaire_worth=('worth', 'sum'),
                                                      mean_billionaire_worth=('worth', 'mean'),
                                                      median_billionaire_worth=('worth', 'median'),
                                                      rd_expense_billions=('r&dExpense_billions', 'max'),
                                                      rd_expense_capita=('r&dExpensePerCapita', 'max'),
                                                      percent_GDP=('%GDP', 'max')).sort_values(by='billionaires',
                                                                                               ascending=False)[
        1:].reset_index()
    export_csv(f'data/processed/enriched_df.csv', table_analysis, ';')
    print('Enriched DataFrame can be found in processed folder')
    return table_analysis
| [
"fcodpaula.tamarit@gmail.com"
] | fcodpaula.tamarit@gmail.com |
e04ad62d77e4ef0fac9e099d6f69b7b3bbd9942b | c2ee0110335b7810e8257a67e1260499824b7755 | /Car.py | ecbb6c6b0df16a6241f60e71015b54c86b3571e5 | [] | no_license | notkevin1/T2V | f0b8c54b9504372f6475935429d61bb0e80f5404 | 51a57d8dbd290f6f363c55f8d9fb2a2be07c4b11 | refs/heads/master | 2022-12-01T01:46:00.763940 | 2020-08-04T22:00:05 | 2020-08-04T22:00:05 | 285,109,597 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 610 | py | import bluetooth
class Car:
    """A Bluetooth-controlled car identified by an RFID tag.

    Wraps an RFCOMM socket; connect() and send() report failures by printing
    the exception instead of raising (best-effort behaviour).
    """
    def __init__(self, btAddress, RFIDid):
        self.btAddress = btAddress
        self.RFIDid = RFIDid
        self.btSocket = bluetooth.BluetoothSocket(bluetooth.RFCOMM)
    def connect(self):
        """Open the RFCOMM connection on channel 1 (best effort)."""
        try:
            self.btSocket.connect((self.btAddress, 1))
        except Exception as err:
            print(err)
    def send(self, command):
        """Transmit `command` framed as $<command># (best effort)."""
        try:
            self.btSocket.send('$' + command + '#')
        except Exception as err:
            print(err)
    def getBTAddress(self):
        """Return the Bluetooth address this car was created with."""
        return self.btAddress
    def getRFIDid(self):
        """Return the RFID tag id this car was created with."""
        return self.RFIDid
"noreply@github.com"
] | notkevin1.noreply@github.com |
4db616cd5fc1beee21a747beae32fd9ffbb06fb1 | a282dcc225273e5d4482f8468077007ece25830e | /Engine/src/Core/KeyCodes/KeyCode.py | 0ca6785f8ad775443f1dc594a975c75131cf36be | [] | no_license | WolfenGames/unnamed_project_python | 8b0d82159613b55651a2397b8906ab21d4ed2864 | 18630fa017706ad53c46448a840cd3441d663dac | refs/heads/main | 2023-03-14T12:02:26.572690 | 2021-03-10T10:05:01 | 2021-03-10T10:05:01 | 327,002,960 | 1 | 0 | null | 2021-03-10T10:05:02 | 2021-01-05T13:12:12 | Python | UTF-8 | Python | false | false | 2,198 | py | from enum import Enum
class KeyCode(Enum):
    """GLFW-style keyboard key codes.

    BUG FIX: members were originally written as `Space = 32,` (trailing
    comma), which made every value a one-element tuple `(32,)` instead of the
    integer key code -- and inconsistent with `Menu = 348`, which had no
    comma.  Member identity and comparisons still worked, but
    `KeyCode.X.value` did not match the underlying key code.  All values are
    now plain ints.
    """
    Space = 32
    Apostrophe = 39    # '
    Comma = 44         # ,
    Minus = 45         # -
    Period = 46        # .
    Slash = 47         # /
    D0 = 48            # 0
    D1 = 49            # 1
    D2 = 50            # 2
    D3 = 51            # 3
    D4 = 52            # 4
    D5 = 53            # 5
    D6 = 54            # 6
    D7 = 55            # 7
    D8 = 56            # 8
    D9 = 57            # 9
    Semicolon = 59     # ;
    Equal = 61         # =
    A = 65
    B = 66
    C = 67
    D = 68
    E = 69
    F = 70
    G = 71
    H = 72
    I = 73
    J = 74
    K = 75
    L = 76
    M = 77
    N = 78
    O = 79
    P = 80
    Q = 81
    R = 82
    S = 83
    T = 84
    U = 85
    V = 86
    W = 87
    X = 88
    Y = 89
    Z = 90
    LeftBracket = 91   # [
    Backslash = 92     # \
    RightBracket = 93  # ]
    GraveAccent = 96   # `
    World1 = 161       # non-US #1
    World2 = 162       # non-US #2
    # Function keys
    Escape = 256
    Enter = 257
    Tab = 258
    Backspace = 259
    Insert = 260
    Delete = 261
    Right = 262
    Left = 263
    Down = 264
    Up = 265
    PageUp = 266
    PageDown = 267
    Home = 268
    End = 269
    CapsLock = 280
    ScrollLock = 281
    NumLock = 282
    PrintScreen = 283
    Pause = 284
    F1 = 290
    F2 = 291
    F3 = 292
    F4 = 293
    F5 = 294
    F6 = 295
    F7 = 296
    F8 = 297
    F9 = 298
    F10 = 299
    F11 = 300
    F12 = 301
    F13 = 302
    F14 = 303
    F15 = 304
    F16 = 305
    F17 = 306
    F18 = 307
    F19 = 308
    F20 = 309
    F21 = 310
    F22 = 311
    F23 = 312
    F24 = 313
    F25 = 314
    # Keypad
    KP0 = 320
    KP1 = 321
    KP2 = 322
    KP3 = 323
    KP4 = 324
    KP5 = 325
    KP6 = 326
    KP7 = 327
    KP8 = 328
    KP9 = 329
    KPDecimal = 330
    KPDivide = 331
    KPMultiply = 332
    KPSubtract = 333
    KPAdd = 334
    KPEnter = 335
    KPEqual = 336
    LeftShift = 340
    LeftControl = 341
    LeftAlt = 342
    LeftSuper = 343
    RightShift = 344
    RightControl = 345
    RightAlt = 346
    RightSuper = 347
    Menu = 348
| [
"julian.w16@gmail.com"
] | julian.w16@gmail.com |
ed0549a98330baa914a7eb5ee44017339d97b6fd | a96548da21c01e63f23f49a6dd88514d6fd8b8ca | /kNN for chapter2.py | 8ce4ea0f50b0c651a523c17b93b4c605931c40df | [] | no_license | PureWaterLove/MachineLearningInAction | 3e8724db8a6f121a1e2eedd04e7ff1115f6343db | 39d833e08f1e5bbbced9683d37a03f9f15b9d161 | refs/heads/master | 2020-04-20T16:27:45.293354 | 2019-04-12T00:48:38 | 2019-04-12T00:48:38 | 168,959,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,688 | py | #coding=utf-8
from numpy import * # ็งๅญฆ่ฎก็ฎๅ
numpy
from os import listdir
import operator #่ฟ็ฎ็ฌฆๆจกๅ
import matplotlib
import matplotlib.pyplot as plt
#from imp import reload
#ๆต่ฏๆฐๆฎ
def createDataSet():
    """Return a tiny toy data set: four 2-D points and their class labels."""
    group = array([[1.0, 1.1],
                   [1.0, 1.0],
                   [0, 0],
                   [0, 0.1]])
    labels = ['A', 'A', 'B', 'B']
    return group, labels
#ๅ็ฑปๅฝๆฐ
def classify0(inX, dataSet, labels, k):
    """Classify inX by majority vote among its k nearest training points.

    dataSet is an (n, d) array, labels a length-n sequence; returns the label
    with the highest vote count among the k Euclidean-nearest rows.
    """
    # Euclidean distance from inX to every training row (via broadcasting).
    distances = (((dataSet - inX) ** 2).sum(axis=1)) ** 0.5
    nearest = distances.argsort()
    # Tally the labels of the k closest points.
    votes = {}
    for rank in range(k):
        label = labels[nearest[rank]]
        votes[label] = votes.get(label, 0) + 1
    # Highest count first; stable sort keeps first-seen label on ties.
    ranked = sorted(votes.items(), key=operator.itemgetter(1), reverse=True)
    return ranked[0][0]
#ๆๆฌ่ฎฐๅฝ่งฃๆ็จๅบ
def file2matrix(filename):
    """Parse a tab-separated dating-data file into features and labels.

    Each line holds three numeric features followed by an integer class
    label.  Returns (returnMat, classLabelVector).
    """
    # Open the file and count how many lines it has.
    fr = open(filename)
    arrayOLines = fr.readlines()
    numberOfLines = len(arrayOLines)
    # Pre-allocate the NumPy matrix to return: one row per line, 3 features.
    returnMat = zeros((numberOfLines,3))
    classLabelVector = []
    index = 0
    # Parse each line into the feature matrix and the label list.
    for line in arrayOLines:
        line = line.strip() # strip the trailing newline
        listFromLine = line.split('\t') # split the row on tabs
        returnMat[index,:] = listFromLine[0:3] # first 3 fields are features
        classLabelVector.append(int(listFromLine[-1])) # last field is the label
        index += 1
    return returnMat,classLabelVector
#ๅฝไธๅ็นๅพๅผ
def autoNorm(dataSet):
    """Scale every feature column of dataSet into [0, 1].

    Returns (normalised array, per-column range, per-column minimum).
    """
    col_min = dataSet.min(0)
    col_max = dataSet.max(0)
    ranges = col_max - col_min
    # Broadcasting subtracts/divides the per-column stats across all rows.
    normed = (dataSet - col_min) / ranges
    return normed, ranges, col_min
#ๆต่ฏไปฃ็
def datingClassTest():
    """Hold out the first 2% of the dating data as a test set and report the
    kNN (k=3) error rate; reads 'datingTestSet2.txt' from the working dir."""
    hoRatio = 0.02
    datingDataMat,datingLabels = file2matrix('datingTestSet2.txt')
    normMat,ranges,minVals = autoNorm(datingDataMat)
    m = normMat.shape[0]
    numTestVecs = int(m * hoRatio)
    errorCount = 0.0
    for i in range(numTestVecs):
        # Classify each held-out row against the remaining 98% of the data.
        classifierResult = classify0(normMat[i,:],normMat[numTestVecs:m,:],datingLabels[numTestVecs:m],3)
        #print "the classifier came back with: %d, the real answer is : %d" % (classifierResult,datingLabels[i])
        print('the classifier came back with: {}, the real answer is : {}'.format(classifierResult,datingLabels[i]))
        if(classifierResult != datingLabels[i]):
            errorCount += 1.0
    #print "the total error rate is : %f" %(errorCount/float(numTestVecs))
    print('the total error rate is : {}'.format(errorCount/float(numTestVecs)))
#็บฆไผ็ฝ็ซ้ขๆตๅฝๆฐ
def classifyPerson():
    """Interactively read three features and predict how much the user would
    like the person; trains on 'datingTestSet2.txt'."""
    resultList = ['not at all','in small doses','in large doses']
    percentTats = float(input('percentage of time spent playing video games?'))
    ffMiles = float(input('frequent flier miles earned per years?'))
    iceCream = float(input('liters of ice cream consumed per year'))
    datingDataMat,datingLabels = file2matrix('datingTestSet2.txt')
    normMat,ranges,minVals = autoNorm(datingDataMat)
    inArr = array([ffMiles,percentTats,iceCream])
    # Normalise the query with the training min/range before classifying.
    classifierResult = classify0((inArr - minVals)/ranges,normMat,datingLabels,3)
    print('You will probably like this person : {}'.format(resultList[classifierResult - 1]))
#ๅฐไบ่ฟๅถๅพๅ่ฝฌๅไธบๅ้
def img2vector(filename):
    """Flatten a 32x32 text image of '0'/'1' characters into a 1x1024 vector.

    Each of the first 32 lines contributes its first 32 characters, row-major.
    """
    returnVect = zeros((1, 1024))
    # BUG FIX: the file handle was never closed; `with` guarantees cleanup.
    with open(filename) as fr:
        for i in range(32):
            lineStr = fr.readline()
            for j in range(32):
                returnVect[0, 32 * i + j] = int(lineStr[j])
    return returnVect
#ๆๅ่ฏๅซ็ณป็ปๆต่ฏไปฃ็
def handwritingClassTest():
    """Train kNN on digits/trainingDigits, then classify digits/testDigits
    and print the error rate.

    File names look like '<digit>_<sample>.txt'; the digit is the label.
    """
    hwLabels = []
    trainingFileList = listdir('digits/trainingDigits')  # training file names
    m = len(trainingFileList)
    trainingMat = zeros((m, 1024))  # one flattened 32x32 image per row
    for i in range(m):
        # Parse the class label out of the file name.
        fileNameStr = trainingFileList[i]
        fileStr = fileNameStr.split('.')[0]
        classNumberStr = int(fileStr.split('_')[0])
        hwLabels.append(classNumberStr)
        trainingMat[i,:] = img2vector('digits/trainingDigits/%s' % fileNameStr)
    testFileList = listdir('digits/testDigits')
    errorCount = 0.0
    mTest = len(testFileList)
    for i in range(mTest):
        fileNameStr = testFileList[i]
        # BUG FIX: this split used ',' instead of '.' (unlike the training
        # loop above); it only worked by accident because the label precedes
        # the first '_' anyway.
        fileStr = fileNameStr.split('.')[0]
        classNumberStr = int(fileStr.split('_')[0])
        vectorUnderTest = img2vector('digits/testDigits/%s' % fileNameStr)
        classifierResult = classify0(vectorUnderTest, trainingMat, hwLabels, 3)
        print('the classifier came back with : {}, the real answer is : {}'.format(classifierResult, classNumberStr))
        if(classifierResult != classNumberStr):
            errorCount += 1.0
    print('\nthe total number of errors is : {}'.format(errorCount))
    print('\nthe total error rate is : {}'.format(errorCount/float(mTest)))
"""ไปฅไธไปฃ็ ๅไธบ่ฐ่ฏๅฝๆฐๆ็จไปฃ็ ๏ผ้่ฆๆถๅๅบๅณๅฏ
#group,labels = createDataSet()
#print(classify0([0,0],group,labels,3))
#reload(kNN)
#ๅ ่ฝฝๆฐๆฎ้
#datingDataMat,datingLabels = file2matrix('datingTestSet2.txt')
#print(datingDataMat)
#print(datingLabels[0:20])
#ๆฐๆฎ้ๅพๅๅ
#fig = plt.figure()
#ax = fig.add_subplot(111)
#ax.scatter(datingDataMat[:,1],datingDataMat[:,2]) #ๆ ๆ ่ฎฐๆฃ็นๅพ
#ax.scatter(datingDataMat[:,1],datingDataMat[:,2],15.0*array(datingLabels),15.0*array(datingLabels)) #ๆๆ ่ฎฐๆฃ็นๅพ
#plt.show()
normMat10,ranges,minVals = autoNorm(datingDataMat)
print("normMat = ")
print(normMat)
print("ranges = ")
print(ranges)
print("minVals = ")
print(minVals)
#datingClassTest()
#classifyPerson()
testVector = img2vector('digits/testDigits/0_1.txt')
print(testVector[0,0:31])
print(testVector[0,32:63])
"""
handwritingClassTest()
| [
"noreply@github.com"
] | PureWaterLove.noreply@github.com |
2304d18e6fa69e2a6cf6dec70f35a20bf6c01843 | e23de36d3d78a6517644f91e562dce8d6d5455ca | /pedidos/views.py | 981c048321177c95642d415a8b8432093c5b2f6d | [] | no_license | lyralemos/danubio-DEPRECATED | 8f0dab10cb142c25c68b00aa1f6b52daf707af37 | d31b3616eba7b3919aec429597cbdf892f21e0bc | refs/heads/master | 2021-05-28T06:20:25.296195 | 2013-08-24T20:33:48 | 2013-08-24T20:33:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,840 | py | from django.utils import simplejson
from django.http import HttpResponse, HttpResponseRedirect
from django.core.urlresolvers import reverse, reverse_lazy
from django.views.generic import TemplateView, ListView, CreateView, UpdateView, DetailView
from django.db.models import Q
from extra_views import CreateWithInlinesView, UpdateWithInlinesView, InlineFormSet
from models import Cliente,Endereco,Produto,Pedido,PedidoProduto
from forms import PedidoForm, PedidoProdutoForm, ProdutoForm
class IndexView(TemplateView):
    """Landing page listing the five most recent clients, products, orders."""
    template_name = "pedidos/index.html"
    def get_context_data(self, **kwargs):
        context = super(IndexView, self).get_context_data(**kwargs)
        context['cliente_list'] = Cliente.objects.all()[:5]
        context['produto_list'] = Produto.objects.all()[:5]
        context['pedido_list'] = Pedido.objects.all()[:5]
        return context
class SearchListView(ListView):
    """Generic list filtered by an optional ?search= substring on `nome`."""
    def get_queryset(self):
        search = self.request.GET.get('search',None)
        if search:
            return self.model.objects.filter(nome__contains=search)
        return self.model.objects.all()
class SearchPedidosView(ListView):
    """Order list with free-text (?q=), status (?status=) and ordering
    (?order=) query parameters."""
    def get_queryset(self):
        termo = self.request.GET.get('q',None)
        status = self.request.GET.get('status',None)
        order = self.request.GET.get('order',None)
        search = self.model.objects.all()
        if termo:
            # Match either the client's name or the order's primary key.
            search = search.filter(Q(cliente__nome__contains=termo) | Q(pk__contains=termo))
        if status:
            # NOTE(review): status filtering is not implemented yet.
            pass
        if order:
            search = search.order_by(order)
        return search
class EnderecoInline(InlineFormSet):
    # Inline formset so addresses can be edited on the client's form.
    model = Endereco
class CreateClienteView(CreateWithInlinesView):
    """Create a client together with its inline addresses."""
    model = Cliente
    inlines = [EnderecoInline]
class UpdateClienteView(UpdateWithInlinesView):
    """Edit a client together with its inline addresses."""
    model = Cliente
    inlines = [EnderecoInline]
class PedidoProdutoInline(InlineFormSet):
    # Inline formset for the order's line items (up to 10 blank rows).
    model = PedidoProduto
    form_class = PedidoProdutoForm
    extra = 10
class CreatePedidoView(CreateWithInlinesView):
    """Create an order with its line items; redirects to the print view."""
    model = Pedido
    form_class = PedidoForm
    inlines = [PedidoProdutoInline]
    def get_success_url(self):
        print self
        return reverse_lazy('imprimir_pedido_view',args=[self.object.id])
class UpdatePedidoView(UpdateWithInlinesView):
    """Edit an order with its line items; redirects to the print view."""
    model = Pedido
    form_class = PedidoForm
    inlines = [PedidoProdutoInline]
    def get_success_url(self):
        return reverse_lazy('imprimir_pedido_view',args=[self.kwargs['pk']])
class CreateProdutoView(CreateView):
    """Create a product using the shared generic form template."""
    model = Produto
    form_class = ProdutoForm
    success_url=reverse_lazy('produtos_view')
    template_name='pedidos/form.html'
class UpdateProdutoView(UpdateView):
    """Edit a product using the shared generic form template."""
    model = Produto
    form_class = ProdutoForm
    success_url=reverse_lazy('produtos_view')
    template_name='pedidos/form.html'
class ComprovanteView(DetailView):
    """Receipt page for one order; context carries the order's line items."""
    template_name='pedidos/comprovante.html'
    def get_context_data(self,*args, **kwargs):
        context = super(ComprovanteView, self).get_context_data(*args,**kwargs)
        context['itens_pedido'] = PedidoProduto.objects.filter(pedido__pk=self.kwargs['pk'])
        context['total'] = len(context['itens_pedido'])
        # NOTE(review): 'repeat' appears to make the template render the
        # receipt twice -- confirm against comprovante.html.
        context['repeat'] = range(2)
        return context
def get_price(request,pk):
    """AJAX endpoint: return {pk, price} for product `pk` as JSON."""
    produto = Produto.objects.get(pk=pk)
    results = simplejson.dumps(
        {
            'pk': produto.pk,
            'price': float(produto.preco)
        }
    )
    return HttpResponse(results, mimetype='application/javascript')
def get_endereco(request,pk):
    """AJAX endpoint: list a client's addresses as [{pk, nome}, ...] JSON."""
    enderecos = Endereco.objects.filter(cliente__pk=pk)
    results = simplejson.dumps([
        {
            'pk' : endereco.pk,
            'nome' : endereco.__unicode__()
        } for endereco in enderecos
    ])
    return HttpResponse(results, mimetype='application/javascript')
def modificar_status(request,pk):
    """Apply a status action (?acao=) to an order, then return to the list."""
    pedido = Pedido.objects.get(pk=pk)
    acao = request.GET.get('acao')
    if acao == '5':
        # Action '5': mark the order as fully paid.
        pedido.valor_pago = pedido.total()
    elif acao == '2':
        # Action '2': mark the order as delivered.
        pedido.entregue = True
    pedido.save()
    return HttpResponseRedirect(reverse('pedidos_view'))
def imprimir(request,pk):
    # NOTE(review): stub -- returns an empty response; printing not implemented.
    return HttpResponse()
"lyralemos@gmail.com"
] | lyralemos@gmail.com |
5c2482df35a2b3e2793446e744596a4eff53075d | 920ab19b73a7cba21d340a49d9d24e2d1eeabf3d | /idpsreact/bin/automat-visualize | 518eafa6739f15f864b7d8624057a1b909d8f1e5 | [
"MIT"
] | permissive | DTrafford/IDPS | 5fa2b73f2c47cbf50b90a1a786c10f7d69c995b4 | 1eaccfc218adcb7231e64271731c765f8362b891 | refs/heads/master | 2022-12-16T16:28:34.801962 | 2020-03-30T18:08:09 | 2020-03-30T18:08:09 | 234,163,829 | 0 | 0 | MIT | 2020-09-10T06:26:02 | 2020-01-15T20:10:09 | Python | UTF-8 | Python | false | false | 281 | #!/Users/sangit/Downloads/django-react-boilerplate-master/idpsreact/bin/python3
# -*- coding: utf-8 -*-
# Console-script shim for automat's state-machine visualizer.
import re
import sys
from automat._visualize import tool
if __name__ == '__main__':
    # Strip any '-script.pyw'/'.exe' wrapper suffix from argv[0] so usage
    # messages show the clean command name, then hand off to the tool.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(tool())
| [
"d.trafford@outlook.com"
] | d.trafford@outlook.com | |
91cf6e1cd22192ef76a60d109e11794b8da0cc6f | 419855e4815dce0170e4a791716aee769847b5c7 | /social_network/social_network/settings.py | a7edcc7ebc778b2c59d932a6a40b8ef07bc45709 | [] | no_license | JanaStepanchak/social-network | e244d335378445aa20850850f38374678cafe7df | 4488fbeb8530cdd033adc4e284546ab081f67b44 | refs/heads/master | 2021-09-01T10:01:58.983269 | 2017-10-03T21:52:50 | 2017-10-03T21:52:50 | 114,982,206 | 0 | 0 | null | 2017-12-26T10:17:29 | 2017-12-21T08:32:45 | Python | UTF-8 | Python | false | false | 3,276 | py | """
Django settings for social_network project.
Generated by 'django-admin startproject' using Django 1.11.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'm#$)88c)t=zyhf+v8zdecbjf6jyqbc1@f7dj-bd600w71a^bw*'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'social_network_app',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'social_network.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'social_network/templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'social_network.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'social_network_app/static/')
]
| [
"dmytro.revak@gmail.com"
] | dmytro.revak@gmail.com |
480887a024ca94ee43f737c8c35a5981cb2c8cb4 | 79f8dde6225a1836ec15cb2c1488be9df368cc9a | /pipeline-code/main.py | f6d859e8d62f2e94f9650b03fae14018dea02aae | [] | no_license | sigamani/technical-test | 7c2590459493ce00baec1bddb83d4a2c5304447e | 427bead82b46b4ccb3c702f7911e76a73e765133 | refs/heads/main | 2023-08-29T14:13:58.173156 | 2021-10-17T14:27:20 | 2021-10-17T14:27:20 | 417,606,509 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,333 | py | from argparse import ArgumentParser
import pandas as pd
from datetime import date
from modules.response_processing import load_data, clean_data, add_weights, save_to_output
from modules.clustering import kmodes_clustering, process, rules_segmentation
from modules.salient_questions import SalientQuestions
def preprocess_data(df: pd.DataFrame) -> pd.DataFrame:
    """Clean the raw responses, attach demographic weights, save, return."""
    # Clean data
    response_data = clean_data.main(df=df)
    # Add weights
    response_data = add_weights.main(data=response_data,
                                     gender_col="DEM_WW_GENDER_RB_L_v3_14072020",
                                     age_col="DEM_WW_AGE_DM_L_v1_14072020",
                                     region_col="REGION")
    # Save data
    response_data = save_to_output.processed_responses(data=response_data,
                                  file_name=f'processed_responses_{date_str}.csv') if False else response_data
    save_to_output.processed_responses(data=response_data,
                                  file_name=f'processed_responses_{date_str}.csv')
    return response_data
def segmentation(df: pd.DataFrame, segmentation_type='kmodes', **kwargs) -> pd.DataFrame:
    """If segmentation_type = 'rules' additional argument required,
    *segmentation_cols: str type, column to segment on e.g. 'DEM_WW_GENDER_RB_L_v3_14072020'

    NOTE(review): any other segmentation_type silently returns an empty frame.
    """
    segmented_data = pd.DataFrame()
    # Automatic segmentation
    if segmentation_type == 'kmodes':
        # Run analysis
        clustering_data, removed_cols = process.clean_up(df)
        cluster_sizes, clustered_df = kmodes_clustering.set_up_and_cluster(clustering_data, cluster_vars = 'all',
                                                                           override_n_clusters = None)
        segmented_data = kmodes_clustering.return_full_clustered_df(clustered_df, df, removed_cols)
    # Rules-based segmentation
    elif segmentation_type == 'rules':
        segmented_data = rules_segmentation.segmentation(df, segmentation_cols = kwargs['segmentation_cols'])
    # Save data
    save_to_output.processed_responses(data = segmented_data, file_name = f'clustered_responses_{date_str}.csv')
    return segmented_data
def make_report(df: pd.DataFrame) -> pd.DataFrame:
    """Compute per-segment summary statistics, save them, and return them."""
    print("Instantiate class..")
    # Create instance
    salient_feats = SalientQuestions(df)
    print("Class has been instantiated")
    # Calc discover & deliver stats: seg_col= 'cluster' for auto clustering or 'question_code' for rules based
    salient_feats.create_summary_stats_df(seg_col = 'cluster')
    print("Summary stats have been calculated.")
    # Save data
    save_to_output.processed_responses(data = salient_feats.summary_stats,
                                       file_name = f'cluster_summary_stats_{date_str}.csv')
    return salient_feats.summary_stats
if __name__ == "__main__":
    parser = ArgumentParser(description="Run the sentiment model")  # todo: update appropriately
    parser.add_argument('mode', choices=['process', 'segment', 'report'], default='process',
                        help='Choose the mode: process, segment, or report.')
    parser.add_argument('-s', '--segmentation_type', choices=['kmodes', 'rules'], default='kmodes',
                        help="Choose the type of segmentation you'd like to run: kmodes or rules.")
    parser.add_argument('-c', '--segmentation_cols', nargs='+')  # Takes 1 or more column names (str)
    args = parser.parse_args()

    today = date.today()
    date_str = today.strftime("%b-%d-%Y").replace('-', '_')
    print(f"\nToday's date is '{date_str}'\n")

    if args.mode == 'process':
        data = load_data.response_data(id_column='ID')
        preprocess_data(df=data)

    if args.mode == 'segment':
        data = pd.read_csv(f"data/processed_data/response_data/processed_responses_{date_str}.csv")
        print("\nData upload successful.\n")
        # e.g. python main.py segment 'rules' -c 'Go City:AIDA_WW_ABA_IMS_07062021' 'DEM_WW_GENDER_RB_L_v3_14072020'
        segmented_df = segmentation(data, args.segmentation_type, segmentation_cols=args.segmentation_cols)
        print("Segmentation complete. \n")

    if args.mode == 'report':
        # BUG FIX: reading the CSV is the step that fails when the clustering
        # step has not run, so it belongs inside the try; catch only
        # FileNotFoundError instead of a bare except that would also hide
        # genuine errors raised by make_report.
        try:
            data = pd.read_csv(f'data/processed_data/response_data/clustered_responses_{date_str}.csv')
            summary_stats = make_report(data)
            print("Reporting complete. \n", summary_stats.head())
        except FileNotFoundError:
            print('Data not found. You must run the clustering step first.')
| [
"noreply@github.com"
] | sigamani.noreply@github.com |
35735a0f868b7b08da72f3eb034598d7ec4f2e66 | 76c7687d8ea81b8595b08dbdaf50c38e7757e69f | /app/views.py | 3d0deacf75ac2cdb502d8852d8017d61201c2046 | [] | no_license | alexkorentsvit/Parser_and_Flask | 94285948ba68ceaa2fa00a82ad63e02b276515e2 | 6d41b78db7028602b8162a838d2f283b9caae808 | refs/heads/master | 2022-10-21T14:49:20.164036 | 2017-11-13T23:44:18 | 2017-11-13T23:44:18 | 110,615,640 | 0 | 1 | null | 2022-10-07T21:45:12 | 2017-11-13T23:43:38 | Python | UTF-8 | Python | false | false | 888 | py | from flask import render_template, redirect, session, request
from app import app
import psycopg2
@app.route('/', methods = ['GET'])
@app.route('/Table_LE', methods = ['GET'])
def Table():
    """Render up to the first 500 rows of UO_TABLE in the Table_LE template.

    On a failed database connection an empty table is rendered instead of
    crashing (the original dereferenced `cur` even when connect had failed,
    raising NameError), and the connection is always closed (the original
    leaked it).
    """
    # SECURITY NOTE: credentials are hard-coded here; move them to config.
    data = []
    try:
        conn = psycopg2.connect("dbname='uo_db2' user='alex_korentsvit' host='localhost' password='qwerty'")
    except psycopg2.Error:
        # Narrowed from a bare `except:` so programming errors still surface.
        print ("I am unable to connect to the database")
    else:
        print('successfully connected to the database')
        try:
            cur = conn.cursor()
            cur.execute("SELECT id, EDRPOU_code, Name, State FROM UO_TABLE")
            counter = 0
            for record in cur:
                data.append(record)
                counter += 1
                if counter == 500:
                    break
        finally:
            # BUG FIX: the original never released the connection.
            conn.close()
    return render_template('Table_LE.html',
                           title = 'Table',
                           data = data)
| [
"alex.dota@gmail.com"
] | alex.dota@gmail.com |
cd3c7e62cee0966f54c2fdde7eececa025b438f7 | ca22e34e15e4bef166adbb3376d89c4f841c9acf | /w7/A3.py | 37f12aabaa4a5db589f560c1a3581b3276b8ceaa | [] | no_license | alekseik1/coursera_statistical_mechanics | 599ee7252b7779123aa2e237a632aa894f00e470 | 6f001d41236230dfe15eef3e99d8185e6741031f | refs/heads/master | 2022-11-25T22:10:57.903326 | 2020-07-29T18:25:39 | 2020-07-29T18:25:39 | 198,106,837 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,769 | py | import math, random, pylab
def levy_harmonic_path(k):
    """Sample k (1 or 2) positions of a harmonic-oscillator path via the
    Levy construction. Reads the module-level inverse temperature ``beta``."""
    sigma_first = 1.0 / math.sqrt(2.0 * math.tanh(k * beta / 2.0))
    positions = [random.gauss(0.0, sigma_first)]
    if k == 2:
        # Conditional Gaussian for the second bead given the first one.
        precision = 2.0 / math.tanh(beta)
        drift = 2.0 * positions[0] / math.sinh(beta)
        positions.append(random.gauss(drift / precision, 1.0 / math.sqrt(precision)))
    return positions[:]
def rho_harm_1d(x, xp, beta):
    """Harmonic-oscillator density-matrix element rho(x, xp; beta) in 1D
    (up to a beta-dependent normalization)."""
    half_tanh = math.tanh(beta / 2.0)
    symmetric_part = (x + xp) ** 2 / 4.0 * half_tanh
    antisymmetric_part = (x - xp) ** 2 / 4.0 / half_tanh
    return math.exp(- symmetric_part - antisymmetric_part)
beta = 0.1        # inverse temperature
nsteps = 1000000  # number of Monte Carlo sweeps
# x maps each particle's position to its permutation partner's position:
# x[a] == a means `a` is in an identity cycle, otherwise the two particles
# form a single exchange (two-)cycle. Start from a two-cycle sample.
low_1, low_2 = levy_harmonic_path(2)
x = {low_1: low_1, low_2: low_2}
data = []
for step in xrange(nsteps):
    # move 1: resample positions directly from the Levy harmonic
    # distribution (one particle alone, or the whole two-cycle).
    a = random.choice(x.keys())
    if a == x[a]:
        dummy = x.pop(a)
        a_new = levy_harmonic_path(1)[0]
        x[a_new] = a_new
    else:
        a_new, b_new = levy_harmonic_path(2)
        x = {a_new: b_new, b_new: a_new}
    # move 2: propose swapping the two partners; accept with the Metropolis
    # ratio of density-matrix weights.
    (low1, high1), (low2, high2) = x.items()
    weight_old = rho_harm_1d(low1, high1, beta) * rho_harm_1d(low2, high2, beta)
    weight_new = rho_harm_1d(low1, high2, beta) * rho_harm_1d(low2, high1, beta)
    if random.uniform(0.0, 1.0) < weight_new / weight_old:
        x = {low1: high2, low2: high1}
    # Record the inter-particle distance (Python 2: dict.keys() is a list).
    data.append(abs(x.keys()[1] - x.keys()[0]))
pylab.hist(data, normed=True, label='Distance distribution', bins=100)
# Analytical
def prob_r_distinguishable(r, beta):
    """Analytic probability density of the distance r between two
    distinguishable particles in a 1D harmonic trap at inverse temperature beta."""
    sigma = math.sqrt(2.0) / math.sqrt(2.0 * math.tanh(beta / 2.0))
    normalization = math.sqrt(2.0 / math.pi) / sigma
    return normalization * math.exp(- r ** 2 / 2.0 / sigma ** 2)
# Overlay the analytic distance distribution on top of the sampled histogram.
x_data = [i/10. for i in range(0, 1000)]
pylab.plot(x_data, [prob_r_distinguishable(x, beta) for x in x_data], label='analytical')
pylab.xlim(0, 25)
pylab.legend()
pylab.savefig('A3.png')
pylab.show()
pylab.close()
| [
"1alekseik1@gmail.com"
] | 1alekseik1@gmail.com |
b567461b2f900459393c25e3cd85fd539ba112be | 6a921cd74624ff046c67354d4cb23460f65b6ad2 | /data_postp/scores.py | 59475e323c6a2eeef90783f58b4a9e4b15ffc39c | [
"MIT"
] | permissive | furushchev/DeepEpisodicMemory | 8531db7f80bf3dbfb9ece083c6fd6b0e5161e960 | 0088f3393de549127cd0739298081637a38fb58a | refs/heads/master | 2020-03-21T06:44:55.813040 | 2018-05-30T18:40:14 | 2018-05-30T18:40:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,076 | py | import numpy as np
import pandas as pd
import sklearn
from data_postp import similarity_computations
from tensorflow.python.platform import flags
FLAGS = flags.FLAGS
def compute_mean_average_precision(df_base, df_query, n_closest_matches=5):
  """
  This function computes the mean average precision (MAP) for a set of queries specified by df_query. The average precision
  scores for each query are hereby computed based on the provided base instances specified in df_base. For each query,
  the nearest neighbor instances within the base are determined and used to compute the precision.
  :param df_base: the dataframe to be queried, must contain a 'hidden_repr' column that constitutes the hidden_representation vector
  :param df_query: the dataframe from which to query, must contain a 'hidden_repr' column
  :param n_closest_matches: number of closest matches to the query that goes into the precision score
  :return: a scalar value representing the MAP
  """
  assert not df_base.empty and not df_query.empty

  df = get_query_matching_table(df_base=df_base, df_query=df_query, n_closest_matches=n_closest_matches)
  df_pred_classes = df.filter(like="pred_class")
  n_relevant_documents = len(df_pred_classes.columns)
  matches = df_pred_classes.isin(df.true_class).as_matrix()

  # Precision@k per query: number of hits among the first k predictions, over k.
  # BUG FIX: the previous loop assigned np.mean(matches[:, :k]) to column k,
  # which left P[:, 0] permanently zero and excluded document k from its own
  # precision score (an off-by-one over the ranks).
  P = np.cumsum(matches, axis=1) / np.arange(1.0, n_relevant_documents + 1)

  return np.mean(np.multiply(P, matches))
def get_query_matching_table(df_base, df_query, class_column='category', n_closest_matches=5, df_true_label="true_class",
                             df_pred_label="pred_class_", df_query_label="category", df_query_id="id"):
  """
  Yields a pandas dataframe in which each row contains the n_closest_matches as a result from querying the df for every single
  hidden representation in the df dataframe. In addition, every row contains the true label and the query id.
  :param df_base: the df to be queried
  :param df_query: the df from which to query
  :return: pandas dataframe with columns ("id", "true_label", "pred_class_i" for i=1,...,n_closest_matches) and
  number of rows equal to df_query rows
  """
  assert df_base is not None and df_query is not None
  assert 'hidden_repr' in df_base.columns and class_column in df_base.columns
  assert 'hidden_repr' in df_query.columns and df_query_label in df_query.columns and df_query_id in df_query.columns

  # Column layout: id, true_class, then alternating (id_i, pred_class_i) pairs.
  columns = [[df_query_id + "{}".format(i), df_pred_label+"{}".format(i)] for i in range(1, n_closest_matches + 1)]
  columns = [e for entry in columns for e in entry] # flatten list in list
  columns[:0] = [df_query_id, df_true_label]
  query_matching_df = pd.DataFrame(columns=columns)
  # BUG FIX: removed `query_matching_df.set_index(df_query_id, df_true_label)`.
  # set_index returns a new frame (it does not mutate in place) and the result
  # was discarded, so the call was a pure no-op — with a misused second
  # positional argument (that slot is `drop`, not a second key) to boot.

  for hidden_repr, label, id in zip(df_query['hidden_repr'], df_query[df_query_label], df_query[df_query_id]):
    closest_vectors = similarity_computations.find_closest_vectors(df_base, hidden_repr=hidden_repr, class_column=class_column,
                                                   n_closest_matches=n_closest_matches)
    # Take elements 2 and 1 of each result tuple — presumably (id, class);
    # confirm against find_closest_vectors' return layout.
    matching_results = [[tpl[2], tpl[1]] for tpl in closest_vectors]
    matching_results = sum(matching_results, []) # flatten
    matching_results[:0] = [id, label]
    row_data = dict(zip(columns, matching_results))
    query_matching_df = query_matching_df.append(row_data, ignore_index=True)
  return query_matching_df
def main():
  """Load the validation embeddings pickle, make a random 80/20 split and
  run MAP on a 100-row sample of each side."""
  valid_file = "/common/homes/students/ferreira/Documents/metadata_and_hidden_rep_df_08-09-17_17-00-24_valid.pickle"
  df = pd.read_pickle(valid_file)

  # Random boolean mask: ~80% of rows go to the training split.
  in_train = np.random.rand(len(df)) < 0.8
  test_df = df[~in_train]
  print("number of test samples: ", np.shape(test_df)[0])
  train_df = df[in_train]
  print("number of train samples: ", np.shape(train_df)[0])

  compute_mean_average_precision(train_df[:100], test_df[:100])
if __name__ == "__main__":
  main()  # script entry point
| [
"f4bio.ferreira@gmail.com"
] | f4bio.ferreira@gmail.com |
912c36ec729b749cdf5a01caf4cd29055cf3f717 | e376062bb68bdd16e285d87103b5ad9b8fea69b7 | /balance_weighted_scales.py | 6a17d9fdb09e65676b3a5d4ff5358d6460bf149a | [] | no_license | AwotG/QAEngineerAssessment | 8146e6091db06aa3d2655d68f6e5cc8d9cdd1ec5 | f35408d24aef88a10308b90f479910546164add1 | refs/heads/master | 2023-07-14T09:23:20.581587 | 2021-08-18T15:05:25 | 2021-08-18T15:05:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,030 | py | import sys
import re
import logging
from itertools import combinations
from pathlib import Path
########################################################################################################################
# Basic Logging (nothing fancy)
########################################################################################################################
# Root logger: DEBUG level with timestamped messages.
logging.basicConfig(level=logging.DEBUG, format="%(asctime)s: %(levelname)s: %(message)s")

########################################################################################################################
# Constants
########################################################################################################################

SCALE = "scale"    # discriminator string for scale-value parsing/validation
WEIGHT = "weight"  # discriminator string for weight-value parsing/validation
SCRIPT_PATH = Path(__file__).parent  # input file is resolved relative to this script
########################################################################################################################
# Gather Input Validation
########################################################################################################################
def parse_user_input_txt(user_input="user_input.txt"):
    """Read the first line of ``user_input`` (resolved next to this script)
    and return its two double-quoted fields as raw (scale, weights) strings.

    Exits the program when the file is missing or cannot be opened.
    """
    full_path = SCRIPT_PATH / user_input
    try:
        handle = open(full_path, "r")
    except FileNotFoundError:
        logging.error(f"\nCan't find the file': '{user_input}'\nFull Path: '{full_path}'\nExiting.")
        sys.exit(FileNotFoundError)
    except IOError:
        logging.error(f"\nDoesn't look like you can open the file: '{user_input}'\nFull Path: '{full_path}'\nExiting.")
        sys.exit(IOError)
    first_line = handle.readline()
    scale, weights = re.findall(r'"(.*?)"', first_line)
    handle.close()
    return scale, weights
def validate_and_parse_values(list_of_strings, type):
    """Validate element counts (scale needs exactly 2 values, weights need at
    least 1), then convert to a sorted list of non-negative ints.

    Exits the program on a count violation.
    """
    count = len(list_of_strings)
    kind = type.lower()
    if kind == "scale" and count != 2:
        logging.error(f"\nScale values have too many elements\nExpected 2 but got {count}\n{list_of_strings}")
        sys.exit(ValueError)
    if kind == "weight" and count < 1:
        logging.error(f"\nWeight values have too many elements\nExpected 1 but got {count}\n{list_of_strings}")
        sys.exit(ValueError)
    return strings_to_ints(list_of_strings, type)
def input_txt_to_values(raw_value, type):
    """Turn a raw "[a,b,...]" string into validated, sorted integer values.

    ``type`` must be SCALE or WEIGHT (case-insensitive); any other value
    returns False.
    """
    kind = type.lower()
    if kind not in (SCALE, WEIGHT):
        return False
    tokens = list(raw_value.strip("[]").split(","))
    return validate_and_parse_values(tokens, kind)
def strings_to_ints(list_of_strings, type):
    """Convert a list of strings to a sorted list of non-negative ints.

    Exits the program (via sys.exit) when any value is not a non-negative
    integer; ``type`` is only used in the error message.
    """
    converted = []
    for raw in list_of_strings:
        try:
            number = int(raw)
            if number < 0:
                raise ValueError
        except ValueError:
            logging.error(
                f"Unable to convert {type} value {raw} to non-negative integer.\nFull {type} values were {list_of_strings}")
            sys.exit(ValueError)
        else:
            converted.append(int(raw))
    return sorted(converted)
########################################################################################################################
# Balancing scale algorithm
########################################################################################################################
def check_values(scale_list, weight_list):
    """Dispatch to the single-weight or two-weight balancing check.

    Returns the result string from the chosen checker, or None when
    ``weight_list`` is empty (mirrors the original fall-through).
    """
    left, right = scale_list[0], scale_list[1]
    how_many = len(weight_list)
    if how_many == 1:
        return check_with_single_weight(left, right, weight_list)
    if how_many > 1:
        return check_with_two_weights(left, right, weight_list)
def check_with_single_weight(left_scale, right_scale, weight_list):
    """Return the single weight as a string when it exactly closes the gap
    between the two pans, else the no-solution message."""
    only_weight = weight_list[0]
    if abs(left_scale - right_scale) == only_weight:
        return str(only_weight)
    return "No possible solution. Please try again."
def check_with_two_weights(left_scale, right_scale, weight_list):
    """Find a pair of weights that balances the scale.

    A pair balances when the weights sit on opposite pans
    (w1 + one side == w2 + other side) or both sit on the lighter pan
    (w1 + w2 + lighter == heavier). Returns "low,high" for the first
    matching pair, else the no-solution message.
    """
    for first, second in combinations(weight_list, 2):
        opposite_pans = (first + left_scale == second + right_scale
                         or first + right_scale == second + left_scale)
        same_pan = (first + second + left_scale == right_scale
                    or first + second + right_scale == left_scale)
        if opposite_pans or same_pan:
            low, high = min(first, second), max(first, second)
            return "{},{}".format(low, high)
    return "No possible solution. Please try again."
if __name__ == '__main__':
    # Pipeline: read the raw quoted fields from user_input.txt, parse and
    # validate them into sorted non-negative ints, then report which
    # weight(s) balance the scale (or a no-solution message).
    raw_scale, raw_weights = parse_user_input_txt()
    processed_scale = input_txt_to_values(raw_scale, SCALE)
    processed_weight = input_txt_to_values(raw_weights, WEIGHT)

    output = check_values(processed_scale, processed_weight)
print(output) | [
"awotg@Awots-MacBook-Pro.local"
] | awotg@Awots-MacBook-Pro.local |
1c633bb83ec340755424794ca77ec8a5cecdcbf1 | c253e3c94b66e85d52b1c274e649a8431db0d7d5 | /IT-Lab/assignment-6/codes/1.py | ab5c61b8787c25c90e6d39eaf115480cc804383b | [] | no_license | Abhinal/college-assignments | bfecc9d8dd05b7da5348def9990f42ff28329328 | a93aeee086eb681f946cc343869610e4588af307 | refs/heads/master | 2023-08-16T12:04:35.543135 | 2021-10-22T16:27:33 | 2021-10-22T16:31:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 101 | py | lst = []
# Read five player names from standard input into lst (defined above).
for _ in range(5):
    player = input()
    lst.append(player)
print(lst) | [
"ayushdubey70@gmail.com"
] | ayushdubey70@gmail.com |
b5d06007d6975e93010d7ebcf55a08c25e4aa76e | 9759218bd38f6fb810d5f4efb0d1c55ca0bfeb2b | /fibonacci.py | e44da1b255f3eec4762a99ebf01a2fbf0a5a17d6 | [] | no_license | JoergReinhardt/python_kinematic | 692f2d39de2306847589490eb3a57a9ad985254f | c3613f816b85aea52dc6a3a4368d2d9c39189918 | refs/heads/master | 2021-01-02T08:40:21.962340 | 2014-05-07T10:51:18 | 2014-05-07T10:51:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 318 | py | def recursive(n):
result = []
if n == 0:
return 0
if n == 1:
return 1
else:
result.append((recursive(n-1)+recursive(n-2)))
return result
def iterative(n):
    """Return, in order, every Fibonacci number strictly less than n."""
    fibs = []
    previous, current = 0, 1
    while current < n:
        fibs.append(current)
        previous, current = current, previous + current
    return fibs
| [
"j.p.reinhardt@gmail.com"
] | j.p.reinhardt@gmail.com |
c6bf4ec8ef32a6af3ec0f6395c735cbf041f4f92 | 0536c34638b838b2998064b4b5f13ffa00a14e43 | /ass_to_all_two/ass_module_keyword.py | d001ee5b3fc2e93bfd304cc30e5eb8b86451a131 | [] | no_license | ichoukou/git_repository | 53e319d7df38e052d5509fc0f94efde4f5155f3b | 5c4200832076960fd5ef047373e903adba0c5616 | refs/heads/master | 2020-07-22T02:13:44.806813 | 2017-01-13T04:57:53 | 2017-01-13T04:57:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,133 | py | # -*- coding:utf-8 -*-
import os
import ass_base
import ass_report
import codecs
from ass_module import AssModule
from whoosh.highlight import Formatter,get_text
from whoosh.index import create_in
from whoosh.fields import *
from whoosh.qparser import QueryParser
from whoosh.index import open_dir
from whoosh.analysis import RegexAnalyzer
from whoosh.writing import AsyncWriter
from whoosh.writing import IndexingError
import json
#ๆฃๆตๅ
ณ้ฎ่ฏๆธ
ๅ
def_keyword = [{'id':'1_5', 'key':'password OR passwd OR passw OR pwd OR pass','name':'ๅฏ่ฝๅญๅจๅฏ็ ็ดๆฅๅๅจไปฃ็ ไธญ๏ผๆ่
ๆชไฝฟ็จไธ็จๅฏ็ ่พๅ
ฅๆณ','cat':'ๆบไปฃ็ ๅฎๅ
จ'},
{'id':'1_6', 'key':'key or def_keyword','name':'ๅฏ้ฅ็ดๆฅๅๅจไปฃ็ ้','cat':'ๆบไปฃ็ ๅฎๅ
จ'},
{'id':'1_9', 'key':'const-string ','name':'็กฌ็ผ็ ๅญ็ฌฆไธฒ','cat':'ๆบไปฃ็ ๅฎๅ
จ'},
{'id':'1_10', 'key':'RSAPublicKey or javax.crypto.Cipher or ENCRYPT( or "MD5" or "RSA" or "SHA" or "MD2" or base64 or DES','name':'ๆฒกๆไฝฟ็จๅ ๅฏ็ฎๆณ','not':'1','cat':'ๆบไปฃ็ ๅฎๅ
จ'},
{'id':'1_9', 'key':'[13000000000 TO 18999999999]','name':'ๅฏ่ฝๅญๅจๆๆบๅท็ดๆฅๅๅจไปฃ็ ้','cat':'ๆบไปฃ็ ๅฎๅ
จ'},
{'id':'2_2', 'key':'http:// OR File','name':'ไฝฟ็จไบไธๅฎๅ
จ็็ฝ็ปๅ่ฎฎ','cat':'ๆฐๆฎไผ ่พๅฎๅ
จ'},
{'id':'1_11', 'key':'forName','name':'ๅบ็จไบๅ
จๅๅฐๆบๅถ','cat':'ๆบไปฃ็ ๅฎๅ
จ'},
{'id':'0_6', 'key':"'Log.'",'name':'ๅซๆ่ฐ่ฏไฟกๆฏ','cat':'ๆฐๆฎๅญๅจๅฎๅ
จ'},
{'id':'1_12', 'key':"'insert into ' or 'update ' or delete from",'name':'ๅฏ่ฝๅญๅจ็ดๆฅSQL่ฏญๅฅ','cat':'ๆบไปฃ็ ๅฎๅ
จ'},
{'id':'1_8', 'key':"Runtime.getRuntime().exec(\"su\")",'name':'่ฐ็จrootๆ้','cat':'ๆ้ๆฃๆฅ'},
{'id':'3_0', 'key':'com.android.phone.PhoneGlobals$NotificationBroadcastReceiver OR engineNextBytes','name':'็ต่ฏๆจๆๆ้็ป่ฟๆผๆด(CVE-2013-6272)','cat':'ๆบไปฃ็ ๅฎๅ
จ'},
{'id':'1_0', 'key':'PackageManager.GET_SIGNATURES or getCrc()','not':'1','name':'ๆชๅฏน่ช่บซๅ็ณป็ป็ญพๅไฟกๆฏ่ฟ่กๅฟ
่ฆ็ๅฎๅ
จๆงๆฃๆฅ','cat':'่ช่บซ้ช่ฏๅฎๅ
จ'},
#{'key':'System.loadLibrary','name':'็ดๆฅ่ฐ็จsoๆไปถ','cat':'ๆบไปฃ็ ๅฎๅ
จ','condition':'has_so'}
]
class AssFormatter(Formatter):
    """Whoosh highlight formatter that appends "(*)" after each matched term.

    (The original docstring claimed square brackets, which did not match
    the implementation below.)
    """
    def format_token(self, text, token, replace=False):
        # Use the get_text function to get the text corresponding to the
        # token
        tokentext = get_text(text, token, replace)
        # Return the text as you want it to appear in the highlighted
        # string
        return "%s(*)" % tokentext
class AssKeyword(AssModule):
    """APK keyword-scanning module (Python 2 code): builds a Whoosh full-text
    index over decompiled .java sources and fills the report from the keyword
    checklist held in self.keyword.

    The original Chinese comments were mojibake in this copy; they have been
    replaced with English summaries (reviewer notes mark points where the
    original intent could not be recovered). Runtime string literals are
    kept byte-for-byte, including garbled ones.
    """
    # Load the keyword checklist from a JSON file; fall back to the built-in
    # def_keyword list when no file is given or loading fails.
    def set_keyword(self, json_file=''):
        if json_file != '':
            try:
                # self.write_file("new.json", json.dumps(def_keyword))
                with open(json_file) as fp:
                    self.keyword = json.load(fp)
                print(type(self.keyword), self.keyword)
            except:
                # NOTE(review): bare except silently falls back on ANY error.
                print("error of load json")
                self.keyword = def_keyword
        else:
            self.keyword = def_keyword
    # Initialise module state; argv[3] (when present) names the keyword file.
    def init(self, argv):
        super(AssKeyword, self).init(argv)
        self.apk_index = self.apk_file+".index"
        if len(argv)>3:
            self.set_keyword(argv[3])
        else:
            self.set_keyword()
        self.abc = []
        self.decompile = []
        self.condition = {}
        return True
    # Walk the decompiled source tree and add every *.<ext> file (skipping the
    # bundled android/ subtree) to the Whoosh writer. Single-letter file names
    # other than R.<ext> are collected in self.abc — NOTE(review): presumably
    # used as an obfuscation indicator, see run().
    def build_src_index(self, writer, ext):
        topdir = os.path.join(self.apk_file+"."+ext, ext)
        ext_name = "."+ext
        ext_len = len(ext_name)
        for root, dirs, files in os.walk(topdir, topdown=False):
            # handle file
            if root.find(os.path.join(topdir,"android"))==0:
                continue
            for name in files:
                if name[-ext_len:] == ext_name:
                    if len(name[:-ext_len])==1 and name[:-ext_len]!='R':
                        self.abc.append(name)
                    path = os.path.join(root,name)
                    disp_path = path.replace(topdir, '')
                    self.decompile.append(disp_path)
                    try:
                        writer.add_document(path=ass_base.b2u(disp_path), content=ass_base.b2u(ass_base.read_file(path)))
                    except ValueError, Argument:
                        print "add_document error : ", Argument
        # print(path, self.read_file(path))
    # Index the "java" sources through an AsyncWriter and commit.
    def build_index_writer(self, ix):
        try:
            writer = AsyncWriter(ix)
            self.build_src_index(writer, "java")
            writer.commit()
        except IndexingError as ie:
            print ie.message + "index Error!!!"
    # Create the on-disk index (running dex2jar first) or reopen an existing one.
    def build_index(self, force=False):
        self.dex2jar(force)
        # self.smali(force)
        ass_base.rmdir(self.apk_index, force)
        # (Re)create the index directory when absent, else reopen it.
        if not os.path.exists(self.apk_index):
            os.mkdir(self.apk_index)
            analyzer = RegexAnalyzer(ur"([\u4e00-\u9fa5])|(\w+(\.?\w+)*)")
            schema = Schema(path=ID(stored=True), content=TEXT(stored=True, analyzer=analyzer))
            ix = create_in(self.apk_index, schema)
            self.build_index_writer(ix)
        else:
            ix = open_dir(self.apk_index)
        return ix
    # Short comma-separated preview (at most 10 entries) of decompiled files.
    def get_code_files(self):
        out = ''
        if len(self.decompile)>10:
            out = ','.join(self.decompile[:10])
        else:
            out = ','.join(self.decompile)
        return out+',...'
    # Main entry: build the index, record decompilation findings, then run
    # every checklist query against the index and fill the report.
    def run(self):
        super(AssKeyword, self).run()
        self.report.progress("ๆซๆๅ
ณ้ฎ่ฏ็น")
        ix = self.build_index(True)
        # Any decompiled sources -> report item '1_3'; fewer than 3
        # single-letter class names -> item '1_2' (NOTE(review): presumably
        # "no code obfuscation" — confirm against the report template).
        if len(self.decompile)>0:
            self.report.setItem('1_3', ', '.join(self.decompile) + ',...')
            if len(self.abc) < 3:
                self.report.setItem('1_2', ', '.join(self.decompile) + ',...')
        with ix.searcher() as searcher:
            for k in self.keyword:
                query = QueryParser("content", ix.schema).parse(k['key'])
                results = searcher.search(query, terms=True)
                if len(results)>1 :
                    results.formatter = AssFormatter()
                if k.get('condition') != None:
                    # NOTE(review): shadows the builtin `str`, and assumes
                    # results is non-empty here — confirm.
                    str = results[0]['path']
                    self.condition[k.get('condition')]=ass_base.u2b(str)
                else:
                    # 'not' == '1' means the item is reported when the keyword
                    # is ABSENT; otherwise it is reported when present.
                    if k.get('not')=='1':
                        if len(results)==0:
                            self.report.setItem(k['id'], u'ๆ ')
                    else:
                        if len(results)>0:
                            self.report.setItem(k['id'], ass_base.u2b(results[0]['path']+"\n"+results[0].highlights("content")))
        #print(self.condition.items())
        ix.close()
        self.clean()
if __name__=="__main__":
    AssKeyword().main()  # script entry point
| [
"415787837@qq.com"
] | 415787837@qq.com |
4b86928803dc97bc1868161fb7ca4f15f0afe51a | 3def2a4168b879dc6f53330d0a72fb0fc5b6fce0 | /matrix_approach/a25.py | d2d110fbbd7bcd65d1ee38b64650c9841f675c11 | [] | no_license | piyushSTK/learningAI | d67896d8991ec8160928bc6d7488dfa1844f4229 | 916d5c6e7d220e61364a53006532baeedb66a8ad | refs/heads/master | 2020-05-15T16:00:25.272659 | 2019-04-20T11:27:54 | 2019-04-20T11:27:54 | 182,382,389 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,776 | py | import numpy as np
import matplotlib.pyplot as plt
len=10  # NOTE(review): number of samples per segment; shadows the builtin `len` — rename (e.g. NUM_POINTS) when touching this file
def mid(B,C):
    """Return the midpoint of B and C (works for scalars and numpy arrays)."""
    return (B + C) / 2
def normal(AB):
    """Normal vector of the segment whose endpoints are the columns of AB.

    Uses the module-level `dvec` (difference operator) and `omat` (rotation).
    """
    direction = np.matmul(AB, dvec)
    return np.matmul(omat, direction)
def line_intersect(AD,CF):
    """Intersection point of the two lines given by the 2x2 point matrices
    AD and CF (each column is one endpoint)."""
    n1 = normal(AD)
    n2 = normal(CF)
    # Solve N @ x = rhs, where each row states "x lies on that line".
    rhs = np.array([np.matmul(n1, AD[:, 0]), np.matmul(n2, CF[:, 0])])
    return np.matmul(np.linalg.inv(np.vstack((n1, n2))), rhs)
# Parameter values 0..1 used to sample points along each segment.
l=np.linspace(0,1,10)
# 2x10 point arrays for the three medians and the three triangle sides.
ad=np.zeros((2,10))
be=np.zeros((2,10))
cf=np.zeros((2,10))
ab=np.zeros((2,10))
bc=np.zeros((2,10))
ca=np.zeros((2,10))
# Triangle vertices.
A=np.array([-2,-2])
B=np.array([1,3])
C=np.array([4,-1])
# Midpoints of the sides opposite A, B, C.
D=mid(B,C)
E=mid(A,C)
F=mid(A,B)
# Median endpoints as 2x2 matrices (columns are the two points).
AD=np.vstack((A,D)).T
CF=np.vstack((C,F)).T
BE=np.vstack((B,E)).T
for i in range(len):  # NOTE(review): `len` is the module constant 10, shadowing the builtin
    temp=A+(D-A)*l[i]
    ad[:,i]=temp.T
    # NOTE(review): the next two assignments look swapped — points along
    # B->E go into `cf` and points along C->F into `be`, so the '$BE$' and
    # '$CF$' plot labels below end up on the wrong curves. Confirm.
    temp=B+(E-B)*l[i]
    cf[:,i]=temp.T
    temp=C+(F-C)*l[i]
    be[:,i]=temp.T
    temp=A+(B-A)*l[i]
    ab[:,i]=temp.T
    temp=B+(C-B)*l[i]
    bc[:,i]=temp.T
    temp=C+(A-C)*l[i]
    ca[:,i]=temp.T
# Helpers for normal(): dvec turns a 2-point matrix into its difference
# vector; omat rotates a 2D vector by -90 degrees.
dvec=np.array([-1,1])
omat=np.array([[0,1],[-1,0]])
# Pairwise intersections of the medians — all three should coincide at the
# centroid (plotted as 'G' below).
P=line_intersect(AD,CF)
Q=line_intersect(AD,BE)
R=line_intersect(BE,CF)
print(AD)
print(BE)
print(CF)
print(P)
print(Q)
print(R)
# Mark the vertices, midpoints and the centroid.
plt.plot(A[0],A[1],'o')
plt.text(A[0]*1.1,A[1]*1.1,'A')
plt.plot(B[0],B[1],'o')
plt.text(B[0]*1.1,B[1]*1.1,'B')
plt.plot(C[0],C[1],'o')
plt.text(C[0]*1.1,C[1]*1.1,'C')
plt.plot(D[0],D[1],'o')
plt.text(D[0]*1.1,D[1]*1.1,'D')
plt.plot(E[0],E[1],'o')
plt.text(E[0]*1.1,E[1]*1.1,'E')
plt.plot(F[0],F[1],'o')
plt.text(F[0]*1.1,F[1]*1.1,'F')
plt.plot(P[0],P[1],'o')
plt.text(P[0]*1.1,P[1]*1.1,'G')
plt.plot(ad[0,:],ad[1,:],label='$AD$')
plt.plot(be[0,:],be[1,:],label='$BE$')
plt.plot(cf[0,:],cf[1,:],label='$CF$')
plt.plot(ab[0,:],ab[1,:],label='$AB$')
plt.plot(bc[0,:],bc[1,:],label='$BC$')
plt.plot(ca[0,:],ca[1,:],label='$CA$')
plt.xlabel('$x$')
plt.xlabel('$y$')  # NOTE(review): probably meant plt.ylabel('$y$') — this overwrites the x label
plt.legend(loc='best')
plt.grid()
plt.show()
plt.show()  # NOTE(review): duplicate show(); the second call displays nothing new
| [
"piyushkumaruttam@gmail.com"
] | piyushkumaruttam@gmail.com |
bd838272251fd61d15841e420f6f1941229fee32 | 51b439c93e96f1e11be20a5e4cb178f43e13e5cf | /String/lengthOfLastWord.py | 5208069ef4bd2c453c9fae1d00abd0fe35a290e1 | [] | no_license | mail-vishalgarg/pythonPracticeOnly | 20a2aa87d064d737c028a31fb84692e30d07b0fa | 8df4016b97b210cdf2ab1266773bc7731b8bcc7b | refs/heads/master | 2020-03-17T12:09:45.425438 | 2019-03-22T04:33:09 | 2019-03-22T04:33:09 | 133,576,745 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 362 | py | class LengthOfLastWord(object):
def lenghtOfLastWord(self, s):
lastIndexOfSpace = s.rfind(' ')
length_of_last_word = s[lastIndexOfSpace + 1:]
print 'Length of the last word:',len(length_of_last_word)
if __name__ == '__main__':
    # Demo run; prints "Length of the last word: 6" (last word: "ignore").
    input_str = 'test only please ignore'
    obj = LengthOfLastWord()
obj.lenghtOfLastWord(input_str) | [
"mail.vishalgarg@gmail.com"
] | mail.vishalgarg@gmail.com |
e8611029177ec93e595d82b86b795cbc307b7108 | d4ab63e2ff846ff509ab3b8a191381bdf8197325 | /project/test_main.py | 8544ed907817ff34f90b366519a3db4337d52c5e | [] | no_license | ibrobabs/task | c2c95d8c83340a38be0ff8a1d7d3da55de33a097 | 82adc4fa54ab9c3606b2770325454916c7f75693 | refs/heads/master | 2021-01-18T17:45:31.392805 | 2017-04-01T05:22:24 | 2017-04-01T05:22:24 | 86,812,161 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,298 | py | import os
import unittest
from project import app, db
from project.config import basedir
from project.models import User
TEST_DB = 'test.db'
class MainTests(unittest.TestCase):
    """Integration tests for the Flask app's error pages (404 and 500),
    run against a throwaway SQLite database."""
    #Setup and Teardown
    def setUp(self):
        # Testing config: disable CSRF, point SQLAlchemy at the test DB,
        # build the schema and grab a test client.
        app.config['TESTING'] = True
        app.config['WTF_CSRF_ENABLED'] = False
        # app.config['DEBUG'] = False
        app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + \
            os.path.join(basedir, TEST_DB)
        self.app = app.test_client()
        db.create_all()
    def tearDown(self):
        # Drop everything so each test starts from a clean database.
        db.session.remove()
        db.drop_all()
    # helper methods
    def login(self, name, password):
        # POST credentials to '/' and follow redirects; returns the response.
        return self.app.post('/', data=dict(
            name=name, password=password), follow_redirects=True)
    # tests
    def test_404_error(self):
        # Unknown routes must render the custom 404 page.
        # NOTE(review): assertEquals is a deprecated alias of assertEqual.
        response = self.app.get('/this-route-does-not-exist/')
        self.assertEquals(response.status_code, 404)
        self.assertIn(b"Sorry. There's nothing here.", response.data)
    def test_500_error(self):
        # Logging in as this deliberately bad user must raise ValueError or,
        # failing that, yield the custom HTTP 500 page.
        bad_user = User(
            name='Jeremy',
            email='jeremy@realpython.com',
            password='django'
        )
        db.session.add(bad_user)
        db.session.commit()
        self.assertRaises(ValueError, self.login, 'Jeremy', 'django')
        try:
            response = self.login('Jeremy', 'django')
            self.assertEquals(response.status_code, 500)
        except ValueError:
            pass
if __name__ == '__main__':
unittest.main() | [
"babskolawole@gmail.com"
] | babskolawole@gmail.com |
bb5f53a8add13647f897bea1a962df952eea6bda | 77a0721bbc98d0ba7ce6a7a319c744948a0726ac | /app/common.py | 8d929450c237188ab5eaeda37ba736f783fcfe3e | [] | no_license | 1111mp/flask_init | e37b192f6e38394599405ea8af203d02f4200f02 | f64851783b5e53921ea7b7632f1a00c389b948e2 | refs/heads/master | 2023-05-10T09:31:49.174888 | 2023-05-02T03:37:49 | 2023-05-02T03:37:49 | 231,494,751 | 1 | 1 | null | 2023-05-02T03:37:50 | 2020-01-03T02:15:23 | Python | UTF-8 | Python | false | false | 2,697 | py | # -*- coding:utf-8 -*-
from datetime import date, datetime, time
import json
import uuid
from sqlalchemy.ext.declarative import DeclarativeMeta
from redis import WatchError
from .extensions import xtredis
from config import USERAUTHKEY
# https://dormousehole.readthedocs.io/en/latest/patterns/apierrors.html
class InvalidUsage(Exception):
    """API error carrying an HTTP status code and an optional extra payload
    (pattern from the Flask docs on implementing API errors)."""

    # Default HTTP status used when the caller does not supply one.
    status_code = 400

    def __init__(self, message, status_code=None, payload=None):
        Exception.__init__(self)
        self.message = message
        if status_code is not None:
            # Override the class-level default for this instance only.
            self.status_code = status_code
        self.payload = payload

    def to_dict(self):
        """Serialize the error as a JSON-ready dict: payload keys plus
        'code' and 'message'."""
        body = dict(self.payload or ())
        body.update(code=self.status_code, message=self.message)
        return body
class ComplexEncoder(json.JSONEncoder):
    """JSON encoder with special handling for datetime/date/time values,
    bytes, SQLAlchemy declarative models, and dicts containing any of these.
    (Original docstring was mojibake Chinese; translated/rewritten.)"""
    def default(self, obj):
        # Fixed-format rendering for temporal types.
        if isinstance(obj, datetime):
            return obj.strftime('%Y-%m-%d %H:%M:%S')
        elif isinstance(obj, date):
            return obj.strftime('%Y-%m-%d')
        elif isinstance(obj, time):
            return obj.isoformat()
        # A second `if` chain starts here, reached only for non-temporal obj.
        if isinstance(obj, bytes):
            return str(obj, encoding='utf-8')
        elif isinstance(obj.__class__, DeclarativeMeta):
            # SQLAlchemy model: re-encode its column-name -> value mapping.
            return self.default({i.name: getattr(obj, i.name) for i in obj.__table__.columns})
        elif isinstance(obj, dict):
            # NOTE: mutates the input dict in place; unencodable values
            # become None instead of raising.
            for k in obj:
                try:
                    if isinstance(obj[k], (datetime, date, DeclarativeMeta)):
                        obj[k] = self.default(obj[k])
                    else:
                        obj[k] = obj[k]
                except TypeError:
                    obj[k] = None
            return obj
        else:
            return json.JSONEncoder.default(self, obj)
def successReturn(data, msg=''):
    """Wrap a payload in the standard success envelope (code 200)."""
    return dict(code=200, data=data, msg=msg)
def cacheToken(userId, token, maxAge=60 * 60 * 1000):
    # Store `token` under a fresh random field key in the per-user auth hash
    # (the hash is deleted first, so previous entries are discarded) and reset
    # the hash expiry. Returns the generated field key.
    # NOTE(review): Redis `expire` takes seconds; 60*60*1000 is ~41 days, so
    # `maxAge` may have been intended as milliseconds — confirm.
    key = str(uuid.uuid4())
    auth = USERAUTHKEY + str(userId)
    with xtredis.pipeline() as pipe:
        while True:
            try:
                # Optimistic WATCH/MULTI/EXEC: retry when another client
                # touches the hash between WATCH and EXEC.
                pipe.watch(auth)
                pipe.multi()
                pipe.delete(auth).hset(auth, key, token).expire(auth, maxAge)
                pipe.execute()
                break
            except WatchError:
                continue
    return key
def getToken(userId, key):
    """Fetch the cached token stored under `key` in the user's auth hash
    (None when absent)."""
    return xtredis.hget(USERAUTHKEY + str(userId), key)
def extendToken(userId, maxAge=60 * 60 * 1000):
    """Refresh the expiry window on the user's auth hash."""
    xtredis.expire(USERAUTHKEY + str(userId), maxAge)
def delToken(userId, key):
    """Remove one cached token entry from the user's auth hash."""
    xtredis.hdel(USERAUTHKEY + str(userId), key)
| [
"shzhangyifan@corp.netease.com"
] | shzhangyifan@corp.netease.com |
59bd26a436c815a1a7ffcd5776a932ae68d492c4 | 3df1d0d5946b0b8673d509cfb9b6dd99110450fe | /src/util/icons.py | a3d65166465b23ece394e699b39146a6d9558d66 | [] | no_license | veveykocute/game_launcher | 3374d8069504ed6dcbe844ccb8d725a62699bca4 | 862e8300af8108ea58d06fa59c2e82086cb8f0fb | refs/heads/master | 2021-01-18T08:16:34.661709 | 2015-08-18T22:19:39 | 2015-08-18T22:19:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,867 | py | """Routines for finding a game's icon"""
from __future__ import (absolute_import, division, print_function,
with_statement, unicode_literals)
__author__ = "Stephan Sokolow (deitarion/SSokolow)"
__license__ = "MIT"
import os, re
from .common import multiglob_compile
# Files which should be heuristically considered to identify a program's icon
ICON_EXTS = (
    '.png', '.xpm',
    '.svg', '.svgz', '.svg.gz',
    '.jpg', '.jpe', '.jpeg',
    '.bmp', '.ico',
)
# Glob patterns for image names that are almost certainly NOT icons
# (backgrounds, character art, sprite sheets, tiles).
NON_ICON_NAMES = (
    '*background*', 'bg*',
    'character*',
    'sheet*',
    'tile*',
)
# Case-insensitive compiled matcher for NON_ICON_NAMES
# (multiglob_compile comes from .common).
NON_ICON_NAMES_RE = multiglob_compile(NON_ICON_NAMES, re_flags=re.I)
def pick_icon(icons, parent_path):
    """Choose the most icon-like name from *icons* and return its full path
    under *parent_path* (None when *icons* is empty).

    Heuristic: drop names matching known non-icon patterns, then prefer
    names containing "icon", then names that are exactly "icon.<ext>".
    Each filter is only applied when it leaves at least one candidate.

    @todo: score-based ranking, multiple sizes, a fallback list for failed
        loads, format preference (SVG > PNG > XPM > BMP > JPEG), squareness
        checks, icons inside .exe/.jar containers, and a unit test suite.
    """
    if not icons:
        return None

    def narrow(candidates, keep):
        # Apply a preference filter, but never narrow to an empty list.
        kept = [name for name in candidates if keep(name)]
        return kept or candidates

    icons = narrow(icons, lambda name: not NON_ICON_NAMES_RE.match(name))
    icons = narrow(icons, lambda name: 'icon' in name.lower())
    icons = narrow(icons, lambda name: os.path.splitext(name)[0].lower() == 'icon')

    # TODO: Make this smarter (see the docstring's todo list).
    return os.path.join(parent_path, icons[0])
# vim: set sw=4 sts=4 expandtab :
| [
"http://www.ssokolow.com/ContactMe"
] | http://www.ssokolow.com/ContactMe |
95e69f614829e398941039bb5e7c6b54d7912473 | 6a7e9e0e9c08132166f566bd88ae1c46ff8f9c0a | /azure-mgmt-resource/azure/mgmt/resource/policy/v2018_03_01/operations/policy_definitions_operations.py | 35d46e882383af32ab876701a41bdc8be7b23c00 | [
"MIT"
] | permissive | ashirey-msft/azure-sdk-for-python | d92381d11c48f194ec9f989f5f803db614fb73f2 | e04778e13306dad2e8fb044970215bad6296afb6 | refs/heads/master | 2020-03-23T06:05:39.283442 | 2018-09-15T00:18:26 | 2018-09-15T00:18:26 | 141,188,192 | 0 | 1 | MIT | 2018-07-16T20:02:52 | 2018-07-16T20:02:52 | null | UTF-8 | Python | false | false | 31,610 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from .. import models
class PolicyDefinitionsOperations(object):
    """PolicyDefinitionsOperations operations.
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    :ivar api_version: The API version to use for the operation. Constant value: "2018-03-01".
    """
    # NOTE: AutoRest-generated code (see the file header) -- manual edits here
    # will be lost the next time the client is regenerated.
    # Make the generated model classes reachable as an attribute of the class.
    models = models
    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        # API version is pinned per generated client; it is sent as the
        # 'api-version' query parameter on every request below.
        self.api_version = "2018-03-01"
        self.config = config
    def create_or_update(
            self, policy_definition_name, parameters, custom_headers=None, raw=False, **operation_config):
        """Creates or updates a policy definition in a subscription.
        This operation creates or updates a policy definition in the given
        subscription with the given name.
        :param policy_definition_name: The name of the policy definition to
         create.
        :type policy_definition_name: str
        :param parameters: The policy definition properties.
        :type parameters:
         ~azure.mgmt.resource.policy.v2018_03_01.models.PolicyDefinition
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: PolicyDefinition or ClientRawResponse if raw=true
        :rtype:
         ~azure.mgmt.resource.policy.v2018_03_01.models.PolicyDefinition or
         ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = self.create_or_update.metadata['url']
        path_format_arguments = {
            'policyDefinitionName': self._serialize.url("policy_definition_name", policy_definition_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct body
        body_content = self._serialize.body(parameters, 'PolicyDefinition')
        # Construct and send request
        request = self._client.put(url, query_parameters)
        response = self._client.send(
            request, header_parameters, body_content, stream=False, **operation_config)
        # 201 (Created) is the only status treated as success here; any other
        # status is surfaced to the caller as a CloudError.
        if response.status_code not in [201]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        deserialized = None
        if response.status_code == 201:
            deserialized = self._deserialize('PolicyDefinition', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/policyDefinitions/{policyDefinitionName}'}
    def delete(
            self, policy_definition_name, custom_headers=None, raw=False, **operation_config):
        """Deletes a policy definition in a subscription.
        This operation deletes the policy definition in the given subscription
        with the given name.
        :param policy_definition_name: The name of the policy definition to
         delete.
        :type policy_definition_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: None or ClientRawResponse if raw=true
        :rtype: None or ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = self.delete.metadata['url']
        path_format_arguments = {
            'policyDefinitionName': self._serialize.url("policy_definition_name", policy_definition_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        request = self._client.delete(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)
        # 204 covers the delete-of-nonexistent-definition case; no body either way.
        if response.status_code not in [200, 204]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response
    delete.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/policyDefinitions/{policyDefinitionName}'}
    def get(
            self, policy_definition_name, custom_headers=None, raw=False, **operation_config):
        """Retrieves a policy definition in a subscription.
        This operation retrieves the policy definition in the given
        subscription with the given name.
        :param policy_definition_name: The name of the policy definition to
         get.
        :type policy_definition_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: PolicyDefinition or ClientRawResponse if raw=true
        :rtype:
         ~azure.mgmt.resource.policy.v2018_03_01.models.PolicyDefinition or
         ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = self.get.metadata['url']
        path_format_arguments = {
            'policyDefinitionName': self._serialize.url("policy_definition_name", policy_definition_name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)
        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('PolicyDefinition', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/policyDefinitions/{policyDefinitionName}'}
    def get_built_in(
            self, policy_definition_name, custom_headers=None, raw=False, **operation_config):
        """Retrieves a built-in policy definition.
        This operation retrieves the built-in policy definition with the given
        name.
        :param policy_definition_name: The name of the built-in policy
         definition to get.
        :type policy_definition_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: PolicyDefinition or ClientRawResponse if raw=true
        :rtype:
         ~azure.mgmt.resource.policy.v2018_03_01.models.PolicyDefinition or
         ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        # Built-in definitions are not subscription-scoped: the URL template
        # below carries no {subscriptionId} segment.
        url = self.get_built_in.metadata['url']
        path_format_arguments = {
            'policyDefinitionName': self._serialize.url("policy_definition_name", policy_definition_name, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)
        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('PolicyDefinition', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    get_built_in.metadata = {'url': '/providers/Microsoft.Authorization/policyDefinitions/{policyDefinitionName}'}
    def create_or_update_at_management_group(
            self, policy_definition_name, parameters, management_group_id, custom_headers=None, raw=False, **operation_config):
        """Creates or updates a policy definition in a management group.
        This operation creates or updates a policy definition in the given
        management group with the given name.
        :param policy_definition_name: The name of the policy definition to
         create.
        :type policy_definition_name: str
        :param parameters: The policy definition properties.
        :type parameters:
         ~azure.mgmt.resource.policy.v2018_03_01.models.PolicyDefinition
        :param management_group_id: The ID of the management group.
        :type management_group_id: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: PolicyDefinition or ClientRawResponse if raw=true
        :rtype:
         ~azure.mgmt.resource.policy.v2018_03_01.models.PolicyDefinition or
         ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = self.create_or_update_at_management_group.metadata['url']
        path_format_arguments = {
            'policyDefinitionName': self._serialize.url("policy_definition_name", policy_definition_name, 'str'),
            'managementGroupId': self._serialize.url("management_group_id", management_group_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct body
        body_content = self._serialize.body(parameters, 'PolicyDefinition')
        # Construct and send request
        request = self._client.put(url, query_parameters)
        response = self._client.send(
            request, header_parameters, body_content, stream=False, **operation_config)
        if response.status_code not in [201]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        deserialized = None
        if response.status_code == 201:
            deserialized = self._deserialize('PolicyDefinition', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    create_or_update_at_management_group.metadata = {'url': '/providers/Microsoft.Management/managementgroups/{managementGroupId}/providers/Microsoft.Authorization/policyDefinitions/{policyDefinitionName}'}
    def delete_at_management_group(
            self, policy_definition_name, management_group_id, custom_headers=None, raw=False, **operation_config):
        """Deletes a policy definition in a management group.
        This operation deletes the policy definition in the given management
        group with the given name.
        :param policy_definition_name: The name of the policy definition to
         delete.
        :type policy_definition_name: str
        :param management_group_id: The ID of the management group.
        :type management_group_id: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: None or ClientRawResponse if raw=true
        :rtype: None or ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = self.delete_at_management_group.metadata['url']
        path_format_arguments = {
            'policyDefinitionName': self._serialize.url("policy_definition_name", policy_definition_name, 'str'),
            'managementGroupId': self._serialize.url("management_group_id", management_group_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        request = self._client.delete(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)
        if response.status_code not in [200, 204]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response
    delete_at_management_group.metadata = {'url': '/providers/Microsoft.Management/managementgroups/{managementGroupId}/providers/Microsoft.Authorization/policyDefinitions/{policyDefinitionName}'}
    def get_at_management_group(
            self, policy_definition_name, management_group_id, custom_headers=None, raw=False, **operation_config):
        """Retrieve a policy definition in a management group.
        This operation retrieves the policy definition in the given management
        group with the given name.
        :param policy_definition_name: The name of the policy definition to
         get.
        :type policy_definition_name: str
        :param management_group_id: The ID of the management group.
        :type management_group_id: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: PolicyDefinition or ClientRawResponse if raw=true
        :rtype:
         ~azure.mgmt.resource.policy.v2018_03_01.models.PolicyDefinition or
         ~msrest.pipeline.ClientRawResponse
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = self.get_at_management_group.metadata['url']
        path_format_arguments = {
            'policyDefinitionName': self._serialize.url("policy_definition_name", policy_definition_name, 'str'),
            'managementGroupId': self._serialize.url("management_group_id", management_group_id, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)
        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('PolicyDefinition', response)
        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response
        return deserialized
    get_at_management_group.metadata = {'url': '/providers/Microsoft.Management/managementgroups/{managementGroupId}/providers/Microsoft.Authorization/policyDefinitions/{policyDefinitionName}'}
    def list(
            self, custom_headers=None, raw=False, **operation_config):
        """Retrieves policy definitions in a subscription.
        This operation retrieves a list of all the policy definitions in a
        given subscription.
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of PolicyDefinition
        :rtype:
         ~azure.mgmt.resource.policy.v2018_03_01.models.PolicyDefinitionPaged[~azure.mgmt.resource.policy.v2018_03_01.models.PolicyDefinition]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Closure handed to the Paged iterator below: it is first called with
        # next_link=None (build the full URL), then with the service-supplied
        # next link, which is used verbatim without re-adding query params.
        def internal_paging(next_link=None, raw=False):
            if not next_link:
                # Construct URL
                url = self.list.metadata['url']
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
            else:
                url = next_link
                query_parameters = {}
            # Construct headers
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
            # Construct and send request
            request = self._client.get(url, query_parameters)
            response = self._client.send(
                request, header_parameters, stream=False, **operation_config)
            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            return response
        # Deserialize response
        deserialized = models.PolicyDefinitionPaged(internal_paging, self._deserialize.dependencies)
        if raw:
            header_dict = {}
            client_raw_response = models.PolicyDefinitionPaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response
        return deserialized
    list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/policyDefinitions'}
    def list_built_in(
            self, custom_headers=None, raw=False, **operation_config):
        """Retrieve built-in policy definitions.
        This operation retrieves a list of all the built-in policy definitions.
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of PolicyDefinition
        :rtype:
         ~azure.mgmt.resource.policy.v2018_03_01.models.PolicyDefinitionPaged[~azure.mgmt.resource.policy.v2018_03_01.models.PolicyDefinition]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        def internal_paging(next_link=None, raw=False):
            if not next_link:
                # Construct URL
                url = self.list_built_in.metadata['url']
                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
            else:
                url = next_link
                query_parameters = {}
            # Construct headers
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
            # Construct and send request
            request = self._client.get(url, query_parameters)
            response = self._client.send(
                request, header_parameters, stream=False, **operation_config)
            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            return response
        # Deserialize response
        deserialized = models.PolicyDefinitionPaged(internal_paging, self._deserialize.dependencies)
        if raw:
            header_dict = {}
            client_raw_response = models.PolicyDefinitionPaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response
        return deserialized
    list_built_in.metadata = {'url': '/providers/Microsoft.Authorization/policyDefinitions'}
    def list_by_management_group(
            self, management_group_id, custom_headers=None, raw=False, **operation_config):
        """Retrieve policy definitions in a management group.
        This operation retrieves a list of all the policy definitions in a
        given management group.
        :param management_group_id: The ID of the management group.
        :type management_group_id: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of PolicyDefinition
        :rtype:
         ~azure.mgmt.resource.policy.v2018_03_01.models.PolicyDefinitionPaged[~azure.mgmt.resource.policy.v2018_03_01.models.PolicyDefinition]
        :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        def internal_paging(next_link=None, raw=False):
            if not next_link:
                # Construct URL
                url = self.list_by_management_group.metadata['url']
                path_format_arguments = {
                    'managementGroupId': self._serialize.url("management_group_id", management_group_id, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
            else:
                url = next_link
                query_parameters = {}
            # Construct headers
            header_parameters = {}
            header_parameters['Content-Type'] = 'application/json; charset=utf-8'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
            # Construct and send request
            request = self._client.get(url, query_parameters)
            response = self._client.send(
                request, header_parameters, stream=False, **operation_config)
            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp
            return response
        # Deserialize response
        deserialized = models.PolicyDefinitionPaged(internal_paging, self._deserialize.dependencies)
        if raw:
            header_dict = {}
            client_raw_response = models.PolicyDefinitionPaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response
        return deserialized
    list_by_management_group.metadata = {'url': '/providers/Microsoft.Management/managementgroups/{managementGroupId}/providers/Microsoft.Authorization/policyDefinitions'}
| [
"lmazuel@microsoft.com"
] | lmazuel@microsoft.com |
3a7c5b5f1b703bbb7f47d522088f4a5176a19024 | 67a4db2b8fe501298196375db5bb12960a7b1d58 | /contrib/seeds/generate-seeds.py | 2aad6cae958664fbb60ee61d35714c3a342207bf | [
"MIT"
] | permissive | umoguny/supercoll | cc9b4cbc5d358473e8978c1315cadad399b28c30 | 83b8f68ad308de6659e21219e823e6b2b4632882 | refs/heads/master | 2020-03-28T21:14:12.639871 | 2018-09-17T14:10:30 | 2018-09-17T14:10:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,383 | py | #!/usr/bin/python
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Script to generate list of seed nodes for chainparams.cpp.
This script expects two text files in the directory that is passed as an
argument:
nodes_main.txt
nodes_test.txt
These files must consist of lines in the format
<ip>
<ip>:<port>
[<ipv6>]
[<ipv6>]:<port>
<onion>.onion
0xDDBBCCAA (IPv4 little-endian old pnSeeds format)
The output will be two data structures with the peers in binary format:
static SeedSpec6 pnSeed6_main[]={
...
}
static SeedSpec6 pnSeed6_test[]={
...
}
These should be pasted into `src/chainparamsseeds.h`.
'''
from __future__ import print_function, division
from base64 import b32decode
from binascii import a2b_hex
import sys, os
import re
# ipv4 in ipv6 prefix (IPv4-mapped IPv6 addresses, ::ffff:0:0/96)
pchIPv4 = bytearray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff])
# tor-specific ipv6 prefix (OnionCat fd87:d87e:eb43::/48 mapping)
pchOnionCat = bytearray([0xFD,0x87,0xD8,0x7E,0xEB,0x43])
def name_to_ipv6(addr):
    """Convert an address string into a 16-byte IPv6 bytearray.

    Accepted formats (see the module docstring):
      * ``<base32>.onion`` -- Tor address mapped into the OnionCat prefix
      * dotted IPv4        -- mapped into the ::ffff:0:0/96 prefix
      * IPv6 (contains ':')-- parsed as-is, '::' gap supported
      * ``0xDDCCBBAA``     -- little-endian IPv4 (old pnSeeds format)

    :param addr: address string (host part only, without a port)
    :return: bytearray of exactly 16 bytes
    :raises ValueError: if the address cannot be parsed
    """
    if len(addr) > 6 and addr.endswith('.onion'):
        vchAddr = b32decode(addr[0:-6], True)
        if len(vchAddr) != 16 - len(pchOnionCat):
            # Bug fix: the original raised ``'Invalid onion %s' % s`` where
            # ``s`` is undefined, producing a NameError instead of the
            # intended ValueError.
            raise ValueError('Invalid onion %s' % addr)
        return pchOnionCat + vchAddr
    elif '.' in addr:  # IPv4
        return pchIPv4 + bytearray((int(x) for x in addr.split('.')))
    elif ':' in addr:  # IPv6
        sub = [[], []]  # bytes before and after the '::' gap
        x = 0  # 0 while filling the prefix, 1 after a '::' is seen
        addr = addr.split(':')
        for i, comp in enumerate(addr):
            if comp == '':
                # Leading/trailing empty component comes from a '::' at the
                # very beginning or end of the address; skip it.
                if i == 0 or i == (len(addr) - 1):
                    continue
                x += 1  # :: skips to suffix
                assert(x < 2)
            else:  # two bytes per component
                val = int(comp, 16)
                sub[x].append(val >> 8)
                sub[x].append(val & 0xff)
        nullbytes = 16 - len(sub[0]) - len(sub[1])
        assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
        return bytearray(sub[0] + ([0] * nullbytes) + sub[1])
    elif addr.startswith('0x'):  # IPv4-in-little-endian
        return pchIPv4 + bytearray(reversed(a2b_hex(addr[2:])))
    else:
        raise ValueError('Could not parse address %s' % addr)
def parse_spec(s, defaultport):
    """Split an address spec into ``(host_bytes, port)``.

    Handles ``[ipv6]``, ``[ipv6]:port``, bare ipv6, ``host`` and
    ``host:port`` forms; the host part is converted with
    :func:`name_to_ipv6`.

    :param s: address spec string
    :param defaultport: port to use when the spec carries none
    :return: tuple of (16-byte bytearray, int port)
    """
    # Bug fix: the pattern is now a raw string -- '\[' in a plain string
    # literal is an invalid escape sequence (DeprecationWarning since
    # Python 3.6, SyntaxWarning/error in later versions).
    match = re.match(r'\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s)
    if match:  # ipv6 in brackets, optionally with port
        host = match.group(1)
        port = match.group(2)
    elif s.count(':') > 1:  # bare ipv6, no port
        host = s
        port = ''
    else:
        (host, _, port) = s.partition(':')
    if not port:
        port = defaultport
    else:
        port = int(port)
    host = name_to_ipv6(host)
    return (host, port)
def process_nodes(g, f, structname, defaultport):
    """Render the address lines of *f* as a C ``SeedSpec6`` array in *g*.

    Blank lines and ``#`` comments in *f* are ignored; each remaining line
    is parsed with :func:`parse_spec` and emitted as ``{{bytes}, port}``.
    """
    g.write('static SeedSpec6 %s[] = {\n' % structname)
    entries = []
    for raw_line in f:
        # Drop everything after a '#' comment marker, then trim whitespace.
        spec = raw_line.split('#', 1)[0].strip()
        if not spec:
            continue
        (host, port) = parse_spec(spec, defaultport)
        host_bytes = ','.join(('0x%02x' % b) for b in host)
        entries.append(' {{%s}, %i}' % (host_bytes, port))
    # Join once so entries are separated by ',\n' with no trailing comma.
    g.write(',\n'.join(entries))
    g.write('\n};\n')
def main():
    """Write chainparamsseeds.h to stdout from nodes_main.txt / nodes_test.txt."""
    if len(sys.argv) < 2:
        print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr)
        exit(1)
    out = sys.stdout
    indir = sys.argv[1]
    # Emit the header guard and the explanatory banner comment.
    for banner_line in ('#ifndef DESIRE_CHAINPARAMSSEEDS_H\n',
                        '#define DESIRE_CHAINPARAMSSEEDS_H\n',
                        '/**\n',
                        ' * List of fixed seed nodes for the collectiblecoin network\n',
                        ' * AUTOGENERATED by contrib/seeds/generate-seeds.py\n',
                        ' *\n',
                        ' * Each line contains a 16-byte IPv6 address and a port.\n',
                        ' * IPv4 as well as onion addresses are wrapped inside a IPv6 address accordingly.\n',
                        ' */\n'):
        out.write(banner_line)
    # Mainnet seeds (default port 9919), then testnet seeds (19919).
    with open(os.path.join(indir, 'nodes_main.txt'), 'r') as nodes_file:
        process_nodes(out, nodes_file, 'pnSeed6_main', 9919)
    out.write('\n')
    with open(os.path.join(indir, 'nodes_test.txt'), 'r') as nodes_file:
        process_nodes(out, nodes_file, 'pnSeed6_test', 19919)
    out.write('#endif // DESIRE_CHAINPARAMSSEEDS_H\n')
main()
| [
"fastpow@github.com"
] | fastpow@github.com |
6684ca9dd67bacb41767bd65a1c0c1f2dd8193ce | e07f6ac5559d09eb6f5393650af135c7474f5003 | /recent_news.py | e27c23ffb42fa9cdf553ea3b1d714c6870d9ef68 | [] | no_license | Money-fin/backend | 21e188f3f59ccaa216d1ea4bb7b78f670831cb6f | 909961dc33df84ba3663e622bfdf6ab98f915f5f | refs/heads/master | 2022-12-04T08:32:10.094335 | 2020-08-29T09:57:28 | 2020-08-29T09:57:28 | 291,008,543 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,527 | py | import requests
import sys
sys.path.append("/home/jylee/backend")
import urllib
import os
from bs4 import BeautifulSoup
import numpy as np
import pandas as pd
from helper import KafkaHelper
def new_crawl(link, kafka=False):
    """Scrape a single Yonhap (yna.co.kr) article page.

    :param link: absolute URL of the article to fetch.
    :param kafka: if True, publish the scraped data via KafkaHelper and
        return None; if False, return the data dict (with extra
        ``time``/``img_url`` keys).

    NOTE(review): all CSS selectors below are tied to yna.co.kr's current
    page layout; a site redesign will break this function silently.
    """
    url = link
    item_info = requests.get(url).text
    soup = BeautifulSoup(item_info, 'html.parser')
    title = soup.select('div.content03 header.title-article01 h1')[0].get_text()
    # [4:] drops the first 4 characters of the header <p> text -- presumably
    # a fixed-width label before the timestamp; verify against a live page.
    time = soup.select('div.content03 header.title-article01 p')[0].get_text()[4:]
    # The scraped 'src' is protocol-relative, so 'https:' is prepended.
    img_url = f"https:{soup.select('div.img-con span img')[0]['src']}"
    raw_content = soup.select('div.story-news.article')
    # print(raw_content)
    content_p = [item.select("p") for item in raw_content]
    # Only the first matched article container (content_p[0]) is used.
    content_text = [item.get_text().strip() for item in content_p[0]]
    # [1:] skips the first paragraph -- presumably a byline/lead element,
    # not body text; confirm against the live page structure.
    content = "\n".join(content_text[1:])
    data_dict = {
        "title": title,
        "content": content,
        "link": link
    }
    if kafka:
        # Publish to the news-input topic; nothing is returned in this path.
        KafkaHelper.pub_ninput(data_dict)
    else:
        data_dict["time"] = time
        data_dict["img_url"] = img_url
        return data_dict
def recent_new_check(poll_interval=5):
    """Poll Yonhap's "latest news" listing forever, publishing new articles.

    Repeatedly fetches the listing page, extracts the newest article's link
    and, whenever it differs from the previously seen link, hands it to
    :func:`new_crawl` with ``kafka=True`` so it gets published.

    :param poll_interval: seconds to sleep between polls.  The original
        implementation had no delay at all and busy-looped, hammering the
        remote server; the default of 5 seconds fixes that while keeping
        the zero-argument call backward compatible.

    This function never returns.
    """
    import time  # local import keeps the module-level import block unchanged
    past_link = ""
    while True:
        url = 'https://www.yna.co.kr/news?site=navi_latest_depth01'
        item_info = requests.get(url).text
        soup = BeautifulSoup(item_info, 'html.parser')
        # First <li> of the listing holds the newest article's anchor.
        new_a_tag = soup.select('div.list-type038 ul')[0].select("li")[0].select("div div a.tit-wrap")
        current_link = f"https:{new_a_tag[0]['href']}"
        # Only publish when the newest article changed since the last poll.
        if current_link != past_link:
            new_crawl(current_link, True)
            past_link = current_link
        time.sleep(poll_interval)
recent_new_check() | [
"hyanghope@naver.com"
] | hyanghope@naver.com |
bcf019e36ce418fffe064dbcf90f877fb35e974b | 32ce6ae3fd849ec5442cc7fbcb420a25f1f7627d | /impl/CellObjectss.py | aeaba7ea93d500eccfec91a127b99f0ea85774bd | [] | no_license | karaposu/Labyrinth-Game | c73817b74c20ece2c56eaeb97591dd61791660bb | 1e1572c7b506eceefda47914413ea772c8e763a3 | refs/heads/main | 2023-06-04T20:37:29.293096 | 2021-06-28T04:51:49 | 2021-06-28T04:51:49 | 380,907,451 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 860 | py | from services.Cell import cell
class PassiveGameObj(cell):
    """A stationary object on the game board.

    Carries an id, a mutable ``[x, y]`` coordinate pair and a display
    designator; it has no movement behaviour of its own.
    """
    def __init__(self, id, coordinates, designator):
        self.id = id
        self.coordinates = coordinates
        self.designator = designator
    def CoordinateUpdate(self, c):
        """Overwrite the stored x/y position in place with c's first two values."""
        self.coordinates[0], self.coordinates[1] = c[0], c[1]
class MovingGameObj(cell):
    """A board object that can move: tracks hit points and remembers the
    position it occupied before the current one."""

    def __init__(self, id, coordinates, designator):
        self.id = id                    # unique identifier for this object
        self.life = 2                   # starting hit points
        self.coordinates = coordinates  # mutable [row, col] position
        self.designator = designator    # display/type marker for the object
        # Detached copy, so updating `coordinates` later does not silently
        # rewrite the remembered previous position.
        self.previous_coordinates = coordinates.copy()

    def CoordinateUpdate(self, c):
        """Overwrite the stored position in place with the first two
        elements of *c*."""
        self.coordinates[0], self.coordinates[1] = c[0], c[1]

    def move(self, m):
        """Movement hook; intentionally a no-op here (subclasses supply
        behavior)."""
        pass

    def CoordinateCalculate(self, move):
        """Destination-computation hook; intentionally a no-op here
        (subclasses supply behavior)."""
        pass
| [
"noreply@github.com"
] | karaposu.noreply@github.com |
2d2c073ecfbd8d904b117c938ab4d01722607f6f | ab57af4f4302160c78147b60ff635876a58bf99d | /pds2/urls.py | 9c9a80da505918ca8a6badfb6e87bc2bf3d2032e | [] | no_license | jrthorne/djbraintree | a523c04fbdb137735c1a68f793f6ffdc2b8a512c | ce28592f7927fa9bd06c7f115d672bd544505caf | refs/heads/master | 2020-06-22T21:21:35.878756 | 2019-07-23T09:52:18 | 2019-07-23T09:52:18 | 198,402,434 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 327 | py | from django.contrib import admin
from django.urls import path
from django.conf.urls import include, url
from django.views.generic import TemplateView
# Project URL routing table.
# NOTE(review): `url()` (from django.conf.urls) is deprecated in favor of
# re_path() and was removed in Django 4.0 — confirm the project's Django
# version before upgrading.
urlpatterns = [
    path('admin/', admin.site.urls),  # Django admin site
    # Site root: serves the static landing page template.
    url(r'^$', TemplateView.as_view(template_name='index.html')),
    # Delegate everything under /shop/ to the shop app's URLconf.
    # NOTE(review): `name=` combined with include() does not name the
    # included patterns — presumably a namespace was intended; verify.
    url(r'^shop/', include('shop.urls'), name='shop'),
]
| [
"jthorne@magiclamp.com.au"
] | jthorne@magiclamp.com.au |
d309ba906885b2264436cea4fe7c0b1cb6487058 | 9edaf93c833ba90ae9a903aa3c44c407a7e55198 | /travelport/models/special_equipment_1.py | d0b34a9eefba484eaeb14ea03e11c478e502ee89 | [] | no_license | tefra/xsdata-samples | c50aab4828b8c7c4448dbdab9c67d1ebc519e292 | ef027fe02e6a075d8ed676c86a80e9647d944571 | refs/heads/main | 2023-08-14T10:31:12.152696 | 2023-07-25T18:01:22 | 2023-07-25T18:01:22 | 222,543,692 | 6 | 1 | null | 2023-06-25T07:21:04 | 2019-11-18T21:00:37 | Python | UTF-8 | Python | false | false | 1,577 | py | from __future__ import annotations
from dataclasses import dataclass, field
from travelport.models.type_element_status_1 import TypeElementStatus1
__NAMESPACE__ = "http://www.travelport.com/schema/common_v52_0"
# NOTE: this class follows the xsdata code-generation pattern (Meta inner
# class + field() metadata mapped to XML attributes); field order, metadata
# dicts and annotation spellings are consumed by schema introspection, so
# they must not be reshuffled by hand.
@dataclass
class SpecialEquipment1:
    """Special equipment associated with a vehicle booking (Travelport
    common_v52_0 schema element ``SpecialEquipment``).

    Parameters
    ----------
    key
    type_value
        Special equipment associated with a specific vehicle
    el_stat
        This attribute is used to show the action results of an element.
        Possible values are "A" (when elements have been added to the UR)
        and "M" (when existing elements have been modified). Response only.
    key_override
        If a duplicate key is found where we are adding elements in some
        cases like URAdd, then instead of erroring out set this attribute to
        true.
    """
    class Meta:
        name = "SpecialEquipment"
        namespace = "http://www.travelport.com/schema/common_v52_0"

    # XML attribute "Key"; optional element key.
    key: None | str = field(
        default=None,
        metadata={
            "name": "Key",
            "type": "Attribute",
        }
    )
    # XML attribute "Type"; the only required attribute of this element.
    type_value: None | str = field(
        default=None,
        metadata={
            "name": "Type",
            "type": "Attribute",
            "required": True,
        }
    )
    # XML attribute "ElStat"; element action result ("A"/"M"), response only.
    el_stat: None | TypeElementStatus1 = field(
        default=None,
        metadata={
            "name": "ElStat",
            "type": "Attribute",
        }
    )
    # XML attribute "KeyOverride"; set true to tolerate duplicate keys on add.
    key_override: None | bool = field(
        default=None,
        metadata={
            "name": "KeyOverride",
            "type": "Attribute",
        }
    )
| [
"chris@komposta.net"
] | chris@komposta.net |
37e7b65b2eb87e028e91d5e800045af24ea8b6c0 | b0a217700c563c4f057f2aebbde8faba4b1b26d2 | /software/glasgow/arch/jtag.py | 7c4fe835ca1a2bd2417ce6ed37892e998c03caf9 | [
"0BSD",
"Apache-2.0"
] | permissive | kbeckmann/Glasgow | 5d183865da4fb499099d4c17e878a76192b691e7 | cd31e293cb99ee10a3e4a03ff26f6f124e512c64 | refs/heads/master | 2021-09-15T15:59:38.211633 | 2018-11-15T22:36:04 | 2018-11-22T21:13:59 | 157,077,707 | 3 | 0 | NOASSERTION | 2018-11-11T12:33:49 | 2018-11-11T12:33:48 | null | UTF-8 | Python | false | false | 250 | py | # Ref: IEEE 1149.1
from bitarray import bitarray
from ..support.bits import *
__all__ = [
# DR
"DR_IDCODE",
]
# JTAG Device Identification (IDCODE) data register per IEEE 1149.1,
# 32 bits, least-significant field first.
# NOTE(review): the `4` is presumably the register width in bytes — confirm
# against the project's Bitfield helper.
DR_IDCODE = Bitfield("DR_IDCODE", 4, [
    ("present", 1),   # bit 0: fixed 1 for IDCODE (a 0 here indicates BYPASS)
    ("mfg_id", 11),   # bits 1-11: JEDEC manufacturer identity code
    ("part_id", 16),  # bits 12-27: vendor-assigned part number
    ("version", 4),   # bits 28-31: part revision
])
| [
"whitequark@whitequark.org"
] | whitequark@whitequark.org |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.