seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
29707449656 | #!/usr/bin/env python
import pybullet as p
import random
import numpy as np
from mamad_util import JointInfo
def check_collision(active_joints_info,num_active_joints):
    """Return the list of self-colliding link-name pairs for the hand model.

    Relies on module-level globals: ``fingerID`` (the pybullet body id loaded
    at script scope) and ``parent_list`` (rebuilt each iteration of the main
    loop before this function is called; pairs of [jointIndex, parentIndex]).

    Args:
        active_joints_info: list of dicts with at least "jointIndex" and
            "linkName" keys, one per active joint.
        num_active_joints: number of entries of ``active_joints_info`` to use.

    Returns:
        List of [linkNameA, linkNameB] pairs that are currently touching,
        with duplicates (flipped pairs) and parent/child contacts removed.
    """
    collision_set=[]
    index_of_active_joints = [active_joints_info[i]["jointIndex"] for i in range(num_active_joints)]
    # Query every ordered pair of distinct links; distance threshold 0 means
    # only actual contacts are reported.
    for i in index_of_active_joints:
        for j in index_of_active_joints:
            if i == j:
                continue
            contact = p.getClosestPoints(fingerID,fingerID,0,i,j)
            if len(contact)!=0:
                # Entries 3 and 4 of a contact point are the two link indices
                # (per pybullet's getClosestPoints return format).
                collision_set.append([contact[0][3],contact[0][4]])
    # Because pairs were queried in both orders, each contact appears twice
    # as (a,b) and (b,a). Record the index of the flipped duplicate...
    check_flip=[]
    for i in range(len(collision_set)):
        index_1=collision_set[i][0]
        index_2=collision_set[i][1]
        for j in range(i,len(collision_set)):
            if i == j:
                continue
            if index_1 == collision_set[j][1] and index_2 == collision_set[j][0]:
                check_flip.append(j)
    # ...then delete the duplicates in ascending-index order, compensating
    # for the list shrinking by i after each deletion.
    new_check=[]
    sort=np.argsort(check_flip)
    for i in range(len(check_flip)):
        new_check.append(check_flip[sort[i]])
    for i in range(len(check_flip)):
        del collision_set[new_check[i]-i]
    # Drop contacts between a joint and its own parent link (adjacent links
    # touch by construction); parent_list comes from the main loop.
    check_parent=[]
    for i in range(len(parent_list)):
        index_parent_1=parent_list[i][0]
        index_parent_2=parent_list[i][1]
        for j in range(len(collision_set)):
            if index_parent_1 == collision_set[j][0] and index_parent_2 == collision_set[j][1]:
                check_parent.append(j)
            if index_parent_1 == collision_set[j][1] and index_parent_2 == collision_set[j][0]:
                check_parent.append(j)
    # Same ascending-order deletion trick as above.
    new_check_parent=[]
    sort_parent=np.argsort(check_parent)
    for i in range(len(check_parent)):
        new_check_parent.append(check_parent[sort_parent[i]])
    for i in range(len(check_parent)):
        del collision_set[new_check_parent[i]-i]
    # Map the surviving link-index pairs back to human-readable link names.
    collision_result=[]
    for i in range (len(collision_set)):
        index_collision_set_1=collision_set[i][0]
        index_collision_set_2=collision_set[i][1]
        for j in range(num_active_joints):
            if index_collision_set_1 == active_joints_info[j]["jointIndex"]:
                index_collision_set_1_result = j
            if index_collision_set_2 == active_joints_info[j]["jointIndex"]:
                index_collision_set_2_result = j
        collision_result.append([active_joints_info[index_collision_set_1_result]["linkName"],active_joints_info[index_collision_set_2_result]["linkName"]])
    return collision_result
# --- Scene setup (runs at import time) ---------------------------------------
p.connect(p.GUI)
p.setGravity(0,0,-9.8)
finger = p.loadSDF("./model.sdf")
fingerID = finger[0]
# Cache joint metadata via the project helper so we only query pybullet once.
jointInfo = JointInfo()
jointInfo.get_infoForAll_joints(finger)
active_joints_info = jointInfo.getActiveJointsInfo()
num_active_joints = jointInfo.getNumberOfActiveJoints()
num_joints = p.getNumJoints(fingerID)
# print("active_joints_info::",active_joints_info)
# print("finger::",finger)
# print("`num of joints:::",num_joints)
"""
for i in range(num_joints):
j_info = p.getJointInfo(fingerID,i)
print("joint_info::",j_info)
"""
# texUid = p.loadTexture("./../cube_new/aaa.png")
# cube_objects = p.loadSDF("./../cube_new/model.sdf")
# p.changeVisualShape(cube_objects[0], -1, rgbaColor=[1, 1, 1, 1])
# p.changeVisualShape(cube_objects[0], -1, textureUniqueId=texUid)
# p.resetBasePositionAndOrientation(cube_objects[0], [0, 0.37, 0.07],[0.7071, 0.000000, 0.000000, 0.7071])
# Manual stepping at a 0.2 ms timestep.
p.setRealTimeSimulation(0)
p.setTimeStep(1./5000)
# --- Main loop: hold pose, mirror current joint positions, report contacts ---
while(1):
    # Pin the base every tick so the hand cannot drift.
    p.resetBasePositionAndOrientation(fingerID, [0, 0, 0],[0.7071, 0.000000, 0.000000, -0.7071])
    parent_list=[]
    for i in range(num_active_joints):
        jointIndex = active_joints_info[i]["jointIndex"]
        jointName = active_joints_info[i]["jointName"]
        linkName = active_joints_info[i]["linkName"]
        jointPositionState = p.getJointState(fingerID,jointIndex)[0]
        # print("linkName::",linkName)
        # print("jointName::",jointName)
        # print("jointIndex::",jointIndex)
        # print("jointPositionState::",jointPositionState)
        jointll = active_joints_info[i]["jointLowerLimit"]
        jointul = active_joints_info[i]["jointUpperLimit"]
        # print("lower limit",jointll)
        # print("upper limit",jointul)
        # Command each joint to stay where it currently is.
        motor_command = jointPositionState
        # parent_list is consumed as a global inside check_collision().
        parent_list.append([jointIndex,jointInfo.searchBy("jointIndex",jointIndex)[0]["parentIndex"]])
        if jointIndex == 3:
            # NOTE(review): `step` is computed but never used, and `+0.0` is a
            # no-op — this looks like leftover scaffolding for sweeping
            # joint 3 through its range; confirm intent before removing.
            step =(abs(jointll)-abs(jointul))/100
            motor_command = jointPositionState+0.0
        p.setJointMotorControl2(fingerID,jointIndex,p.POSITION_CONTROL,motor_command, force=1.0)
    collision_result=check_collision(active_joints_info,num_active_joints)
    #print("right hand self coliision -------",collision_set)
    print("right hand self coliision -------",collision_result)
    print("\n")
    p.stepSimulation()
| ccylance/theis-code | gym_test/gym_test/envs/shadow_hand_vijay/gym_test.py | gym_test.py | py | 4,379 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pybullet.getClosestPoints",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy.argsort",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "numpy.argsort",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "pybullet.connec... |
6814540794 | from django.urls import path
from . import views
################################################################################
# Registering the app namespace...
# this will allow you to create dynamic Django hyperlinks in html files
# when using the django tag: {% url atomic:tracker ... %} for example.
# URL namespace: enables reversing routes as "atomic:<name>" in templates.
app_name = "atomic"
################################################################################
# Route table for the atomic app: index/tracker/about pages plus the JSON API
# endpoints handled by the corresponding view callables.
urlpatterns = [
    path("", views.index, name="index"),
    path("tracker/", views.tracker, name="tracker"),
    path("about/", views.about, name="about"),
    path("api/", views.api, name="api"),
    path("api/updateDB/", views.updateDB, name="updateDB")
] | chinchay/habit-tracker | backend/atomic/urls.py | urls.py | py | 669 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.urls.path",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "django.urls.path",... |
16293536002 | import os
from time import sleep
import boto3
from botocore.exceptions import ClientError
# Module-level AWS handles, created once at import time using the default
# credential chain/region.
IAM_R = boto3.resource('iam')
IAM_C = boto3.client('iam')
LAMBDA_C = boto3.client('lambda')
EVENTS_C = boto3.client('events')
# Directory containing this script; the policy JSON files live alongside it.
BASE_DIR = os.path.dirname(os.path.realpath(__file__))
def setup_iam_role():
    """Idempotently provision the ``aws_monitor`` IAM role.

    Ensures the role exists, (re)writes its inline access policies from the
    JSON files next to this script, ensures a matching instance profile
    exists and is attached to the role.

    Returns:
        The boto3 ``iam.Role`` resource for ``aws_monitor``.
    """
    def read_policy_file(file_name):
        # Policy documents are stored as "<name>.json" beside this script.
        with open('{}/{}.json'.format(BASE_DIR, file_name), 'r') as handle:
            return handle.read()

    # Create the role only when it does not exist yet.
    try:
        IAM_C.get_role(RoleName='aws_monitor')
    except ClientError as err:
        if err.response['Error']['Code'] != 'NoSuchEntity':
            raise err
        IAM_C.create_role(RoleName='aws_monitor',
                          AssumeRolePolicyDocument=read_policy_file('lambda_role_policy'))

    # Always (re)apply the inline access policies so updates take effect.
    for policy_name in ['ec2_access', 'sns_access', 'cloudwatch_access',
                        'rds_access', 'as_access', 's3_access']:
        IAM_C.put_role_policy(RoleName='aws_monitor',
                              PolicyName=policy_name,
                              PolicyDocument=read_policy_file(policy_name))

    # Create the instance profile only when it does not exist yet.
    try:
        IAM_C.get_instance_profile(InstanceProfileName='aws_monitor')
    except ClientError as err:
        if err.response['Error']['Code'] != 'NoSuchEntity':
            raise err
        IAM_C.create_instance_profile(InstanceProfileName='aws_monitor')

    # Attach the role to the profile unless it is already attached.
    attached_profiles = IAM_C.list_instance_profiles_for_role(RoleName='aws_monitor')
    already_attached = any(
        profile['InstanceProfileName'] == 'aws_monitor'
        for profile in attached_profiles['InstanceProfiles']
    )
    if not already_attached:
        IAM_C.add_role_to_instance_profile(InstanceProfileName='aws_monitor',
                                           RoleName='aws_monitor')
    return IAM_R.Role('aws_monitor')
def configure_vpc():
    """Build the optional VpcConfig mapping for the Lambda function.

    Reads ``SUBNET_ID`` and ``SECURITY_GROUP_ID`` from the environment and
    includes each (as a single-element list) only when it is set and
    non-empty, so an empty dict means "no VPC configuration".

    Returns:
        dict suitable for the ``VpcConfig`` argument of
        ``lambda.create_function``.
    """
    env_to_config_key = (
        ('SUBNET_ID', 'SubnetIds'),
        ('SECURITY_GROUP_ID', 'SecurityGroupIds'),
    )
    config = {}
    for env_name, config_key in env_to_config_key:
        value = os.environ.get(env_name)
        if value:
            config[config_key] = [value]
    return config
def upload_lambda_function():
    """Deploy the ``DiscoverInstances`` Lambda and its schedule.

    Steps, in order: ensure the IAM role exists, create/enable the
    CloudWatch Events schedule rule, upload ``aws_monitor.zip`` (updating
    the function code if it already exists, creating it otherwise), grant
    the rule permission to invoke the function, and point the rule at it.
    """
    vpc_config = configure_vpc()
    role = setup_iam_role()
    # Schedule expression comes from the environment (e.g. "rate(1 hour)").
    rule = EVENTS_C.put_rule(Name='DiscoverInstancesSchedule',
                             ScheduleExpression=os.environ.get('DISCOVERY_SCHEDULE'),
                             State='ENABLED',
                             Description='Run the instance discovery')
    # The zip artifact is expected one directory above this script.
    with open('{}/../aws_monitor.zip'.format(BASE_DIR), 'rb') as zip_file:
        zip_bytes = zip_file.read()
    fcn = {}
    try:
        # Update path: function already exists, just push new code.
        LAMBDA_C.get_function(FunctionName='DiscoverInstances')
        fcn = LAMBDA_C.update_function_code(FunctionName='DiscoverInstances',
                                            ZipFile=zip_bytes,
                                            Publish=True)
    except ClientError as err:
        if err.response['Error']['Code'] == 'ResourceNotFoundException':
            # Create path: wait briefly so the freshly-created IAM role
            # propagates before Lambda validates it.
            sleep(10)
            fcn = LAMBDA_C.create_function(FunctionName='DiscoverInstances',
                                           Code={'ZipFile': zip_bytes},
                                           Runtime='python2.7',
                                           Role=role.arn,
                                           Handler='zumoco.main',
                                           Timeout=300,
                                           Description="Discover, add cloudwatch alerts",
                                           MemorySize=128,
                                           VpcConfig=vpc_config)
        else:
            raise err
    try:
        # Allow the schedule rule to invoke the function.
        LAMBDA_C.add_permission(FunctionName='DiscoverInstances',
                                StatementId='DiscoverInstancesSchedule-Permission',
                                Action='lambda:InvokeFunction',
                                Principal='events.amazonaws.com',
                                SourceArn=rule['RuleArn'])
    except ClientError as err:
        if err.response['Error']['Code'] != 'ResourceConflictException':
            # ignore conflicts if the rule exists
            raise err
    EVENTS_C.put_targets(Rule='DiscoverInstancesSchedule',
                         Targets=[{'Id': 'DiscoverInstances-schedule',
                                   'Arn': fcn['FunctionArn'],}])
# NOTE(review): runs at import time — consider an `if __name__ == "__main__":`
# guard if this module is ever imported rather than executed directly.
upload_lambda_function()
| zulily/aws_monitor | deployscripts/setup_lambda.py | setup_lambda.py | py | 4,849 | python | en | code | 3 | github-code | 6 | [
{
"api_name": "boto3.resource",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "boto3.client",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "boto3.client",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "boto3.client",
"line_number... |
19886880930 | from guardata.client.client_events import ClientEvent
import pytest
from unittest.mock import ANY
from pendulum import datetime
from guardata.api.data import UserManifest, WorkspaceEntry
from guardata.client.types import WorkspaceRole, LocalUserManifest, EntryID
from guardata.client.fs import (
FSError,
FSWorkspaceNotFoundError,
FSBackendOfflineError,
FSSharingNotAllowedError,
)
from backendService.realm import RealmGrantedRole, RealmRole
from tests.common import freeze_time, create_shared_workspace
@pytest.mark.trio
async def test_share_unknown(running_backend, alice_user_fs, bob):
    """Sharing an unknown workspace id raises FSWorkspaceNotFoundError."""
    wid = EntryID()
    with pytest.raises(FSWorkspaceNotFoundError):
        await alice_user_fs.workspace_share(wid, bob.user_id, WorkspaceRole.MANAGER)
@pytest.mark.trio
async def test_share_to_oneself(running_backend, alice_user_fs, alice):
    """A user cannot share a workspace with themselves."""
    with freeze_time("2000-01-02"):
        wid = await alice_user_fs.workspace_create("w1")
    with pytest.raises(FSError) as exc:
        await alice_user_fs.workspace_share(wid, alice.user_id, WorkspaceRole.MANAGER)
    assert str(exc.value) == "Cannot share to oneself"
@pytest.mark.trio
async def test_share_bad_recipient(running_backend, alice_user_fs, alice, mallory):
    """Sharing with a user unknown to the backend fails with an explicit error."""
    with freeze_time("2000-01-02"):
        wid = await alice_user_fs.workspace_create("w1")
    with pytest.raises(FSError) as exc:
        await alice_user_fs.workspace_share(wid, mallory.user_id, WorkspaceRole.MANAGER)
    assert str(exc.value) == "User `mallory` doesn't exist in backend"
@pytest.mark.trio
async def test_share_offline(running_backend, alice_user_fs, bob):
    """Sharing while the backend is unreachable raises FSBackendOfflineError."""
    with freeze_time("2000-01-02"):
        wid = await alice_user_fs.workspace_create("w1")
    with running_backend.offline():
        with pytest.raises(FSBackendOfflineError):
            await alice_user_fs.workspace_share(wid, bob.user_id, WorkspaceRole.MANAGER)
@pytest.mark.trio
@pytest.mark.parametrize("presynced", (True, False))
async def test_share_ok(running_backend, alice_user_fs, bob_user_fs, alice, bob, presynced):
    """Nominal flow: Bob gets notified of the sharing and sees the same workspace content.

    Parametrized on whether Alice synced her manifest before sharing.
    """
    with freeze_time("2000-01-02"):
        wid = await alice_user_fs.workspace_create("w1")
    if presynced:
        await alice_user_fs.sync()
    await alice_user_fs.workspace_share(wid, bob.user_id, WorkspaceRole.MANAGER)
    with bob_user_fs.event_bus.listen() as spy:
        with freeze_time("2000-01-03"):
            await bob_user_fs.process_last_messages()
    # Bob must receive a SHARING_UPDATED event with no previous entry.
    spy.assert_event_occured(
        ClientEvent.SHARING_UPDATED,
        {
            "new_entry": WorkspaceEntry(
                name="w1",
                id=wid,
                key=ANY,
                encryption_revision=1,
                encrypted_on=datetime(2000, 1, 2),
                role_cached_on=datetime(2000, 1, 3),
                role=WorkspaceRole.MANAGER,
            ),
            "previous_entry": None,
        },
    )
    # Both user manifests now reference the same workspace with Bob as MANAGER.
    aum = alice_user_fs.get_user_manifest()
    bum = bob_user_fs.get_user_manifest()
    assert len(aum.workspaces) == 1
    assert len(bum.workspaces) == 1
    awe = aum.get_workspace_entry(wid)
    bwe = bum.get_workspace_entry(wid)
    assert bwe.name == "w1"
    assert bwe.id == awe.id
    assert bwe.role == WorkspaceRole.MANAGER
    # Workspace content is identical from both sides.
    aw = alice_user_fs.get_workspace(wid)
    bw = bob_user_fs.get_workspace(wid)
    aw_stat = await aw.path_info("/")
    bw_stat = await bw.path_info("/")
    assert aw_stat == bw_stat
@pytest.mark.trio
async def test_share_workspace_then_rename_it(
    running_backend, alice_user_fs, bob_user_fs, alice, bob
):
    """Workspace name is local to each user: renames don't affect shared content."""
    # Share a workspace between Alice and Bob
    with freeze_time("2000-01-02"):
        wid = await alice_user_fs.workspace_create("w")
    await alice_user_fs.workspace_share(wid, bob.user_id, WorkspaceRole.MANAGER)
    with freeze_time("2000-01-03"):
        await bob_user_fs.process_last_messages()
    # Now Bob and Alice both rename the workspace to their own taste
    await bob_user_fs.workspace_rename(wid, "from_alice")
    await alice_user_fs.workspace_rename(wid, "to_bob")
    await bob_user_fs.sync()
    await alice_user_fs.sync()
    # This should have not changed the workspace in any way
    bw = bob_user_fs.get_workspace(wid)
    aw = alice_user_fs.get_workspace(wid)
    await bw.touch("/ping_bob.txt")
    await aw.mkdir("/ping_alice")
    await bw.sync()
    await aw.sync()
    await bw.sync()
    aw_stat = await aw.path_info("/")
    bw_stat = await bw.path_info("/")
    assert aw_stat == bw_stat
    assert aw_stat["id"] == wid
@pytest.mark.trio
async def test_unshare_ok(running_backend, alice_user_fs, bob_user_fs, alice, bob):
    """A recipient granted OWNER can revoke the original sharer's access."""
    # Share a workspace...
    with freeze_time("2000-01-02"):
        wid = await alice_user_fs.workspace_create("w1")
    await alice_user_fs.workspace_share(wid, bob.user_id, WorkspaceRole.OWNER)
    await bob_user_fs.process_last_messages()
    # ...and unshare it
    await bob_user_fs.workspace_share(wid, alice.user_id, None)
    with alice_user_fs.event_bus.listen() as spy:
        with freeze_time("2000-01-03"):
            await alice_user_fs.process_last_messages()
    # Alice gets an event going from OWNER to no role at all.
    spy.assert_event_occured(
        ClientEvent.SHARING_UPDATED,
        {
            "new_entry": WorkspaceEntry(
                name="w1",
                id=wid,
                key=ANY,
                encryption_revision=1,
                encrypted_on=datetime(2000, 1, 2),
                role_cached_on=datetime(2000, 1, 3),
                role=None,
            ),
            "previous_entry": WorkspaceEntry(
                name="w1",
                id=wid,
                key=ANY,
                encryption_revision=1,
                encrypted_on=datetime(2000, 1, 2),
                role_cached_on=datetime(2000, 1, 2),
                role=WorkspaceRole.OWNER,
            ),
        },
    )
    aum = alice_user_fs.get_user_manifest()
    aw = aum.workspaces[0]
    assert not aw.role
    # TODO: check workspace access is no longer possible
@pytest.mark.trio
async def test_unshare_not_shared(running_backend, alice_user_fs, bob_user_fs, alice, bob):
    """Revoking access to a never-shared workspace is silently ignored."""
    with freeze_time("2000-01-02"):
        wid = await alice_user_fs.workspace_create("w1")
    await alice_user_fs.workspace_share(wid, bob.user_id, None)
    with alice_user_fs.event_bus.listen() as spy:
        await bob_user_fs.process_last_messages()
    assert not spy.events
    # Workspace unsharing should have been ignored
    bum = bob_user_fs.get_user_manifest()
    assert not bum.workspaces
@pytest.mark.trio
async def test_share_to_another_after_beeing_unshared(
    running_backend, alice_user_fs, bob_user_fs, alice, bob
):
    """Once unshared, the former recipient can no longer (un)share the workspace."""
    # Share a workspace...
    with freeze_time("2000-01-02"):
        wid = await alice_user_fs.workspace_create("w1")
    await alice_user_fs.workspace_share(wid, bob.user_id, WorkspaceRole.MANAGER)
    await bob_user_fs.process_last_messages()
    # ...and unshare it
    await alice_user_fs.workspace_share(wid, bob.user_id, None)
    await bob_user_fs.process_last_messages()
    # Shouldn't be able to share the workspace anymore
    with pytest.raises(FSSharingNotAllowedError):
        await bob_user_fs.workspace_share(wid, alice.user_id, None)
@pytest.mark.trio
async def test_reshare_workspace(running_backend, alice_user_fs, bob_user_fs, alice, bob):
    """Re-sharing after a revocation restores Bob's access and emits the expected event."""
    # Share a workspace...
    with freeze_time("2000-01-02"):
        wid = await alice_user_fs.workspace_create("w1")
    await alice_user_fs.workspace_share(wid, bob.user_id, WorkspaceRole.MANAGER)
    with freeze_time("2000-01-03"):
        await bob_user_fs.process_last_messages()
    # ...and unshare it...
    await alice_user_fs.workspace_share(wid, bob.user_id, None)
    with freeze_time("2000-01-04"):
        await bob_user_fs.process_last_messages()
    # ...and re-share it !
    await alice_user_fs.workspace_share(wid, bob.user_id, WorkspaceRole.MANAGER)
    with bob_user_fs.event_bus.listen() as spy:
        with freeze_time("2000-01-05"):
            await bob_user_fs.process_last_messages()
    # Event carries the restored role and the cached revoked state.
    spy.assert_event_occured(
        ClientEvent.SHARING_UPDATED,
        {
            "new_entry": WorkspaceEntry(
                name="w1",
                id=wid,
                key=ANY,
                encryption_revision=1,
                encrypted_on=datetime(2000, 1, 2),
                role_cached_on=datetime(2000, 1, 5),
                role=WorkspaceRole.MANAGER,
            ),
            "previous_entry": WorkspaceEntry(
                name="w1",
                id=wid,
                key=ANY,
                encryption_revision=1,
                encrypted_on=datetime(2000, 1, 2),
                role_cached_on=datetime(2000, 1, 4),
                role=None,
            ),
        },
    )
    # Check access
    aum = alice_user_fs.get_user_manifest()
    bum = bob_user_fs.get_user_manifest()
    assert len(aum.workspaces) == 1
    assert len(bum.workspaces) == 1
    aw = aum.workspaces[0]
    bw = bum.workspaces[0]
    assert bw.name == "w1"
    assert bw.id == aw.id
    assert bw.role == WorkspaceRole.MANAGER
@pytest.mark.trio
async def test_share_with_different_role(running_backend, alice_user_fs, bob_user_fs, alice, bob):
    """Cycling Bob through every WorkspaceRole updates his entry and emits events."""
    with freeze_time("2000-01-02"):
        wid = await alice_user_fs.workspace_create("w1")
    aum = alice_user_fs.get_user_manifest()
    aw = aum.workspaces[0]
    previous_entry = None
    for role in WorkspaceRole:
        # (re)share with rights
        await alice_user_fs.workspace_share(wid, bob.user_id, role)
        with bob_user_fs.event_bus.listen() as spy:
            await bob_user_fs.process_last_messages()
        new_entry = spy.partial_obj(WorkspaceEntry, name="w1", id=wid, role=role)
        if not previous_entry:
            spy.assert_event_occured(
                ClientEvent.SHARING_UPDATED, {"new_entry": new_entry, "previous_entry": None}
            )
        else:
            spy.assert_event_occured(
                ClientEvent.SHARING_UPDATED,
                {"new_entry": new_entry, "previous_entry": previous_entry},
            )
        previous_entry = new_entry
        # Check access
        bum = bob_user_fs.get_user_manifest()
        assert len(bum.workspaces) == 1
        bw = bum.workspaces[0]
        assert bw.name == "w1"
        assert bw.id == aw.id
        assert bw.role == role
@pytest.mark.trio
async def test_share_no_manager_right(running_backend, alice_user_fs, alice, bob):
    """Sharing fails once the user has been demoted below Manager on the realm.

    The demotion is injected directly through the backend realm API so the
    client-side cache is unaware of it.
    """
    with freeze_time("2000-01-02"):
        wid = await alice_user_fs.workspace_create("w1")
    await alice_user_fs.sync()
    # Drop manager right (and give to Bob the ownership)
    await running_backend.backend.realm.update_roles(
        alice.organization_id,
        RealmGrantedRole(
            realm_id=wid,
            user_id=bob.user_id,
            certificate=b"<dummy>",
            role=RealmRole.OWNER,
            granted_by=alice.device_id,
            granted_on=datetime(2000, 1, 3),
        ),
    )
    await running_backend.backend.realm.update_roles(
        alice.organization_id,
        RealmGrantedRole(
            realm_id=wid,
            user_id=alice.user_id,
            certificate=b"<dummy>",
            role=RealmRole.CONTRIBUTOR,
            granted_by=bob.device_id,
            granted_on=datetime(2000, 1, 4),
        ),
    )
    with pytest.raises(FSSharingNotAllowedError) as exc:
        await alice_user_fs.workspace_share(wid, bob.user_id, WorkspaceRole.MANAGER)
    assert (
        exc.value.message
        == "Must be Owner or Manager on the workspace is mandatory to share it: {'status': 'not_allowed'}"
    )
@pytest.mark.trio
async def test_share_with_sharing_name_already_taken(
    running_backend, alice_user_fs, bob_user_fs, alice, bob
):
    """Receiving a workspace whose name collides with existing ones still works."""
    # Bob and Alice both has a workspace with similar name
    with freeze_time("2000-01-01"):
        awid = await alice_user_fs.workspace_create("w")
        bwid = await bob_user_fs.workspace_create("w")
        bw2id = await bob_user_fs.workspace_create("w")
    # Sharing them shouldn't be a trouble
    await bob_user_fs.sync()
    await alice_user_fs.workspace_share(awid, bob.user_id, WorkspaceRole.MANAGER)
    # Bob should get a notification
    with bob_user_fs.event_bus.listen() as spy:
        with freeze_time("2000-01-02"):
            await bob_user_fs.process_last_messages()
    spy.assert_event_occured(
        ClientEvent.SHARING_UPDATED,
        {
            "new_entry": WorkspaceEntry(
                name="w",
                id=awid,
                key=ANY,
                encryption_revision=1,
                encrypted_on=datetime(2000, 1, 1),
                role_cached_on=datetime(2000, 1, 2),
                role=WorkspaceRole.MANAGER,
            ),
            "previous_entry": None,
        },
    )
    # Bob ends up with three same-named workspaces, all individually reachable.
    assert len(bob_user_fs.get_user_manifest().workspaces) == 3
    b_aw_stat = await bob_user_fs.get_workspace(awid).path_info("/")
    a_aw_stat = await alice_user_fs.get_workspace(awid).path_info("/")
    b_aw_stat.pop("need_sync")
    a_aw_stat.pop("need_sync")
    assert b_aw_stat == a_aw_stat
    b_bw_stat = await bob_user_fs.get_workspace(bwid).path_info("/")
    assert b_bw_stat["id"] == bwid
    b_bw2_stat = await bob_user_fs.get_workspace(bw2id).path_info("/")
    assert b_bw2_stat["id"] == bw2id
@pytest.mark.trio
@pytest.mark.parametrize("first_to_sync", ("alice", "alice2"))
async def test_share_workspace_then_conflict_on_rights(
    running_backend, alice_user_fs, alice2_user_fs, bob_user_fs, alice, alice2, bob, first_to_sync
):
    """Two devices holding different cached roles reconcile to a single manifest.

    Alice's first device only saw the MANAGER grant, her second device saw
    both grants (ending as CONTRIBUTOR); after mutual sync both devices must
    agree on the CONTRIBUTOR entry. Parametrized on which device syncs first,
    which changes the expected version/timestamp of the merged manifest.
    """
    # Bob shares a workspace with Alice...
    with freeze_time("2000-01-01"):
        wid = await bob_user_fs.workspace_create("w")
    with freeze_time("2000-01-02"):
        await bob_user_fs.workspace_share(wid, alice.user_id, WorkspaceRole.MANAGER)
    # ...but only Alice's first device get the information
    with freeze_time("2000-01-03"):
        await alice_user_fs.process_last_messages()
    # Now Bob change the sharing rights...
    with freeze_time("2000-01-04"):
        await bob_user_fs.workspace_share(wid, alice.user_id, WorkspaceRole.CONTRIBUTOR)
    # ...this time it's Alice's second device which get the info
    with freeze_time("2000-01-05"):
        # Note we will process the 2 sharing messages bob sent us, this
        # will attribute role_cached_on to the first message timestamp even
        # if we cache the second message role...
        await alice2_user_fs.process_last_messages()
    if first_to_sync == "alice":
        first = alice_user_fs
        second = alice2_user_fs
        synced_timestamp = datetime(2000, 1, 7)
        synced_version = 3
    else:
        first = alice2_user_fs
        second = alice_user_fs
        synced_timestamp = datetime(2000, 1, 6)
        synced_version = 2
    # Finally Alice devices try to reconciliate
    with freeze_time("2000-01-06"):
        await first.sync()
    with freeze_time("2000-01-07"):
        await second.sync()
    # Resync first device to get changes from the 2nd
    with freeze_time("2000-01-08"):
        await first.sync()
    am = alice_user_fs.get_user_manifest()
    a2m = alice2_user_fs.get_user_manifest()
    expected_remote = UserManifest(
        author=alice2.device_id,
        timestamp=synced_timestamp,
        id=alice2.user_manifest_id,
        version=synced_version,
        created=datetime(2000, 1, 1),
        updated=datetime(2000, 1, 5),
        last_processed_message=2,
        workspaces=(
            WorkspaceEntry(
                name="w",
                id=wid,
                key=ANY,
                encryption_revision=1,
                encrypted_on=datetime(2000, 1, 1),
                role_cached_on=datetime(2000, 1, 5),
                role=WorkspaceRole.CONTRIBUTOR,
            ),
        ),
    )
    expected = LocalUserManifest(
        base=expected_remote,
        need_sync=False,
        updated=expected_remote.updated,
        last_processed_message=expected_remote.last_processed_message,
        workspaces=expected_remote.workspaces,
    )
    assert am == expected
    assert a2m == expected
    # Both devices see the same workspace content and entry.
    a_w = alice_user_fs.get_workspace(wid)
    a2_w = alice2_user_fs.get_workspace(wid)
    a_w_stat = await a_w.path_info("/")
    a2_w_stat = await a2_w.path_info("/")
    a_w_entry = a_w.get_workspace_entry()
    a2_w_entry = a2_w.get_workspace_entry()
    assert a_w_stat == {
        "type": "folder",
        "is_placeholder": False,
        "id": wid,
        "created": ANY,
        "updated": ANY,
        "base_version": 1,
        "need_sync": False,
        "children": [],
        "confined": False,
    }
    assert a_w_stat == a2_w_stat
    assert a_w_entry == WorkspaceEntry(
        name="w",
        id=wid,
        key=ANY,
        encryption_revision=1,
        encrypted_on=datetime(2000, 1, 1),
        role_cached_on=datetime(2000, 1, 5),
        role=WorkspaceRole.CONTRIBUTOR,
    )
    assert a2_w_entry == a_w_entry
@pytest.mark.trio
async def test_sharing_events_triggered_on_sync(
    running_backend, alice_user_fs, alice2_user_fs, bob_user_fs, alice, bob
):
    """A second device learns of share/role-change/revoke purely by syncing."""
    # Share a first workspace
    with freeze_time("2000-01-02"):
        wid = await create_shared_workspace("w", bob_user_fs, alice_user_fs)
    with alice2_user_fs.event_bus.listen() as spy:
        await alice2_user_fs.sync()
    expected_entry_v1 = WorkspaceEntry(
        name="w",
        id=wid,
        key=ANY,
        encryption_revision=1,
        encrypted_on=datetime(2000, 1, 2),
        role_cached_on=datetime(2000, 1, 2),
        role=WorkspaceRole.MANAGER,
    )
    spy.assert_event_occured(
        ClientEvent.SHARING_UPDATED, {"new_entry": expected_entry_v1, "previous_entry": None}
    )
    # Change role
    await bob_user_fs.workspace_share(wid, alice.user_id, WorkspaceRole.OWNER)
    with freeze_time("2000-01-03"):
        await alice_user_fs.process_last_messages()
    await alice_user_fs.sync()
    with alice2_user_fs.event_bus.listen() as spy:
        await alice2_user_fs.sync()
    expected_entry_v2 = WorkspaceEntry(
        name="w",
        id=wid,
        key=ANY,
        encryption_revision=1,
        encrypted_on=datetime(2000, 1, 2),
        role_cached_on=datetime(2000, 1, 3),
        role=WorkspaceRole.OWNER,
    )
    spy.assert_event_occured(
        ClientEvent.SHARING_UPDATED,
        {"new_entry": expected_entry_v2, "previous_entry": expected_entry_v1},
    )
    # Revoke
    await bob_user_fs.workspace_share(wid, alice.user_id, None)
    with freeze_time("2000-01-04"):
        await alice_user_fs.process_last_messages()
    await alice_user_fs.sync()
    with alice2_user_fs.event_bus.listen() as spy:
        await alice2_user_fs.sync()
    expected_entry_v3 = WorkspaceEntry(
        name="w",
        id=wid,
        key=ANY,
        encryption_revision=1,
        encrypted_on=datetime(2000, 1, 2),
        role_cached_on=datetime(2000, 1, 4),
        role=None,
    )
    spy.assert_event_occured(
        ClientEvent.SHARING_UPDATED,
        {"new_entry": expected_entry_v3, "previous_entry": expected_entry_v2},
    )
@pytest.mark.trio
async def test_no_sharing_event_on_sync_on_unknown_workspace(
    running_backend, alice_user_fs, alice2_user_fs, bob_user_fs, alice, bob
):
    """A share revoked before the device ever saw it triggers no sharing event."""
    # Share a workspace...
    wid = await create_shared_workspace("w", bob_user_fs, alice_user_fs)
    # ...and unshare it before alice2 even know about it
    await bob_user_fs.workspace_share(wid, alice.user_id, None)
    await alice_user_fs.process_last_messages()
    await alice_user_fs.sync()
    # No sharing event should be triggered !
    with alice2_user_fs.event_bus.listen() as spy:
        await alice2_user_fs.sync()
    spy.assert_events_exactly_occured([ClientEvent.FS_ENTRY_REMOTE_CHANGED])
@pytest.mark.trio
async def test_sharing_event_on_sync_if_same_role(
    running_backend, alice_user_fs, alice2_user_fs, bob_user_fs, alice, bob
):
    """A role changed and then restored still produces one event (timestamp differs)."""
    # Share a workspace, alice2 knows about it
    with freeze_time("2000-01-02"):
        wid = await create_shared_workspace("w", bob_user_fs, alice_user_fs, alice2_user_fs)
    expected_entry_v1 = WorkspaceEntry(
        name="w",
        id=wid,
        key=ANY,
        encryption_revision=1,
        encrypted_on=datetime(2000, 1, 2),
        role_cached_on=datetime(2000, 1, 2),
        role=WorkspaceRole.MANAGER,
    )
    # Then change alice's role...
    await bob_user_fs.workspace_share(wid, alice.user_id, WorkspaceRole.OWNER)
    with freeze_time("2000-01-03"):
        await alice_user_fs.process_last_messages()
    await alice_user_fs.sync()
    # ...and give back alice the same role
    await bob_user_fs.workspace_share(wid, alice.user_id, WorkspaceRole.MANAGER)
    with freeze_time("2000-01-04"):
        await alice_user_fs.process_last_messages()
    expected_entry_v3 = expected_entry_v1.evolve(role_cached_on=datetime(2000, 1, 4))
    await alice_user_fs.sync()
    # A single sharing event should be triggered
    with alice2_user_fs.event_bus.listen() as spy:
        await alice2_user_fs.sync()
    spy.assert_event_occured(
        ClientEvent.SHARING_UPDATED,
        {"new_entry": expected_entry_v3, "previous_entry": expected_entry_v1},
    )
| bitlogik/guardata | tests/client/fs/userfs/test_sharing.py | test_sharing.py | py | 21,167 | python | en | code | 9 | github-code | 6 | [
{
"api_name": "guardata.client.types.EntryID",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "pytest.raises",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "guardata.client.fs.FSWorkspaceNotFoundError",
"line_number": 22,
"usage_type": "argument"
}... |
33963223285 | from http import HTTPStatus
from django.test import TestCase, Client
class AboutTests(TestCase):
    """Smoke tests for the static "about" pages."""

    def setUp(self):
        # Unauthenticated client — the about pages are public.
        self.guest_client = Client()

    def test_about_urls_uses_correct_templates(self):
        """Each about URL responds 200 and renders its expected template."""
        templates_url_names_quest = {
            '/about/author/': 'about/author.html',
            '/about/tech/': 'about/tech.html'
        }
        for address, template in templates_url_names_quest.items():
            with self.subTest(address=address):
                response = self.guest_client.get(address)
                self.assertEqual(
                    response.status_code,
                    HTTPStatus.OK
                )
                # Bug fix: `template` was previously bound but never checked,
                # so the test did not verify templates despite its name.
                self.assertTemplateUsed(response, template)
| Mashabor/hw05_final | yatube/about/tests.py | tests.py | py | 660 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.test.TestCase",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "django.test.Client",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "http.HTTPStatus.OK",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "http.HT... |
20594474782 | import torch
import torch.nn.functional as F
def global_align_loss(
visual_embed,
textual_embed,
labels,
mixture=False,
alpha=0.6,
beta=0.4,
scale_pos=10,
scale_neg=40,
):
batch_size = labels.size(0)
visual_norm = F.normalize(visual_embed, p=2, dim=1)
textual_norm = F.normalize(textual_embed, p=2, dim=1)
similarity = torch.matmul(visual_norm, textual_norm.t())
labels_ = (
labels.expand(batch_size, batch_size)
.eq(labels.expand(batch_size, batch_size).t())
.float()
)
pos_inds = labels_ == 1
neg_inds = labels_ == 0
loss_pos = torch.log(1 + torch.exp(-scale_pos * (similarity[pos_inds] - alpha)))
loss_neg = torch.log(1 + torch.exp(scale_neg * (similarity[neg_inds] - beta)))
loss = (loss_pos.sum() + loss_neg.sum()) * 2.0
if mixture:
margin = alpha - beta
tmp = similarity
tmp[neg_inds] = 1
hard_v_pos, _ = torch.min(tmp, dim=1)
hard_t_pos, _ = torch.min(tmp, dim=0)
tmp = similarity
tmp[pos_inds] = 0
hard_v_neg, _ = torch.max(tmp, dim=1)
hard_t_neg, _ = torch.max(tmp, dim=0)
# y = torch.ones_like(hard_v_neg)
# loss_v_dist = F.margin_ranking_loss(hard_v_neg, hard_v_pos, y, margin=margin, reduction="sum")
# loss_t_dist = F.margin_ranking_loss(hard_t_neg, hard_t_pos, y, margin=margin, reduction="sum")
v_dist = hard_v_pos - hard_v_neg
t_dist = hard_t_pos - hard_t_neg
loss_v_dist = torch.log(1 + torch.exp(margin - v_dist))
loss_t_dist = torch.log(1 + torch.exp(margin - t_dist))
loss = loss + loss_t_dist.sum() + loss_v_dist.sum()
loss /= batch_size
return loss
def global_align_loss_from_sim(
    similarity,
    labels,
    alpha=0.6,
    beta=0.4,
    scale_pos=10,
    scale_neg=40,
):
    """Global alignment loss from a precomputed similarity matrix.

    Same objective as ``global_align_loss`` but skips embedding
    normalization: positive pairs (equal labels) are pushed above ``alpha``
    and negative pairs below ``beta`` via logistic penalties.

    Args:
        similarity: (batch, batch) similarity matrix.
        labels: (batch,) identity labels.

    Returns:
        Scalar loss tensor normalized by batch size.
    """
    batch_size = labels.size(0)
    expanded = labels.expand(batch_size, batch_size)
    match_matrix = expanded.eq(expanded.t()).float()
    pos_mask = match_matrix == 1
    neg_mask = match_matrix == 0
    pos_term = torch.log(1 + torch.exp(-scale_pos * (similarity[pos_mask] - alpha)))
    neg_term = torch.log(1 + torch.exp(scale_neg * (similarity[neg_mask] - beta)))
    total = (pos_term.sum() + neg_term.sum()) * 2.0
    return total / batch_size
def local_align_no_sampling_loss(
part_embed,
attr_embed,
labels,
part_masks,
attr_masks,
num_parts=5,
alpha=0.6,
beta=0.4,
scale_pos=10,
scale_neg=40,
):
batch_size = labels.size(0)
part_embed = F.normalize(part_embed, p=2, dim=2)
attr_embed = F.normalize(attr_embed, p=2, dim=2)
labels_ = labels.expand(batch_size, batch_size).eq(
labels.expand(batch_size, batch_size).t()
)
pos_inds = labels_ == 1
neg_inds = labels_ == 0
local_loss = 0.0
for i in range(num_parts):
filter_inds = torch.ones_like(labels_)
filter_inds[~attr_masks[:, i], :] = 0
filter_inds[:, ~part_masks[:, i]] = 0
filter_pos_inds = filter_inds & pos_inds
filter_neg_inds = filter_inds & neg_inds
local_similarity = torch.matmul(attr_embed[i], part_embed[i].t())
loss_pos = torch.log(
1 + torch.exp(-scale_pos * (local_similarity[filter_pos_inds] - alpha))
)
loss_neg = torch.log(
1 + torch.exp(scale_neg * (local_similarity[filter_neg_inds] - beta))
)
local_loss += (loss_pos.sum() + loss_neg.sum()) * 2.0
return local_loss / batch_size / num_parts
def local_align_loss(
    part_embed,
    attribute_embed,
    labels,
    part_masks,
    attr_masks,
    num_parts=5,
    alpha=0.6,
    beta=0.4,
    scale_pos=10,
    scale_neg=40,
    topK=8,
):
    """Part/attribute alignment loss with k-reciprocal positive mining.

    For every part slot and every sample, extra positives are mined: a
    candidate is accepted when it is in the sample's top-K neighbors AND the
    sample is in the candidate's top-K neighbors (k-reciprocal check). Mined
    positives are added to the label-based positives before applying the
    margin losses.

    Bug fix: the k-reciprocal lookups previously indexed the ranking matrices
    with the *part* loop variable ``i`` (``rank1[i]``, ``backward_k_idx == i``)
    instead of the current *sample* index ``j``, so every sample in a part
    reused the mining result of sample ``i`` — now indexed with ``j`` as in
    the reference implementation.

    Returns:
        Scalar loss averaged over batch and parts.
    """
    batch_size = labels.size(0)
    part_embed = F.normalize(part_embed, p=2, dim=2)
    attribute_embed = F.normalize(attribute_embed, p=2, dim=2)
    labels_ = labels.expand(batch_size, batch_size).eq(
        labels.expand(batch_size, batch_size).t()
    )
    losses = 0
    for i in range(num_parts):
        part_mask = part_masks[:, i]
        attr_mask = attr_masks[:, i]
        similarity = torch.matmul(part_embed[i], attribute_embed[i].t())
        # rank1: parts -> attributes; rank2: attributes -> parts.
        rank1 = torch.argsort(similarity, dim=1, descending=True)
        rank2 = torch.argsort(similarity.t(), dim=1, descending=True)
        loss = 0
        for j in range(batch_size):
            if part_mask[j] == 0:
                continue
            pred = similarity[j, attr_mask]
            # k-reciprocal sample (part j against all attributes)
            label = labels_[j, :].float()
            forward_k_idx = rank1[j, :topK]
            backward_k_idx = rank2[forward_k_idx, :topK]
            sample_pos_idx = torch.nonzero(backward_k_idx == j)[:, 0]
            sample_pos_idx = torch.unique(forward_k_idx[sample_pos_idx])
            label[sample_pos_idx] = 1
            label = label[attr_mask]
            pos_inds = torch.nonzero(label == 1).squeeze(1)
            neg_inds = torch.nonzero(label == 0).squeeze(1)
            if pos_inds.numel() > 0:
                loss_pos = torch.log(
                    1 + torch.exp(-scale_pos * (pred[pos_inds] - alpha))
                )
                loss += loss_pos.sum()
            if neg_inds.numel() > 0:
                loss_neg = torch.log(1 + torch.exp(scale_neg * (pred[neg_inds] - beta)))
                loss += loss_neg.sum()
            if attr_mask[j] == 0:
                continue
            pred = similarity[part_mask, j]
            # k-reciprocal sample (attribute j against all parts)
            label = labels_[j, :].float()
            forward_k_idx = rank2[j, :topK]
            backward_k_idx = rank1[forward_k_idx, :topK]
            sample_pos_idx = torch.nonzero(backward_k_idx == j)[:, 0]
            sample_pos_idx = torch.unique(forward_k_idx[sample_pos_idx])
            label[sample_pos_idx] = 1
            label = label[part_mask]
            pos_inds = torch.nonzero(label == 1).squeeze(1)
            neg_inds = torch.nonzero(label == 0).squeeze(1)
            if pos_inds.numel() > 0:
                loss_pos = torch.log(
                    1 + torch.exp(-scale_pos * (pred[pos_inds] - alpha))
                )
                loss += loss_pos.sum()
            if neg_inds.numel() > 0:
                loss_neg = torch.log(1 + torch.exp(scale_neg * (pred[neg_inds] - beta)))
                loss += loss_neg.sum()
        loss /= batch_size
        losses += loss
    losses /= num_parts
    return losses
| CCNU-DigitalLibrary/CCNU-DigitalLibrary | MCM-HC/lib/models/losses/align_loss.py | align_loss.py | py | 6,662 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "torch.nn.functional.normalize",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "torch.nn.functional.normalize",
"line_number": 17,
"usage_type": "call"
},
{
"a... |
25495485263 | # -*- coding: utf-8 -*-
"""
Created on Sun Sep 27 17:39:39 2020
@author: satya
"""
# Hierarchical (Ward) and DBSCAN clustering of the cars_clus dataset.
import pandas as pd
import numpy as np                # fix: used below but was never imported (NameError)
import matplotlib.pyplot as plt   # fix: plt.show() below raised NameError
import scipy.cluster.hierarchy as sch
from sklearn.cluster import DBSCAN, AgglomerativeClustering
from sklearn.preprocessing import StandardScaler

data = pd.read_csv('cars_clus.csv')
featureset = data[['engine_s', 'horsepow', 'wheelbas', 'width', 'length', 'curb_wgt', 'fuel_cap', 'mpg']]
# NOTE(review): '$null$' is replaced *after* dropna, so rows carrying that
# sentinel survive with value 0 instead of being dropped — confirm intended.
featureset = featureset.dropna()
featureset = featureset.replace('$null$', 0)
# Standardize features before distance-based clustering.
sc = StandardScaler()
featureset = sc.fit_transform(featureset)
# Dendrogram of the Ward linkage, used to pick a cluster count visually.
dendogram = sch.dendrogram(sch.linkage(featureset, method='ward'))
plt.show()
hc = AgglomerativeClustering(n_clusters=5, affinity='euclidean', linkage='ward')
y = hc.fit_predict(featureset)
# Density-based clustering on the same standardized features.
df = DBSCAN(eps=0.3, min_samples=2)
y = df.fit(featureset)
y = y.labels_
sample_cores = np.zeros_like(y)
sample_cores[df.core_sample_indices_] = True
np.unique(y) | Satyake/Deep-Learning | DBSCAN and HC.py | DBSCAN and HC.py | py | 928 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "pandas.read_csv",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing.StandardScaler",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "scipy.cluster.hierarchy.dendrogram",
"line_number": 23,
"usage_type": "call"
},
... |
17351623183 | import vertexai
from vertexai.language_models import ChatModel, InputOutputTextPair
vertexai.init(project="samwin", location="us-central1")
chat_model = ChatModel.from_pretrained("chat-bison@001")
parameters = {
"max_output_tokens": 256,
"temperature": 0.2,
"top_p": 0.8,
"top_k": 40
}
chat = chat_model.start_chat(
context="""you are a doctor that is responsible for answering queries that patients regarding their prescription. Also you are not allowed to tell that you are an AI model and have to answer the question with full confidence""",
examples=[
InputOutputTextPair(
input_text="""hello """,
output_text="""hey there how can I help you out """
),
InputOutputTextPair(
input_text="""can you tell me your name """,
output_text="""My name is doctor Charis your person medical assistant """
),
InputOutputTextPair(
input_text="""is it dangerous to take Asparin during Dengu""",
output_text="""yes it is dangerous to take Asparin as it is a blood thinner and can cause excess thinning """
)
]
)
data = input("enter the text: ")
response = chat.send_message(f'{data}' , **parameters)
print(f"Response from the model {response.text}") | samwinp/rock-paper-sisor | future.py | future.py | py | 1,288 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "vertexai.init",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "vertexai.language_models.ChatModel.from_pretrained",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "vertexai.language_models.ChatModel",
"line_number": 5,
"usage_type": "name... |
2736213027 | from keras.optimizers import Nadam, Optimizer
from keras import backend as K
class Nadam_entropy(Nadam):
    """Nadam variant that adds an entropy-based term to the raw gradients.

    Gradients are first divided element-wise by the parameter values, then an
    entropy regularizer built from the normalized absolute gradients is
    differentiated and added back onto each gradient.
    """
    def __init__(self, temperature=0.1, **kwargs):
        # temperature scales the entropy term Ts below.
        self.temperature = temperature
        super(Nadam_entropy, self).__init__(**kwargs)
    def get_gradients(self, loss, params):
        """Return Nadam gradients augmented with the entropy-term gradient."""
        grads = K.gradients(loss, params)
        # NOTE(review): `probs` aliases the *same list* as `grads`, so the
        # in-place loop below updates both — confirm this is intentional.
        probs = grads
        for i in range(len(params)):
            grads[i] /= params[i] + K.epsilon()
        #probs = grads / (params + K.epsilon())
        # Normalize |grad| into a probability distribution over all entries.
        probs = K.abs(probs)
        probs /= K.sum(K.flatten(probs)) + K.epsilon()
        # Entropy of the gradient distribution, scaled by the temperature.
        Ts = -self.temperature*K.sum(K.flatten(probs * K.log(probs)))
        delta_s = K.gradients(Ts, params)
        for i in range(len(grads)):
            grads[i] = grads[i] + delta_s[i]
        # grads = grads + delta_s
        # Standard Keras gradient clipping (clip_norm is a keras.optimizers
        # helper — presumably imported elsewhere; verify it is in scope).
        if hasattr(self, 'clipnorm') and self.clipnorm > 0:
            norm = K.sqrt(sum([K.sum(K.square(g)) for g in grads]))
            grads = [clip_norm(g, self.clipnorm, norm) for g in grads]
        if hasattr(self, 'clipvalue') and self.clipvalue > 0:
            grads = [K.clip(g, -self.clipvalue, self.clipvalue) for g in grads]
        return grads
| twoev/APEMEN | utils/optimisers.py | optimisers.py | py | 1,081 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "keras.optimizers.Nadam",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "keras.backend.gradients",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "keras.backend",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "keras.bac... |
75319095866 | import random
from pypinyin import lazy_pinyin
from nonebot import require, on_command, on_message, on_keyword, on_shell_command, on_request
from nonebot.rule import command
from nonebot.permission import SUPERUSER
from nonebot.typing import T_State,T_Handler
from nonebot.adapters.cqhttp.bot import Bot
from nonebot.adapters.cqhttp.message import Message, MessageSegment
from nonebot.adapters.cqhttp.event import MessageEvent, GroupMessageEvent, GroupRequestEvent
from nonebot.adapters.cqhttp.permission import PRIVATE, GROUP, GROUP_ADMIN, GROUP_OWNER
from nonebot.adapters.cqhttp.utils import unescape, escape
from src.utils.util import gen_parser, call_api_delay
from .data_source import get_group_id_list, gen_qq, gentracker
# Usage text for the forwarding command. It is sent back verbatim to the
# superuser as interactive help, so the Chinese wording is user-facing text
# and must not be altered.
__doc__ = '''to -[ugsabf] [args,]
-u: 私聊,args为 私聊对象qq号 消息
-g: 群聊,args为 群聊群qq号 消息
-s: 多个消息目标,args为 qq号 qq号 qq号 消息
-a: 以所有群聊为消息目标,args为 消息
-b: 只有-a时生效,以除了某群的所有群聊为消息目标,args为 qq号 消息
-f: 结束当前会话
'''
# Superuser-only "forward" command plus its argparse-style flag parser; each
# flag mirrors one mode described in the help text above.
to_cmd = on_command('to', aliases={'转发'}, permission=SUPERUSER)
to_parser = gen_parser()
to_parser.add_argument('-u', dest='to_user', action='store_true')
to_parser.add_argument('-g', dest='to_group', action='store_true')
to_parser.add_argument('-s', dest='several', action='store_true')
to_parser.add_argument('-a', dest='all_group', action='store_true')
to_parser.add_argument('-b', dest='ban', action='store_true')
@to_cmd.handle()
async def first_receive(bot: Bot, event: MessageEvent, state: T_State):
    """Stash any arguments sent along with the command itself.

    If `state['args']` is filled here, the subsequent `got('args', ...)`
    handler is not prompted again.
    """
    msg = str(event.message).strip()
    if msg:
        state['args'] = msg
@to_cmd.got('args', __doc__)
async def _(bot: Bot, state: T_State):
    """Parse the forwarding flags and dispatch the message to its targets."""
    args = state['args'].split(None, 1)
    # Drop the echoed command prefix if the user repeated it.
    if args[0] == state['_prefix']['raw_command']:
        args = args[1].split(None, 1)
    try:
        cmd = to_parser.parse_args([args[0]])
    except Exception as e:
        await to_cmd.finish('命令解析失败' + str(e))
        return
    # Only the flag was given — re-prompt with the usage text.
    if args[0] == args[-1]:
        await to_cmd.reject('命令缺少[args,]\n' + __doc__)
    param = args[-1]
    if cmd.help:
        await to_cmd.reject(__doc__)
    elif cmd.finish:
        await to_cmd.finish('本次命令结束')
    if cmd.several:
        # -s: several explicit targets; last element is the message body.
        qq_list = list(gen_qq(param))
        if cmd.to_user:
            for qq in qq_list[:-1]:
                await bot.send_private_msg(user_id=qq, message=unescape(qq_list[-1]))
        elif cmd.to_group:
            for qq in qq_list[:-1]:
                await bot.send_group_msg(group_id=qq, message=unescape(qq_list[-1]))
    elif cmd.all_group:
        # -a: broadcast to every group, optionally excluding -b's groups.
        group_list = await get_group_id_list(bot)
        if cmd.ban:
            qq_list = list(gen_qq(param))
            for qq in (i for i in group_list if i not in qq_list):
                await bot.send_group_msg(group_id=qq, message=unescape(qq_list[-1]))
        else:
            for qq in group_list:
                await bot.send_group_msg(group_id=qq, message=unescape(param))
    elif cmd.to_user:
        # -u: single private target -> "qq message".
        params = param.split(None, 1)
        if params[0] == params[-1]:
            await to_cmd.reject('缺少需要发送的消息\n' + __doc__)
        else:
            await bot.send_private_msg(user_id=params[0], message=unescape(params[1]))
    elif cmd.to_group:
        # -g: single group target -> "qq message".
        params = param.split(None, 1)
        if params[0] == params[-1]:
            await to_cmd.reject('缺少需要发送的消息\n' + __doc__)
        else:
            await bot.send_group_msg(group_id=params[0], message=unescape(params[1]))
    await to_cmd.finish(Message('[CQ:face,id=124]'))
request_cmd = on_request()
@request_cmd.handle()
async def request(bot: Bot, event: GroupRequestEvent):
    """Ask the superuser to approve/reject incoming group invites.

    On an invite, a temporary one-shot matcher is registered; the superuser's
    next private reply ('yes'/'1' approves, anything else rejects) decides
    the invite.
    """
    f_group = event.group_id
    f_user = event.user_id
    if event.sub_type == 'invite':
        result = request_cmd.new("message",
                                 permission=SUPERUSER | PRIVATE,
                                 temp=True,
                                 priority=5)
        # 912871833 is the hard-coded superuser account notified here.
        await bot.send_private_msg(user_id=912871833,
                                   message=f'有新的群邀请:\n群:{f_group}\n邀请人:{f_user}')
        request_event = event
        @result.handle()
        async def _(bot: Bot, event: MessageEvent):
            msg = 'reject'
            if str(event.message) in ['yes', '1']:
                msg = 'approve'
                await request_event.approve(bot)
            else:
                await request_event.reject(bot)
            await result.finish(msg)
# def is_sublist(a, b):
# if a == []: return True
# if b == []: return False
# return b[:len(a)] == a or is_sublist(a, b[1:])
def sublist(a, b):
    """Locate list ``a`` as a contiguous run inside list ``b``.

    Returns the ``(start, end)`` half-open span of the first occurrence,
    ``(0, 0)`` when ``a`` is empty, ``False`` when ``b`` is empty, and
    ``None`` (implicitly) when no occurrence exists.
    """
    if a == []: return (0, 0)
    if b == []: return False
    window = b
    for offset in range(len(b)):
        if window[:len(a)] == a:
            return (offset, offset + len(a))
        window = window[1:]
def pinyin2api(s):
    """Map a Chinese phonetic rendering of an API name back to its English form.

    The input is split into pinyin syllables; each known syllable run (a
    phonetic spelling of an English keyword like ``send`` or ``group``) is
    replaced by the keyword, and the result is joined with underscores.
    """
    syllables = lazy_pinyin(s)
    keyword_sounds = {
        'send': ['sen', 'de'],
        'set': ['sai', 'te'],
        'get': ['gei', 'te'],
        'delate': ['di', 'lei', 'te'],
        'group': ['ge', 'rou', 'pu'],
        'private': ['pu', 'rui', 'wei', 'te'],
        'msg': ['mai', 'shei', 'ji'],
        'ban': ['ban'],
        'whole': ['hou'],
        'friend': ['fu', 'run', 'de'],
        'id': ['ai', 'di'],
        'user': ['you', 're'],
    }
    for keyword, sounds in keyword_sounds.items():
        span = sublist(sounds, syllables)
        if span:
            start, end = span
            del syllables[start:end]
            syllables.insert(start, keyword)
    return '_'.join(syllables)
def isall_chinese(s):
    """Return True iff every character of ``s`` is a CJK ideograph.

    An empty string yields True (vacuous truth, matching ``all``).
    """
    for ch in s:
        if not ('\u4e00' <= ch <= '\u9fa5'):
            return False
    return True
# Superuser command that forwards an arbitrary OneBot API call; the Chinese
# aliases are phonetic spellings of "system call" typed in Chinese.
call_api = on_command('api', aliases={'call', '希司提姆靠鲁', '希斯提姆靠鲁', '希司提姆考鲁', '希斯提姆考鲁'}, permission=SUPERUSER)
@call_api.handle()
async def _(bot: Bot, event: MessageEvent):
    """Parse '<api> k=v k=v ...' and invoke bot.call_api with those kwargs."""
    msg = str(event.message).split()
    # Start from the event's own fields, then overlay user-supplied pairs.
    param = event.dict()
    if msg:
        api, *params = msg
        # Allow the API name itself to be typed phonetically in Chinese.
        if isall_chinese(api):
            api = pinyin2api(api)
        # _input = {}
        # for i in params:
        #     k, v = i.split('=', 1)
        #     _input[pinyin2api(k) if isall_chinese(k) else k] = v
        param.update(dict(i.split('=', 1) for i in params))
        # param.update(_input)
        # if MessageSegment.reply in event.message:
        #     ...
        if param.get('message'):
            param['message'] = Message(unescape(str(param.get('message'))))
        res = await bot.call_api(api, **param)
        if res:
            await call_api.finish(message=Message(str(res)))
iptracker = on_command('iptracker', permission=SUPERUSER)
@iptracker.handle()
async def _(bot: Bot, event: MessageEvent):
    """Generate a tracker payload keyed by a random number and send both.

    The optional message text selects the tracker type (defaults to 0).
    """
    type_ = str(event.message)
    randnum = random.random()
    await bot.send(event, message=str(randnum))
    await iptracker.finish(message=Message(gentracker(randnum, type=int(type_) if type_ else 0)))
# "Flash photo revealer": replying to a flash image with one of these
# keywords re-sends it as a normal (persistent) image.
show_me = on_keyword({'闪光弹', '照明弹'}, permission=SUPERUSER)
@show_me.handle()
async def _(bot: Bot, event: GroupMessageEvent):
    if 'reply' in event.raw_message:
        # Strip the flash flag from the quoted CQ code and resend it.
        msg = event.reply.raw_message.replace(',type=flash', '')
        await bot.send(event, Message(msg))
# scheduler = require('nonebot_plugin_apscheduler').scheduler
#
# @scheduler.scheduled_job('cron', hour='*', id='ti_gang')
# async def ti_gang():
# await call_api_delay('send_group_msg',
# random.randint(1, 100),
# group_id=476328543,
# message=Message('[CQ:image,file=d01d3883a38999345e536012aeb18c76.image,url=https://c2cpicdw.qpic.cn/offpic_new/912871833//912871833-2997538805-D01D3883A38999345E536012AEB18C76/0?term=3]'))
# temp = """<section style="text-align: center; line-height: 1.75em; margin-left: 8px; margin-right: 8px;">
# <section style="margin-right: auto;margin-left: auto;width: 100%;vertical-align: middle;display: inline-block;line-height: 0;box-sizing: border-box;">
# <section style="display: inline-block;width: 100%;vertical-align: top;background-position: 0% 0%;background-repeat: no-repeat;background-size: 100%;background-attachment: scroll;background-image: url("{url2}");-webkit-tap-highlight-color: transparent;">
# <svg enable-background="new 0 0 1080 435" space="preserve"
# style="display: inline-block;width: 100%;vertical-align: top;background-position: 0% 0%;background-repeat: no-repeat;background-size: 100%;background-attachment: scroll;background-image: url("{url1}");-webkit-tap-highlight-color:transparent;"
# version="1.1" viewBox="0 0 1080 435" x="0px" xlink="http://www.w3.org/1999/xlink" xml=""
# xmlns="http://www.w3.org/2000/svg" y="0px">
# <animate attributeName="opacity" begin="click" dur="0.5s" values="1;0" fill="freeze" restart="never"></animate>
# </svg>
# </section>
# </section>
# </section>"""
# merge_cmd = on_command('代码')
# @merge_cmd.handle()
# async def _(bot: Bot, event: MessageEvent):
# try:
# url1, url2 = event.message.__str__().split()
# await bot.send(event, message=temp.format(url1=url1, url2=url2))
# except:
# print('error')
# request_cmd = on_message(permission=PRIVATE)
#
#
# @request_cmd.handle()
# async def request(bot: Bot, event: MessageEvent):
# # 接收私聊消息
# f_user = event.user_id
# if True:
# # 创建临时 matcher
# request_cmd.new("message",
# handlers=[decide],
# permission=SUPERUSER,
# temp=True)
#
# await bot.send_private_msg(user_id=912871833,
# message=f'{f_user}:\n{event.raw_message}')
#
#
# async def decide(bot: Bot, event: MessageEvent):
# # 临时 matcher 响应事件
# await request_cmd.send(message=event.message)
| Joenothing-lst/qbot | src/plugins/admin/__init__.py | __init__.py | py | 9,965 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "nonebot.on_command",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "nonebot.permission.SUPERUSER",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "src.utils.util.gen_parser",
"line_number": 29,
"usage_type": "call"
},
{
"api_nam... |
37708709276 | from django.urls import path
from . import views
# URL namespace: routes reverse as "shop:<name>".
app_name = "shop"
urlpatterns = [
    path("", views.all_products, name="all_products"),
    # NOTE(review): this catch-all slug route precedes the product/ routes,
    # so any bare "<segment>/" URL is treated as a category — confirm no
    # single-segment non-category URLs are expected.
    path("<slug:c_slug>/", views.all_products, name="category_products"),
    path("product/new/", views.add_product, name="add_product"),
    path("product/remove/<slug:p_slug>", views.remove_product, name="remove_product"),
    path("product/edit/<slug:p_slug>", views.update_product, name="edit_product"),
    path("product/<slug:p_slug>/", views.product_detail, name="product_detail"),
]
| aleksandr-hilko/alex_online_shop | homeshop/shop/urls.py | urls.py | py | 532 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
... |
40483436324 | import tkinter
import os
from PIL import Image, ImageTk
class OngletsPersonnage():
    """'Character' tab of the notebook UI: portrait plus six stat canvases."""
    def __init__(self, main_onglets):
        # Build the tab frame and register it in the parent ttk.Notebook.
        self.onglets_personnage = tkinter.ttk.Frame(main_onglets)
        self.onglets_personnage.pack()
        main_onglets.add(self.onglets_personnage, text='character')
        self.create_canvas_character()
    def set_character(self, character):
        # Store the character whose stats/portrait this tab displays.
        self.character = character
    def create_canvas_character(self):
        """Create the portrait canvas and one icon canvas per stat."""
        self.canvas_gfx_character = tkinter.Canvas(self.onglets_personnage)
        # Placeholder character until a real one is set.
        self.create_charater("0","None","nul","nul")
        self.canvas_gfx_character.place(relx=0.03, rely=0.1, relwidth=1, relheight=1)
        # NOTE(review): resource paths use backslashes — Windows-only layout.
        self.canvas_vita = tkinter.Canvas(self.onglets_personnage)
        self.print_image("stats\\vitaliter.png",self.canvas_vita)
        self.canvas_vita.place(relx=0.75, rely=0.05, relwidth=0.1, relheight=0.12)
        self.canvas_sagesse = tkinter.Canvas(self.onglets_personnage)
        self.print_image("stats\\sagesse.png",self.canvas_sagesse)
        self.canvas_sagesse.place(relx=0.75, rely=0.20, relwidth=0.1, relheight=0.12)
        self.canvas_force = tkinter.Canvas(self.onglets_personnage)
        self.print_image("stats\\force.png",self.canvas_force)
        self.canvas_force.place(relx=0.75, rely=0.35, relwidth=0.1, relheight=0.12)
        self.canvas_intel = tkinter.Canvas(self.onglets_personnage)
        self.print_image("stats\\intelligence.png",self.canvas_intel)
        self.canvas_intel.place(relx=0.75, rely=0.50, relwidth=0.1, relheight=0.12)
        self.canvas_chance = tkinter.Canvas(self.onglets_personnage)
        self.print_image("stats\\chance.png",self.canvas_chance)
        self.canvas_chance.place(relx=0.75, rely=0.65, relwidth=0.1, relheight=0.12)
        self.canvas_agi = tkinter.Canvas(self.onglets_personnage)
        self.print_image("stats\\agilite.png",self.canvas_agi)
        self.canvas_agi.place(relx=0.75, rely=0.80, relwidth=0.1, relheight=0.12)
    def create_label_caracteristique(self,character):
        """Place one numeric label next to each stat icon."""
        self.label_vita = tkinter.Label(self.onglets_personnage, text = character.vie_max)
        self.label_vita.place(relx=0.80, rely=0.05, relwidth=0.1, relheight=0.12)
        self.label_sagesse = tkinter.Label(self.onglets_personnage, text = character.sagesse)
        self.label_sagesse.place(relx=0.80, rely=0.20, relwidth=0.1, relheight=0.12)
        self.label_force = tkinter.Label(self.onglets_personnage, text = character.force)
        self.label_force.place(relx=0.80, rely=0.35, relwidth=0.1, relheight=0.12)
        self.label_intel = tkinter.Label(self.onglets_personnage, text = character.intel)
        self.label_intel.place(relx=0.80, rely=0.50, relwidth=0.1, relheight=0.12)
        self.label_chance = tkinter.Label(self.onglets_personnage, text = character.chance)
        self.label_chance.place(relx=0.80, rely=0.65, relwidth=0.1, relheight=0.12)
        self.label_agi = tkinter.Label(self.onglets_personnage, text = character.agi)
        self.label_agi.place(relx=0.80, rely=0.80, relwidth=0.1, relheight=0.12)
    def print_image(self,path,canvas_):
        """Load *path* (relative to ../resource/) and draw it on *canvas_*."""
        dir_path = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))),"resource\\" +path )
        image = Image.open(dir_path)
        photo = ImageTk.PhotoImage(image)
        canvas_.create_image(photo.width(),photo.height(),image=photo)
        # Keep a reference so Tk does not garbage-collect the PhotoImage.
        canvas_.image = photo
    def create_charater(self,gfx,speudo ,id_,lvl = ""):
        """Draw the character sprite and an identity caption. (sic: 'charater')"""
        dir_path = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))),f"resource\\gfx\\{gfx}.png")
        image = Image.open(dir_path)
        photo = ImageTk.PhotoImage(image)
        self.canvas_gfx_character.create_image(photo.width()/4.5,photo.height()/2,image=photo)
        self.canvas_gfx_character.image = photo
        # NOTE(review): the second place() call immediately overrides the
        # first — the first looks redundant; confirm before removing.
        self.canvas_gfx_character.place(relx=0.05, rely=0.9, relwidth=0.5, relheight=0.5)
        self.canvas_gfx_character.place(relx=0.03, rely=0.1, relwidth=1, relheight=1)
        speudo_and_id ="SPEUDO: "+ speudo +" ID: "+ id_ + " LEVEL: "+ lvl
        name = tkinter.Label(self.onglets_personnage, text = speudo_and_id)
        name.place(relx=0.01, rely=0.017,relwidth=0.4, relheight=0.09)
| Azzary/LeafMITM | interface/onglets/onglets_personnage.py | onglets_personnage.py | py | 4,255 | python | en | code | 3 | github-code | 6 | [
{
"api_name": "tkinter.ttk.Frame",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "tkinter.ttk",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "tkinter.Canvas",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "tkinter.Canvas",
... |
6960045652 | import numpy as np
import matplotlib.pyplot as plt
x = np.arange(10, 90, 10.)
y = np.array([25, 70, 380, 550, 610, 1220, 830, 1450])
plt.figure(1)
plt.plot(x, y, 'ro-')
plt.grid()
xsum=np.sum(x)
ysum=np.sum(y)
xysum=sum(x*y)
n=np.size(x)
xavg=xsum/n
yavg=ysum/n
a1=(n*xysum-xsum*ysum)/(n*sum(x**2)-xsum**2)
a0= yavg-xavg*a1
plt.figure(2)
y1=a1*x+a0
plt.plot(x, y, 'ro-', x, y1, 'b*-')
plt.grid()
p1=np.polyfit(x,y,1)
# array([ 19.4702381 , -234.28571429])
plt.figure(3)
y1=a1*x+a0
plt.plot(x, y, 'ro-', x, y1, 'b*-', x, np.polyval(p1, x), 'mp-')
plt.grid()
| SCKIMOSU/Numerical-Analysis | polyfit_implement.py | polyfit_implement.py | py | 566 | python | en | code | 17 | github-code | 6 | [
{
"api_name": "numpy.arange",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
... |
38456424440 | import re
import os
import torch
import base64
import uvicorn
import numpy as np
from io import BytesIO
from PIL import Image
from typing import Union
from fastapi import FastAPI, File, Form
from pydantic import BaseModel
from maskrcnn_benchmark.config import cfg
from maskrcnn_benchmark.engine.predictor_glip import GLIPDemo
def base64_to_image(base64_str, image_path=None):
    """Decode a base64 string (optionally a data-URI) into a PIL image.

    If *image_path* is given, the decoded image is also saved there.
    """
    payload = re.sub('^data:image/.+;base64,', '', base64_str)
    raw_bytes = base64.b64decode(payload)
    img = Image.open(BytesIO(raw_bytes))
    if image_path:
        img.save(image_path)
    return img
def xyxy2xywhn(x, w=640, h=640, clip=False, eps=0.0):
    """Convert nx4 boxes from [x1, y1, x2, y2] to normalized [x, y, w, h].

    xy1 is the top-left corner, xy2 the bottom-right; the output center and
    size are normalized by the image width *w* and height *h*.

    Bug fix: the original called ``clip_coords``, which is not defined or
    imported in this module (NameError for clip=True); the clip is now done
    inline with the same semantics (x clamped to [0, w-eps], y to [0, h-eps],
    in place).
    """
    if clip:
        if isinstance(x, torch.Tensor):
            x[:, [0, 2]] = x[:, [0, 2]].clamp(0, w - eps)
            x[:, [1, 3]] = x[:, [1, 3]].clamp(0, h - eps)
        else:
            x[:, [0, 2]] = x[:, [0, 2]].clip(0, w - eps)
            x[:, [1, 3]] = x[:, [1, 3]].clip(0, h - eps)
    y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
    y[:, 0] = ((x[:, 0] + x[:, 2]) / 2) / w  # x center
    y[:, 1] = ((x[:, 1] + x[:, 3]) / 2) / h  # y center
    y[:, 2] = (x[:, 2] - x[:, 0]) / w  # width
    y[:, 3] = (x[:, 3] - x[:, 1]) / h  # height
    return y
def predict2json(image,caption):
    """Run GLIP grounding on *image* with *caption*; return detections as a dict.

    Each entry maps a running index to label index/name, confidence, and the
    normalized xywh box.
    """
    # PIL image -> array with channels reversed (RGB -> BGR for the model).
    image = np.array(image)[:,:,::-1]
    predictions = glip_demo.compute_prediction(image, caption)
    glip_demo.confidence_threshold = 0.5
    top_predictions = glip_demo._post_process_fixed_thresh(predictions)
    boxs = top_predictions.bbox
    index = top_predictions.get_field("labels")
    probs = top_predictions.get_field("scores")
    h,w,_ = image.shape
    # Convert absolute xyxy boxes to normalized xywh.
    xywhs = xyxy2xywhn(x=boxs,w=w,h=h)
    res = {}
    for c, (i,loc,prob) in enumerate(zip(index,xywhs,probs)):
        x,y,w,h = loc
        res[c] = {}
        # Labels are 1-based in the prediction; shift to 0-based entity index.
        res[c]['index'] = int(i) -1
        res[c]['label'] = glip_demo.entities[int(i) -1]
        res[c]['prob'] = float(prob)
        res[c]['x'] = float(x)
        res[c]['y'] = float(y)
        res[c]['w'] = float(w)
        res[c]['h'] = float(h)
    return res
# Model configuration: GLIP-Tiny pretrained weights, loaded once at startup
# and served on CUDA.
config_file = "configs/pretrain/glip_Swin_T_O365_GoldG.yaml"
weight_file = "MODEL/glip_tiny_model_o365_goldg_cc_sbu.pth"
cfg.local_rank = 0
cfg.num_gpus = 1
cfg.merge_from_file(config_file)
cfg.merge_from_list(["MODEL.WEIGHT", weight_file])
cfg.merge_from_list(["MODEL.DEVICE", "cuda"])
# Shared predictor instance used by the endpoints below.
glip_demo = GLIPDemo(
    cfg,
    min_image_size=800,
    confidence_threshold=0.5,
    show_mask_heatmaps=False
)
app = FastAPI()
class Item(BaseModel):
    """Example request schema.

    NOTE(review): not referenced by any visible route — confirm whether it is
    still needed.
    """
    name: str
    price: float
    is_offer: Union[bool, None] = None
@app.get("/")
def read_root():
    """Health-check endpoint."""
    return {"Hello": "World"}
@app.post("/upload")
def upload(base64_str: str = Form(...), caption: str = Form(...)):
    """Decode a base64 image and run GLIP grounding with *caption*."""
    try:
        image = base64_to_image(base64_str)
        res = predict2json(image,caption)
    except Exception as e:
        # Errors are reported in-band as JSON rather than via an HTTP status.
        return {"message": f"{e}"}
    return res
if __name__ == "__main__":
    # Development entry point: serve on all interfaces, port 5000.
    uvicorn.run(app, host="0.0.0.0", port=5000)
{
"api_name": "re.sub",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "base64.b64decode",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "io.BytesIO",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number"... |
2107589551 | import pygame
import sys
from space_objects import *
from tools import *
pygame.init()
# Fullscreen window at the native desktop resolution.
infoObject = pygame.display.Info()
W_SIZE = WIDTH, HEIGHT = (infoObject.current_w, infoObject.current_h)
H_SIZE = H_WIDTH, H_HEIGHT = WIDTH // 2, HEIGHT // 2
screen = pygame.display.set_mode(W_SIZE, pygame.FULLSCREEN)
clock = pygame.time.Clock()
FPS = 60
# Global tuning constants: the per-body values below are divided/multiplied
# from these (speeds look like period ratios, radii like scaled km — TODO
# confirm the intended units).
rotate_speed = 500
length = 10
radius = 1 / 100
sun = Object(
    screen,
    radius * 40000,
    "data/sun.png",
    rotate_speed / 3600,
    "Sun"
)
# Planets orbit the sun; the moon (last) orbits the earth.
mercury = MovingObject(
    screen,
    radius * 2439,
    "data/mercury.png",
    rotate_speed / 80,
    "Mercury",
    length * 70,
    rotate_speed / 88,
    sun,
)
venus = MovingObject(
    screen,
    radius * 6051,
    "data/venus.png",
    rotate_speed / 80,
    "Venus",
    length * 108,
    rotate_speed / 224,
    sun,
)
earth = MovingObject(
    screen,
    radius * 6371,
    "data/earth.png",
    rotate_speed / 365,
    "Earth",
    length * 151,
    rotate_speed / 365,
    sun,
)
mars = MovingObject(
    screen,
    radius * 3389,
    "data/mars.png",
    rotate_speed / 70,
    "Mars",
    length * 250,
    rotate_speed / 687,
    sun,
)
jupiter = MovingObject(
    screen,
    radius * 40000,
    "data/jupiter.png",
    rotate_speed / 70,
    "Jupiter",
    length * 741,
    rotate_speed / 4329,
    sun,
)
saturn = MovingObject(
    screen,
    radius * 30000,
    "data/saturn.png",
    rotate_speed / 70,
    "Saturn",
    length * 1464,
    rotate_speed / 10768,
    sun,
)
uranus = MovingObject(
    screen,
    radius * 21000,
    "data/uranus.png",
    rotate_speed / 70,
    "Uranus",
    length * 2938,
    rotate_speed / 30660,
    sun,
)
neptune = MovingObject(
    screen,
    radius * 20000,
    "data/neptune.png",
    rotate_speed / 70,
    "Neptune",
    length * 4473,
    rotate_speed / 59860,
    sun,
)
moon = MovingObject(
    screen,
    radius * 1737,
    "data/moon.png",
    rotate_speed / 20,
    "Moon",
    length * 40,
    rotate_speed / 30,
    earth,
)
# Scene container, centered on the middle of the screen.
objects = Objects((H_WIDTH, H_HEIGHT), sun, mercury, venus, earth, mars, jupiter, saturn, uranus, neptune, moon)
# Mouse/drag state for the camera, and the zoom step per wheel click.
mouse_pos = mx, my = 0, 0
is_drag = False
scale_factor = 1.1
class Panel:
    """Collapsible side panel: body-selection buttons, speed slider, exit.

    Bug fix: ``mouse_in_panel`` previously read the module-level global
    ``panel`` instead of ``self``, which only worked because exactly one
    Panel was ever created; it now uses ``self.is_opened``.
    """
    def __init__(self, screen, width, objects):
        self.screen = screen
        self.width = width
        self.screen_size = self.screen.get_size()
        self.objects = objects
        # Semi-transparent backdrop for the opened panel.
        self.image = pygame.Surface((width, screen.get_height()))
        self.image.set_alpha(170)
        # Backgrounds behind the open/close arrow buttons.
        self.half_button_background = pygame.Surface((15, 100))
        self.half_button_background.set_alpha(170)
        pygame.draw.rect(
            self.half_button_background, (1, 1, 1), (0, 1, 14, 98), 0, -1, -1, 5, -1, 5
        )
        self.half_button_background.set_colorkey((0, 0, 0))
        self.button_background = pygame.Surface((30, 100))
        self.button_background.set_alpha(170)
        pygame.draw.rect(self.button_background, (1, 1, 1), (1, 1, 28, 98), 0, 5)
        self.button_background.set_colorkey((0, 0, 0))
        # One text button per celestial body, listed vertically.
        self.buttons = list()
        for i, obj in enumerate(self.objects.objects):
            button = TextButton(screen, obj.name, (20, i * 40 + 200))
            self.buttons.append(button)
        self.is_opened = False
        self.draw_trajectory_button = TextButton(
            self.screen, "draw trajectory", (20, 30)
        )
        self.speed_label = pygame.font.Font(None, 32).render("speed", True, (200,) * 3)
        self.speed_slider = Slider(self.screen, (self.width // 2, 140), (210, 15))
        self.speed_slider.set_value(1 / 1.5)
        self.exit_button = TextButton(
            self.screen, "exit", (20, self.screen_size[1] - 30)
        )
        # Right-pointing arrow (opens the panel).
        image = pygame.Surface((30, 100))
        image.set_colorkey((0, 0, 0))
        image_pressed = image.copy()
        points = ((10, 30), (22, 50), (10, 70))
        pygame.draw.polygon(image, (200,) * 3, points)
        pygame.draw.polygon(image_pressed, (240,) * 3, points)
        rect_values = ((1, 1, 28, 98), 2, 5)
        pygame.draw.rect(image, (200,) * 3, *rect_values)
        pygame.draw.rect(image_pressed, (240,) * 3, *rect_values)
        self.open_button = Button(
            screen, image, image_pressed, (15, self.screen_size[1] // 2), True
        )
        # Left-pointing arrow (closes the panel).
        image = pygame.Surface((30, 100))
        image.set_colorkey((0, 0, 0))
        image_pressed = image.copy()
        points = ((20, 30), (8, 50), (20, 70))
        pygame.draw.polygon(image, (200,) * 3, points)
        pygame.draw.polygon(image_pressed, (240,) * 3, points)
        pygame.draw.rect(image, (200,) * 3, *rect_values)
        pygame.draw.rect(image_pressed, (240,) * 3, *rect_values)
        self.close_button = Button(
            screen, image, image_pressed, (self.width, self.screen_size[1] // 2), True
        )
    def update(self, mouse_pos, clicked):
        """Draw the panel and process clicks.

        Returns (change_visibility, speed, is_exit): whether the trajectory
        toggle was pressed, the slider value (falsy when closed), and whether
        exit was requested.
        """
        change_visibility = False
        speed = False
        is_exit = False
        if self.is_opened:
            # Blur the scene beneath the panel, then overlay the backdrop.
            surf = blur(self.get_sub_surf(), 15)
            surf.blit(self.image, (0, 0))
            self.screen.blit(surf, (0, 0))
            self.screen.blit(
                self.half_button_background, (self.width, self.screen_size[1] // 2 - 50)
            )
            for i, button in enumerate(self.buttons):
                button.update(mouse_pos, clicked)
                if button.triggered():
                    self.objects.set_main_object(i)
            self.screen.blit(self.speed_label, (20, 100))
            self.speed_slider.update(clicked, mouse_pos)
            speed = self.speed_slider.get_value()
            self.draw_trajectory_button.update(mouse_pos, clicked)
            if self.draw_trajectory_button.triggered():
                change_visibility = True
            self.close_button.update(mouse_pos, clicked)
            if self.close_button.triggered():
                self.is_opened = False
            self.exit_button.update(mouse_pos, clicked)
            if self.exit_button.triggered():
                is_exit = True
            # Panel border, split around the close-button notch.
            pygame.draw.line(
                self.screen,
                (200,) * 3,
                (self.width, 0),
                (self.width, self.screen_size[1] // 2 - 50),
            )
            pygame.draw.line(
                self.screen,
                (200,) * 3,
                (self.width, self.screen_size[1] // 2 + 49),
                (self.width, self.screen_size[1]),
            )
        else:
            self.screen.blit(self.button_background, (0, self.screen_size[1] // 2 - 50))
            self.open_button.update(mouse_pos, clicked)
            if self.open_button.triggered():
                self.is_opened = True
        return change_visibility, speed, is_exit
    def mouse_in_panel(self, mouse_pos):
        """True when the panel is open and the mouse is over it."""
        return self.is_opened and mouse_pos[0] < self.width
    def get_sub_surf(self):
        """Return the screen region covered by the panel (for blurring)."""
        sub = self.screen.subsurface((0, 0, self.width, self.screen_size[1]))
        return sub
panel = Panel(screen, 250, objects)
# Main loop: handle input, advance the simulation, draw, cap at FPS.
while True:
    screen.fill((0, 0, 0))
    mouse_pos = mx, my = pygame.mouse.get_pos()
    # Drag the camera by the mouse delta since the previous frame.
    if is_drag:
        y_movement = prev_mouse_pos[1] - my
        x_movement = prev_mouse_pos[0] - mx
        objects.move_camera(x_movement, y_movement)
    prev_mouse_pos = mx, my
    clicked = False
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            pygame.quit()
            sys.exit()
        if event.type == pygame.KEYDOWN:
            # '1' recenters the camera on the middle of the screen.
            if event.key == pygame.K_1:
                objects.camera.set_offsets((H_WIDTH, H_HEIGHT))
        if event.type == pygame.MOUSEBUTTONDOWN:
            if event.button == 1:
                clicked = True
            # Mouse wheel: buttons 4/5 zoom in/out.
            if event.button == 4:
                objects.scale(scale_factor)
            if event.button == 5:
                objects.scale(1 / scale_factor)
        if event.type == pygame.MOUSEBUTTONUP:
            if event.button == 1:
                panel.speed_slider.release()
                is_drag = False
    # Clicks outside the panel start a camera drag.
    if clicked and not panel.mouse_in_panel(mouse_pos):
        is_drag = True
    objects.update()
    change_visibility, speed, is_exit = panel.update(mouse_pos, clicked)
    if change_visibility:
        objects.change_trajectory_visible()
    if speed:
        objects.set_speed(speed * 1.5)
    if is_exit:
        pygame.quit()
        sys.exit()
    pygame.display.update()
    clock.tick(FPS)
| Programmer-Anchous/Solar-system-model | main.py | main.py | py | 8,409 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pygame.init",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pygame.display.Info",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.set... |
7973610749 | import logging
from dataclasses import asdict
from typing import List
from game_service.routers.templates import BasicResponse
from game_service.services.game_manager import CodingConundrumManager
logging.basicConfig(format='%(name)s-%(levelname)s|%(lineno)d: %(message)s', level=logging.INFO)
log = logging.getLogger(__name__)
from fastapi import (
APIRouter,
HTTPException,
Request,
Response,
WebSocket,
WebSocketDisconnect,
status,
)
from pydantic import BaseModel
# All routes in this module are mounted under /games.
ROUTE_PREFIX = '/games'
router = APIRouter(
    prefix=ROUTE_PREFIX,
)
class WebSocketConnectionManager:
    """Registry of open websocket connections with broadcast support."""
    def __init__(self):
        self.active_connections: List[WebSocket] = []
    async def connect(self, websocket: WebSocket):
        """Accept the handshake and start tracking the connection."""
        await websocket.accept()
        self.active_connections.append(websocket)
    def disconnect(self, websocket: WebSocket):
        """Forget a connection (call after the socket has closed)."""
        self.active_connections.remove(websocket)
    async def send_personal_message(self, message: str, websocket: WebSocket):
        """Send *message* as JSON to a single client."""
        await websocket.send_json(message)
    async def broadcast(self, message: str):
        """Send *message* as JSON to every tracked client, sequentially."""
        for connection in self.active_connections:
            await connection.send_json(message)
# Module-level singletons shared by every connection handled by this router.
connection_manager = WebSocketConnectionManager()
game_manager = CodingConundrumManager(connection_manager)
@router.websocket('/codingconundrum')
async def coding_conundrum_websocket_endpoint(websocket: WebSocket):
    """WebSocket endpoint for the Coding Conundrum game.

    Registers the socket with both managers, then relays every incoming
    text frame to the game manager until the client disconnects.
    """
    await connection_manager.connect(websocket)
    await game_manager.handle_new_connection(websocket)
    try:
        while True:
            data = await websocket.receive_text()
            await game_manager.handle_incoming_message(data)
    except WebSocketDisconnect:
        # Client closed the socket; drop it from the broadcast list.
        connection_manager.disconnect(websocket)
@router.get('/')
async def compiler_status(request: Request):
return BasicResponse(message="we're up!") | zhuweiji/CPP-FYP-Proj | game_service/game_service/routers/game_handlers.py | game_handlers.py | py | 1,858 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "logging.basicConfig",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "logging.getLogger",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "fastapi.APIRoute... |
74916425146 | import util
import cv2
import torch
import os
def compareTensors(refs, target, targetName):
    """Return the mean self-normalized similarity of `target` against `refs`.

    Each reference contributes tensordot(ref, target) / tensordot(ref, ref)
    (both contracted over 2 dims); the per-reference scores are averaged.
    Prints a warning and returns None when `refs` is empty.
    `targetName` is accepted for interface compatibility but unused here.
    """
    if not refs:
        print("no reference images")
        return
    total = 0
    for ref in refs:
        self_dot = torch.tensordot(ref, ref, dims=2)
        total = total + torch.tensordot(ref, target, dims=2) / self_dot
    # NOTE: earlier experiments used plain distance or MSE with a reversed
    # max/min sort order; see project history for those variants.
    return total / len(refs)
def compareFile(selectedLayer, refs, targetsFolder, fileName, net):
    """Score one image file against the reference activations.

    Loads `targetsFolder/fileName`, preprocesses it, runs it through `net`
    while capturing the fc8 layer output via a forward hook, and returns
    compareTensors(refs, fc8_activation, fileName).

    NOTE(review): `selectedLayer` is currently ignored — the hooked layer
    is hard-coded to 'fc8'; confirm whether it should be used instead.
    """
    img = util.process(cv2.imread(targetsFolder+"/"+fileName))
    #net.forward(img)
    img = torch.from_numpy(img)
    # Add a leading batch dimension — assumes util.process returns a single
    # image array (CHW); TODO confirm.
    img = img.unsqueeze(0)
    # NOTE(review): a new hook is registered on every call and never removed.
    net.fc.fc8.register_forward_hook(get_activation('fc8'))
    output = net(img.float())
    # Use the hooked fc8 activation rather than the network's final output.
    output = activation['fc8']
    return compareTensors(refs, output, fileName)
# Module-level store for activations captured by forward hooks.
activation = {}


def get_activation(name):
    """Build a forward hook that stashes a layer's detached output under
    `name` in the module-level `activation` dict."""
    def hook(module, inputs, outputs):
        activation[name] = outputs.detach()
    return hook
| EunbinSeo/Pytorch-vgg-memoji | compare.py | compare.py | py | 1,363 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "torch.tensordot",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "torch.tensordot",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "util.process",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_n... |
5203502596 | # -*- coding: utf-8 -*-
"""
Spyderエディタ
これは一時的なスクリプトファイルです
"""
#WEBクローリング
import time
import re
import requests
import lxml.html
from pymongo import MongoClient
def main():
    """Crawl the Gihyo ebook list and store each book's details in MongoDB."""
    client = MongoClient('localhost', 27017)
    # Use the 'ebooks' collection of the 'scraping' database.
    collection = client.scraping.ebooks
    # Unique index on 'key' so each book is stored at most once.
    collection.create_index('key', unique=True)
    # Fetch the list page.  NOTE(review): the original comment mentioned a
    # requests Session for repeated access, but plain requests.get is used.
    response = requests.get('https://gihyo.jp/dp')
    # Generator of absolute detail-page URLs.
    urls = scrape_list_page(response)
    #url_list = [str(url) for url in urls]
    for url in urls:
        #url = url_list[0]
        # Derive the unique key from the URL's last path segment.
        key = extract_key(url)
        # Look for an already-stored document with the same key.
        ebook = collection.find_one({'key': key})
        # Only fetch and insert when the book is not stored yet.
        if not ebook:
            # Be polite: wait a second between detail-page requests.
            time.sleep(1)
            response = requests.get(url)
            # Build the ebook document from the detail page.
            ebook = scrape_detail_page(response)
            # Store the new document.
            collection.insert_one(ebook)
        print(ebook)
def scrape_list_page(response):
    """Yield absolute detail-page URLs scraped from the list-page response."""
    tree = lxml.html.fromstring(response.content)
    # Rewrite relative hrefs into absolute URLs based on the fetched URL.
    tree.make_links_absolute(response.url)
    # Anchor elements with itemprop="url" that are descendants of #listBook.
    anchors = tree.cssselect('#listBook a[itemprop="url"]')
    for anchor in anchors:
        yield anchor.get('href')
def scrape_detail_page(response):
    """Extract one ebook's info (title, price, table of contents) from a
    detail-page response and return it as a dict."""
    tree = lxml.html.fromstring(response.content)
    # Chapter headings, with ideographic spaces normalized.
    toc = [normalize_spaces(h3.text_content()) for h3 in tree.cssselect('#content > h3')]
    return {
        'url': response.url,
        'key': extract_key(response.url),
        'title': tree.cssselect('#bookTitle')[0].text_content(),
        'price': tree.cssselect('.buy')[0].text,
        'content': toc,
    }
def extract_key(url):
    """Return the last path segment of `url` (the text after the final '/').

    Raises AttributeError when the URL ends with '/', because the regex
    then finds no match — same behavior as the original implementation.
    """
    return re.search(r'([^/]+)$', url).group(1)
def normalize_spaces(s):
    """Collapse each run of ideographic spaces (U+3000) into ': ' and strip
    surrounding whitespace."""
    collapsed = re.sub(r'\u3000+', ': ', s)
    return collapsed.strip()
if __name__ == '__main__':
    main()
# Scratch snippet: incrementing existing values while iterating the keys is
# safe because the key set itself is never modified.
chk = {'a':0, 'b':1, 'c':3}
for val in chk:
    chk[val] += 1
| inamasa12/cr-sc | python_crowler_4.py | python_crowler_4.py | py | 2,855 | python | ja | code | 0 | github-code | 6 | [
{
"api_name": "pymongo.MongoClient",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_... |
73706334586 | from django.shortcuts import render
from django.http import HttpResponse
from app1.models import Topic, Webpage, AccessRecord
from app1.forms import App1Form
# Create your views here.
def home(request):
    """Render the app1 home page, injecting `insert_me` into the template."""
    #return HttpResponse("Hello Hao!")
    my_dict = {'insert_me':"Goodbye now from view.py!!"}
    return render(request, 'app1/home.html', context=my_dict)
def index(request):
    """List all AccessRecord rows ordered by date."""
    wp_list = AccessRecord.objects.order_by('date')
    date_dict = {'access_records':wp_list}
    return render(request, 'app1/index.html', context=date_dict)
def test(request):
    """Plain-text endpoint used for manual smoke testing."""
    return HttpResponse("Goodbye!")
def form(request):
    """Display and process the Topic creation form.

    GET renders an empty form; a valid POST saves the new Topic and renders
    the topics listing.  NOTE(review): returning topics(request) after a
    successful POST re-renders under the same URL; a redirect
    (POST/Redirect/GET) would avoid duplicate submissions on refresh.
    """
    theForm = App1Form()
    if request.method == 'POST':
        theForm = App1Form(request.POST)
        if theForm.is_valid():
            # process form
            print("Validation success:")
            print("top_name: " + theForm.cleaned_data['top_name'])
            theForm.save(commit=True)
            print("Topic created in DB, going back to index page...")
            return topics(request)
        else:
            print("Form Error")
    # Invalid POSTs fall through here, so field errors are re-rendered.
    return render(request, 'app1/form.html', {'the_form':theForm})
def topics(request):
t_list = Topic.objects.order_by('top_name')
t_dict = {'topics':t_list, 'section':{'title':'Topics', 'parent':'App1'}}
return render(request, 'app1/topics.html', context=t_dict) | haozer/project1 | app1/views.py | views.py | py | 1,277 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.shortcuts.render",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "app1.models.AccessRecord.objects.order_by",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "app1.models.AccessRecord.objects",
"line_number": 13,
"usage_type": "at... |
7782101624 | import cv2
import random
import numpy as np
# Requested capture resolution for the webcam stream.
frameWidth = 640
frameHeight = 480
cap = cv2.VideoCapture(0)
# Property ids 3 and 4 are the capture frame width and height.
cap.set(3, frameWidth)
cap.set(4, frameHeight)
# True while the left mouse button is held down (drawing mode).
save = False
# BGR palette; a new colour is picked at random after every stroke.
colors = [[51, 153, 255],
          [255, 0, 255],
          [0, 255, 0],
          [255, 0, 0],
          [0, 0, 255]]
color = random.choice(colors)
# Mouse positions recorded while drawing.
points=[]
def draw_event(event, x, y,flags,params):
    """Mouse callback: record points while the left button is held.

    A left-press starts a stroke; releasing ends it and picks a new
    random colour for the next stroke.
    """
    # FIX: dropped unused `global img`; `img` is never assigned here.
    global save, color
    # FIX: was `~save` — bitwise NOT of a bool is -1/-2 (always truthy),
    # so the guard never had any effect; `not save` expresses the intent.
    if event == cv2.EVENT_LBUTTONDOWN and not save:
        save = True
    if event == cv2.EVENT_LBUTTONUP:
        save = False
        color = random.choice(colors)
    if save:
        points.append((x, y))
# Main loop: grab frames, let the user draw, and outline red regions.
while True:
    success, img = cap.read()
    clone = img.copy()
    cv2.imshow("Drawing", img)
    # NOTE(review): re-registering the callback every frame is redundant;
    # registering once before the loop would suffice.
    cv2.setMouseCallback('Drawing', draw_event)
    if len(points):
        for point in points:
            x,y=point
            cv2.circle(img, (x, y), 4, color, cv2.FILLED)
    imgHSV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    # lower boundary RED color range values; Hue (0 - 10)
    lower1 = np.array([0, 100, 20])
    upper1 = np.array([10, 255, 255])
    # upper boundary RED color range values; Hue (160 - 180)
    lower2 = np.array([160, 100, 20])
    upper2 = np.array([179, 255, 255])
    lower_mask = cv2.inRange(imgHSV, lower1, upper1)
    upper_mask = cv2.inRange(imgHSV, lower2, upper2)
    # Red wraps around the hue axis, so combine both bands.
    full_mask = lower_mask + upper_mask
    contours, _ = cv2.findContours(full_mask, cv2.RETR_EXTERNAL,
                                   cv2.CHAIN_APPROX_NONE)
    for cnt in contours:
        # Coarse polygon approximation (epsilon = 20% of the perimeter —
        # NOTE(review): unusually large; typical values are 1-5%).
        approx = cv2.approxPolyDP(
            cnt, 0.2 * cv2.arcLength(cnt, True), True)
        cv2.drawContours(img, [approx], 0, (0, 255, 5), 1)
    cv2.imshow('Drawing', img)
    key = cv2.waitKey(1) & 0xFF
    # 'r' resets to the raw frame, 'x' quits.
    if key == ord("r"):
        img = clone.copy()
    elif key == ord("x"):
        break
| tarekbrahmi/Open-cv-project | learining/projects and apps/other/webcam-drawing.py | webcam-drawing.py | py | 1,934 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "cv2.VideoCapture",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "cv2.EVENT_LBUTTONDOWN",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "cv2.EVENT_LB... |
42090679043 | from events import OnEvents
from environment import Environment
from util import Util
class Component(OnEvents):
    """ Base Class for individual processes.
    """
    def __init__(self):
        super(Component, self).__init__()
        # Wall-clock duration of each `execute` call, in order.
        self.exec_times = []
        self.Util = Util()
    def run(self, **kwargs):
        """Hook for subclasses; the base implementation does nothing."""
        pass
    def execute(self, kwargs, stdout=None, stdin=None,
                return_output=False, print_output=False,
                current_wd=None, logger=None, hook=True):
        """Build the command line from `self.args`/`kwargs` and run it.

        Returns the dict produced by Util.exec_cmd (it includes at least
        'exec_time' and 'retval').  When `hook` is true, triggers the
        success/failure events with the output attached to `kwargs`.

        NOTE(review): the 'stdout'/'stdin' branches assign the literal
        strings 'stdout'/'stdin' to the redirection variables — confirm
        Util.exec_cmd interprets these as markers rather than streams.
        """
        cmd = [self.executable]
        for arg in self.args:
            if 'stdout' == arg:
                stdout = arg
            elif 'stdin' == arg:
                stdin = arg
            else:
                # A list arg is (flag, kwargs-key); a plain arg is a key.
                if isinstance(arg, list):
                    #value = [arg[0], getattr(self, arg[1])]
                    if kwargs[arg[1]] is not None:
                        value = [arg[0], kwargs[arg[1]]]
                    else:
                        value = None
                else:
                    value = kwargs[arg]
                if value is not None:
                    if not isinstance(value, list):
                        value = [value,]
                    for v in value:
                        # Skip None/empty strings and boolean False flags.
                        if v not in (None, '') and not (not v and isinstance(v, bool)):
                            cmd.append(str(v))
        output = self.Util.exec_cmd(cmd, stdout, stdin,
                                    return_output, print_output,
                                    current_wd, logger)
        self.exec_times.append(output['exec_time'])
        if hook:
            retval = output['retval']
            kwargs.update({'output': output})
            success = True if retval == 0 else False
            self.event_trigger(success, **kwargs)
        return output
    def get_last_exec_time(self):
        """Duration of the most recent execution, or 0 if none has run."""
        if self.exec_times:
            return self.exec_times[-1]
        else:
            return 0
    def get_avg_exec_time(self):
        """Mean execution time; raises ZeroDivisionError before any run."""
        return sum(self.exec_times)/len(self.exec_times)
| tom-kerr/bookmaker | components/component.py | component.py | py | 2,107 | python | en | code | 6 | github-code | 6 | [
{
"api_name": "events.OnEvents",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "util.Util",
"line_number": 11,
"usage_type": "call"
}
] |
10663274434 | # -*- coding: utf-8 -*-
"""
Created on Thu Jan 16 11:25:30 2020
@author: Rijk
Extracts the resistance from the IV curves measured
"""
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 22 17:10:35 2019
@author: LocalAdmin
Curve fitting script
"""
import os
import math as m
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
import instrument_module as instr
def linear(x, a):
    """Zero-intercept line V = a * x; the model handed to curve_fit."""
    return x * a
# =============================================================================
# # Inputs
# =============================================================================
# Measurement to analyse and where to find it.
meas_name = '0117_1703_WO3196_full_IV_curve'
source_folder = r'D:\Rijk\MEP_control_software'
num_points = 10
folder = os.path.join(source_folder, meas_name)
# Fit artefacts are written into a subfolder of the measurement folder.
fit_folder = folder + '\\fit_minus'
# Create the output folder; tolerate it already existing or any other
# OS-level failure so the fit can still proceed.
try:
    os.mkdir(fit_folder)
except OSError:  # FIX: was a bare `except:`, which also hid KeyboardInterrupt
    pass
file_name = os.path.join(source_folder, meas_name, 'data', meas_name)
file_current = file_name +'_current'
file_voltage = file_name + '_voltage'
# Fit model and the number of samples trimmed from each end of the trace.
func = linear
start = 17
stop = start
#p0 = [1E12, -3/2]
#p0 = [2E7, 1E4, 2E7]
#bounds = (0, np.inf)
# =============================================================================
# # Import data
# =============================================================================
# Drop the first 101 samples (settling period — TODO confirm).
ts = instr.load_data(file_current)[0]
currents = instr.load_data(file_current)[1][101:]
voltages = instr.load_data(file_voltage)[1][101:]
stop = len(currents) - stop
if start > 0:
    if stop < len(currents):
        currents = currents[start:stop]
        voltages = voltages[start:stop]
    else:
        print('Stop index too large for current array')
        currents = currents[start:]
        voltages = voltages[start:]
    # Offset-correct so the smallest current becomes zero.
    currents = currents - min(currents)
else:
    print('Start index zero or lower, so not used')
    if stop < len(currents):
        currents = currents[:stop]
        voltages = voltages[:stop]
    else:
        print('Stop index too large for current array')
# =============================================================================
# # Perform regular fit and constrained fit
# =============================================================================
# Linear fit V = R * I; the single fitted parameter is the resistance.
res_mean, res_var = curve_fit(func, currents, voltages, maxfev=int(1E9))
#popt, pcov = curve_fit(func, currents, voltages, p0, maxfev=int(1E9))
#popt, pcov = curve_fit(func, xdata, ydata, p0, maxfev=int(1E7), bounds=bounds)
res_std = np.sqrt(res_var)
# Pointwise V/I resistance, skipping zero currents to avoid division by zero.
ohm_res = np.zeros(0)
ohm_res_curr = np.zeros(0)
for n, i in enumerate(currents):
    if i != 0:
        ohm_res_curr = np.append(ohm_res_curr, i)
        ohm_res = np.append(ohm_res, voltages[n]/i)
    else:
        pass
# =============================================================================
# # Plot fit
# =============================================================================
#plt.close('all')
plt.figure()
plt.plot(currents, voltages)
plt.plot(currents, func(currents, res_mean))
plt.title('IV curve of 33MOhm')
plt.xlabel('Current (A)')
plt.ylabel('Voltage (V)')
plt.legend(['Data', 'Fit'])
instr.save_plot(os.path.join(fit_folder, meas_name + '_datafit'))
plt.figure()
plt.plot(ohm_res_curr, ohm_res)
plt.plot(currents, res_mean * np.ones(len(currents)))
#plt.plot(currents, func(currents, *popt))
plt.title('IV of 33MOhm with %.2e mean and %.2e std' % (res_mean, res_std))
plt.xlabel('Source current (A)')
plt.ylabel('Resistance (Ohm)')
plt.legend(['V/I Resistance', 'Fit Resistance'])
instr.save_plot(os.path.join(fit_folder, meas_name + '_resistances')) | rehogenbirk/MEP_control_software | fit_IVcurve_single.py | fit_IVcurve_single.py | py | 3,578 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.path.join",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "os.mkdir",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": ... |
18091330209 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated migration: change the default value of
    BasicMemberInformation.auth_key.

    NOTE(review): the default is a fixed hex string captured when the
    migration was generated — every new row receives the same "random" key.
    """
    dependencies = [
        ('account', '0060_auto_20150130_1750'),
    ]
    operations = [
        migrations.AlterField(
            model_name='basicmemberinformation',
            name='auth_key',
            field=models.CharField(default='031910ad27f4d5c4ffa8ec23fe5ce895d59611079de70db9c7597121bfc2c443', max_length=64),
            preserve_default=True,
        ),
    ]
| hongdangodori/slehome | slehome/account/migrations/0061_auto_20150201_1909.py | 0061_auto_20150201_1909.py | py | 531 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.db.migrations.Migration",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "django.db.migrations",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.db.migrations.AlterField",
"line_number": 14,
"usage_type": "call"
},
{... |
39209939169 | import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import math
from scipy.interpolate import griddata
import copy
# import tecplot as tp
# with open('Rectangle_EXP.dat') as Rectangle_EXP:
# all_data =
# D rectangle = 100
def load_data(fname):
    """Load a Tecplot .dat file into a DataFrame.

    Skips the 10-line Tecplot header and labels the four space-separated
    columns as normalized coordinates plus the velocity ratio.
    """
    raw = np.genfromtxt(fname, delimiter=' ', skip_header=10)
    return pd.DataFrame(raw, columns=['X_D', 'Y_D', 'Z_D', 'U_Uinf'])
def D_area(D, H):
    """Area-equivalent diameter of a D x H rectangle: the diameter of the
    circle whose area equals D*H, i.e. sqrt(4*D*H/pi)."""
    circle_equiv = (4 / math.pi) * D * H
    return circle_equiv ** 0.5
def D_star(D, H):
    """Area/perimeter-based scaling length of a D x H rectangle:
    2*D*H/(D + H)."""
    return 2 * D * H / (D + H)
def wake_scaling(dataset, D_gometry, D_scaling):
    """Return a copy of `dataset` with X_D/Y_D/Z_D rescaled by
    D_gometry / D_scaling (re-normalize from the geometry diameter to the
    chosen scaling length).  The input frame is left untouched."""
    scaled = copy.copy(dataset)
    for axis in ('X_D', 'Y_D', 'Z_D'):
        scaled[axis] = scaled[axis] * D_gometry / D_scaling
    return scaled
def extract_plane(dataset, xd):
    """Interpolate the wake onto a regular y-z grid at downstream plane `xd`.

    Selects the rows whose X_D rounds (1 decimal) to `xd`, builds a
    0.01-spaced y/z mesh spanning those points, and linearly interpolates
    U_Uinf onto it.  Returns (y-grid, z-grid, interpolated u); `u` is NaN
    outside the convex hull of the data (griddata's default fill value).
    """
    df = dataset.loc[np.round(dataset['X_D'], 1)==round(xd,1)]
    y = np.arange(np.min(df['Y_D']), np.max(df['Y_D']), 0.01)
    z = np.arange(np.min(df['Z_D']), np.max(df['Z_D']), 0.01)
    yg, zg = np.meshgrid(y, z)
    u = griddata((df['Y_D'], df['Z_D']), df['U_Uinf'], (yg, zg), method='linear')
    return yg, zg, u
def extract_line(yg, zg, u, zd):
    """Slice the gridded plane at height `zd`: return the y-coordinates and
    velocities of every grid node whose (2-decimal-rounded) z matches zd."""
    mask = np.round(zg, 2) == round(zd, 2)
    return yg[mask], u[mask]
# def grid_interplation(Rectangle):
# XX, YY, ZZ = np.meshgrid(Rectangle[['X_D']],Rectangle[['Y_D']],Rectangle[['Z_D']],sparse=True)
# %% Scaling length calculation
# Geometry sizes: circle/square diameters and the rectangle's width/height.
D_cir = 200; D_squ = 200; D_rec = 100; H_rec = 300
# Area-based Shammensodin and Port-Agel
D_area_cir = D_cir
D_area_squ = D_area(D_squ,D_squ)
D_area_rec = D_area(D_rec,H_rec)
# Area and perimeter based scaling length
D_star_cir = D_cir
D_star_squ = D_star(D_squ,D_squ)
D_star_rec = D_star(D_rec,H_rec)
# %% Read tecplot .dat file
f1 = 'Rectangle_EXP.dat'
f2 = 'Circle_EXP.dat'
f3 = 'Square_EXP.dat'
Rectangle = load_data(f1)
Circle = load_data(f2)
Square = load_data(f3)
# wake scaling
# D: loaded already
# %%
# Re-normalize each wake by its area-equivalent diameter.
# D_area
Cir_area = wake_scaling(Circle,D_cir,D_area_cir)
Squ_area = wake_scaling(Square,D_squ,D_area_squ)
Rec_area = wake_scaling(Rectangle,D_rec,D_area_rec)
# %%
# Re-normalize each wake by its area/perimeter-based length.
# D_star
Cir_star = wake_scaling(Circle,D_cir,D_star_cir)
Squ_star = wake_scaling(Square,D_squ,D_star_squ)
Rec_star = wake_scaling(Rectangle,D_rec,D_star_rec)
# %% Interpolation at postions wanted:
# Downstream planes x/D = 1..5 and heights z/D = 0.5, 0.25, 0.
xd = np.linspace(1.,5.,5)
zd = np.linspace(0.5,0.,3)
# for i,x in enumerate(xd):
# yg, zg, u = extract_plane(Cir_area,x)
# for j,z in enumerate(zd):
# plt.subplot(len(zd),len(xd),len(zd)*i + j+1)
# yl,ul = extract_line(yg,zg,u, z)
# ul = ul[yl<0.6]; yl = yl[yl<0.6]
# ul = ul[yl>-1]; yl = yl[yl>-1]
# plt.plot(yl,ul)
# for i,x in enumerate(xd):
# yg, zg, u = extract_plane(Squ_area,x)
# for j,z in enumerate(zd):
# plt.subplot(len(zd),len(xd),len(zd)*i + j+1)
# yl,ul = extract_line(yg,zg,u, z)
# ul = ul[yl<1]; yl = yl[yl<1]
# ul = ul[yl>-1]; yl = yl[yl>-1]
# plt.plot(yl,ul)
# for i,x in enumerate(xd):
# yg, zg, u = extract_plane(Rec_area,x)
# for j,z in enumerate(zd):
# plt.subplot(len(zd),len(xd),len(zd)*i + j+1)
# yl,ul = extract_line(yg,zg,u, z)
# ul = ul[yl<1]; yl = yl[yl<1]
# ul = ul[yl>-1]; yl = yl[yl>-1]
# plt.plot(yl,ul)
# plt.show()
# Font settings (currently unused by the plots below).
font = {'family': 'serif',
        'color': 'darkred',
        'weight': 'normal',
        'size': 16,
        }
# %% plot D_area
# Grid of velocity profiles: one column per downstream plane, one row per
# height; circle/square/rectangle overlaid in each panel.
fig1, axs1 = plt.subplots(len(zd),len(xd), sharex=True, sharey=True, gridspec_kw={'hspace': 0, 'wspace': 0.1}, figsize=(30,10))
fig1.suptitle('U/U_inf')
axs1[0,0].set_ylabel('Z/Darea=0.5')
axs1[1,0].set_ylabel('Z/Darea=0.25')
axs1[2,0].set_ylabel('Z/Darea=0')
# xlabel
for i,x in enumerate(xd):
    axs1[2,i].set_xlabel('Y/Darea')
axs1[0,0].set_title('X/Darea=1')
axs1[0,1].set_title('X/Darea=2')
axs1[0,2].set_title('X/Darea=3')
axs1[0,3].set_title('X/Darea=4')
axs1[0,4].set_title('X/Darea=5')
plt.setp(axs1, xlim=(-0.99,0.99))
# Circle wake: grey filled circles; y clipped to (-1, 0.6).
for i,x in enumerate(xd):
    yg, zg, u = extract_plane(Cir_area,x)
    for j,z in enumerate(zd):
        yl,ul = extract_line(yg,zg,u, z)
        ul = ul[yl<0.6]; yl = yl[yl<0.6]
        ul = ul[yl>-1]; yl = yl[yl>-1]
        axs1[j, i].plot(yl,ul,color='grey',marker='o', fillstyle='full', markevery=8,markersize=12,linestyle='None')
# Square wake: open red squares; y clipped to (-1, 0.9).
for i,x in enumerate(xd):
    yg, zg, u = extract_plane(Squ_area,x)
    for j,z in enumerate(zd):
        yl,ul = extract_line(yg,zg,u, z)
        ul = ul[yl<0.9]; yl = yl[yl<0.9]
        ul = ul[yl>-1]; yl = yl[yl>-1]
        axs1[j, i].plot(yl,ul,color='red',marker='s', fillstyle='none', markevery=8,markersize=12,linestyle='None',markeredgewidth = 2)
# Rectangle wake: open blue diamonds; y clipped to (-1, 0.9).
for i,x in enumerate(xd):
    yg, zg, u = extract_plane(Rec_area,x)
    for j,z in enumerate(zd):
        yl,ul = extract_line(yg,zg,u, z)
        ul = ul[yl<0.9]; yl = yl[yl<0.9]
        ul = ul[yl>-1]; yl = yl[yl>-1]
        axs1[j, i].plot(yl,ul,color='blue',marker='d', fillstyle='none', markevery=8,markersize=12,linestyle='None',markeredgewidth = 2)
# %% plot D_star
# Same panel grid as the D_area figure, but using the D_star normalization.
fig2, axs2 = plt.subplots(len(zd),len(xd), sharex=True, sharey=True, gridspec_kw={'hspace': 0, 'wspace': 0.1}, figsize=(30,10))
# FIX: the title was set on fig1 (copy-paste from the D_area section);
# this section draws on fig2.
fig2.suptitle('U/U_inf')
axs2[0,0].set_ylabel('Z/Dstar=0.5')
axs2[1,0].set_ylabel('Z/Dstar=0.25')
axs2[2,0].set_ylabel('Z/Dstar=0')
# xlabel
for i,x in enumerate(xd):
    axs2[2,i].set_xlabel('Y/Dstar')
axs2[0,0].set_title('X/Dstar=1')
axs2[0,1].set_title('X/Dstar=2')
axs2[0,2].set_title('X/Dstar=3')
axs2[0,3].set_title('X/Dstar=4')
axs2[0,4].set_title('X/Dstar=5')
plt.setp(axs2, xlim=(-0.99,0.99))
# Circle wake: grey filled circles; y clipped to (-1, 0.6).
for i,x in enumerate(xd):
    yg, zg, u = extract_plane(Cir_star,x)
    for j,z in enumerate(zd):
        yl,ul = extract_line(yg,zg,u, z)
        ul = ul[yl<0.6]; yl = yl[yl<0.6]
        ul = ul[yl>-1]; yl = yl[yl>-1]
        axs2[j, i].plot(yl,ul,color='grey',marker='o', fillstyle='full', markevery=8,markersize=12,linestyle='None')
# Square wake: open red squares; y clipped to (-1, 0.9).
for i,x in enumerate(xd):
    yg, zg, u = extract_plane(Squ_star,x)
    for j,z in enumerate(zd):
        yl,ul = extract_line(yg,zg,u, z)
        ul = ul[yl<0.9]; yl = yl[yl<0.9]
        ul = ul[yl>-1]; yl = yl[yl>-1]
        axs2[j, i].plot(yl,ul,color='red',marker='s', fillstyle='none', markevery=8,markersize=12,linestyle='None',markeredgewidth = 2)
# Rectangle wake: open blue diamonds; y clipped to (-1, 0.9).
for i,x in enumerate(xd):
    yg, zg, u = extract_plane(Rec_star,x)
    for j,z in enumerate(zd):
        yl,ul = extract_line(yg,zg,u, z)
        ul = ul[yl<0.9]; yl = yl[yl<0.9]
        ul = ul[yl>-1]; yl = yl[yl>-1]
        axs2[j, i].plot(yl,ul,color='blue',marker='d', fillstyle='none', markevery=8,markersize=12,linestyle='None',markeredgewidth = 2)
# %% save fig
# FIX: the keyword was misspelled `dip`; matplotlib's savefig expects `dpi`,
# so the intended 300-dpi output was never applied.
fig1.savefig('U_Darea.svg', dpi = 300)
fig2.savefig('U_Dstar.svg', dpi = 300)
# %%
| hmharley/FlowData_processing_py | source/Plot.py | Plot.py | py | 7,313 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "numpy.genfromtxt",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "math.pi",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "copy.copy",
"line_... |
15864287326 | import pandas as pd
import matplotlib.pyplot as plt
# Set up the output screen
plt.style.use(style='ggplot')
plt.rcParams['figure.figsize'] = [20, 12]
# Read dataset
trainData = pd.read_csv('./train.csv')
# Scatter of GarageArea vs SalePrice, with outliers included.
plt.scatter(trainData.GarageArea, trainData.SalePrice, color='red')
plt.xlabel('Garage Area')
plt.ylabel('Sale Price')
plt.show()
# Delete the outliers: keep rows with 111 < GarageArea < 999.
outlier_drop = trainData[(trainData.GarageArea < 999) & (trainData.GarageArea > 111)]
# Display the scatter plot of GarageArea and SalePrice after deleting
plt.scatter(outlier_drop.GarageArea, outlier_drop.SalePrice, color='green')
plt.xlabel('Garage Area')
plt.ylabel('Sale Price')
plt.show() | nikolozdz/Linear-Regression-Models-ICP5 | Task 1.py | Task 1.py | py | 690 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "matplotlib.pyplot.style.use",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.style",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 5,
"usage_type": "name"
},
{
"api_name"... |
72532378429 | from collections.abc import Sequence
from datetime import datetime, timedelta
from typing import Final
import arrow
import pytest
from pydantic import NonNegativeFloat
from simcore_service_dynamic_sidecar.modules.prometheus_metrics import (
_MAX_DEFAULT_METRICS_SCRAPE_INTERVAL,
_MAX_PROMETHEUS_SAMPLES,
_get_user_services_scrape_interval,
)
# Fixed reference timestamp; all query times are offsets from this instant.
_DT_REF: Final[datetime] = arrow.utcnow().datetime
# Each case supplies the timestamps of past Prometheus queries and the
# scrape interval the heuristic is expected to derive from them.
@pytest.mark.parametrize(
    "input_query_times, expected",
    [
        pytest.param(
            [], _MAX_DEFAULT_METRICS_SCRAPE_INTERVAL, id="no_prometheus_queries"
        ),
        pytest.param(
            [_DT_REF],
            _MAX_DEFAULT_METRICS_SCRAPE_INTERVAL,
            id="too_few_prometheus_queries",
        ),
        # Two samples 5 seconds apart -> derived interval of 5 seconds.
        ([_DT_REF, _DT_REF + timedelta(seconds=5)], 5),
        pytest.param(
            [_DT_REF, _DT_REF + timedelta(seconds=1000)],
            _MAX_DEFAULT_METRICS_SCRAPE_INTERVAL,
            id="prometheus_queries_too_far_apart",
        ),
        pytest.param(
            [
                _DT_REF + timedelta(seconds=i * 3)
                for i in range(_MAX_PROMETHEUS_SAMPLES)
            ],
            3,
            id="average_over_prometheus_queries",
        ),
    ],
)
def test_get_user_services_scrape_interval(
    input_query_times: Sequence[datetime], expected: NonNegativeFloat
):
    """The interval derives from the gaps between query times; degenerate
    inputs fall back to _MAX_DEFAULT_METRICS_SCRAPE_INTERVAL."""
    assert _get_user_services_scrape_interval(input_query_times) == expected
| ITISFoundation/osparc-simcore | services/dynamic-sidecar/tests/unit/test_modules_prometheus_metrics.py | test_modules_prometheus_metrics.py | py | 1,426 | python | en | code | 35 | github-code | 6 | [
{
"api_name": "typing.Final",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "datetime.datetime",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "arrow.utcnow",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "collections.abc.Sequence"... |
14255729146 | from _MOM import MOM
from _TFL import TFL
import _TFL._Meta.Object
import _TFL._Meta.Once_Property
from _TFL.predicate import first, paired
from _TFL.Decorator import getattr_safe
from _TFL.I18N import _, _T, _Tn
import itertools
import logging
class Entity (TFL.Meta.Object) :
    """Base class for scope-specific E_Type managers."""
    def __init__ (self, etype, scope) :
        self._etype = etype
        self.home_scope = scope
    # end def __init__
    def __call__ (self, * args, ** kw) :
        ### Instantiate the managed E_Type, bound to this manager's scope.
        return self._etype (* args, scope = self.home_scope, ** kw)
    # end def __call__
    @TFL.Meta.Once_Property
    @getattr_safe
    def default_child (self) :
        """The default child of partial entity types, if any."""
        dc = self._etype.default_child
        if dc is not None :
            try :
                return self.home_scope [dc]
            except KeyError :
                ### no manager registered for the default child's type name
                pass
    # end def default_child
    @property
    @getattr_safe
    def ems (self) :
        ### entity manager system of the owning scope
        return self.home_scope.ems
    # end def ems
    @property
    @getattr_safe
    def E_Type (self) :
        return self._etype
    # end def E_Type
    @TFL.Meta.Once_Property
    @getattr_safe
    def is_partial (self) :
        return self._etype.is_partial
    # end def is_partial
    def ac_query_attrs (self, names, values, AQ = None) :
        """Yield an auto-completion filter for each attribute in `names`
           that has a parsable value in `values`; unparsable values are
           silently skipped.
        """
        if AQ is None :
            AQ = self._etype.AQ
        for n in names :
            if n in values :
                try :
                    vq = getattr (AQ, n).AC (values [n])
                except (ValueError, TypeError) :
                    pass
                else :
                    if vq is not None :
                        yield vq
    # end def ac_query_attrs
    def ac_ui_display (self, names, matches) :
        """Yield, for each match tuple, the ui-display of its values, using
           the attribute-specific displayer when one is available.
        """
        def _gen (self, names) :
            for n in names :
                try :
                    attr = self.get_etype_attribute (n)
                except AttributeError :
                    disp = lambda v : getattr (v, "ui_display", v)
                else :
                    disp = attr.ac_ui_display
                yield disp
        attr_displayers = list (_gen (self, names))
        for match in matches :
            yield tuple (d (v) for d, v in zip (attr_displayers, match))
    # end def ac_ui_display
    def get_etype_attribute (self, name) :
        """Return the kind object of attribute `name` of the managed E_Type."""
        etype = self._etype
        result = getattr (etype.AQ, name)._attr.kind
        return result
    # end def get_etype_attribute
    def query (self, * filters, ** kw) :
        """Return all entities matching the conditions in `filters` and `kw`.
        When no `filters` or `kw` are specified, `query` returns the
        transitive extension of the type in question, i.e., all instances
        of the type and all its subclasses.
        When `strict = True` is specified as the only argument, `query`
        returns the strict extension, i.e., all instances of the type in
        question, but none of its subclasses.
        All other filters reduce the number of instances returned to those
        that satisfy the filter conditions.
        """
        sort_key = kw.pop ("sort_key", None)
        Type = self._etype
        result = self.ems.query (Type, * filters, ** kw)
        if sort_key is not None :
            result = result.order_by (sort_key)
        return result
    # end def query
    def raw_query_attrs (self, names, values = None, AQ = None) :
        """Return query expressions for `names`: attribute queries when
           `values` is None, otherwise equality filters built from the raw
           values; raises Attribute_Unknown/Attribute_Syntax on bad input.
        """
        if AQ is None :
            AQ = self._etype.AQ
        def _gen (self, names, values, AQ) :
            if values is None :
                for n in names :
                    aq = getattr (AQ, n)
                    if aq is not None :
                        yield aq
                    else :
                        raise MOM.Error.Attribute_Unknown (None, n, None)
            else :
                for n in names :
                    if n in values :
                        aq = getattr (AQ, n)
                        v = values [n]
                        if aq is not None :
                            eq = aq.EQ (v)
                            if eq is not None :
                                yield eq
                            else :
                                raise MOM.Error.Attribute_Syntax \
                                    (None, aq._attr, v)
                        else :
                            raise MOM.Error.Attribute_Unknown (None, n, v)
        return tuple (_gen (self, names, values, AQ))
    # end def raw_query_attrs
    def __getattr__ (self, name) :
        ### Delegate unknown attributes to the E_Type; fall back to its
        ### `attributes` map before giving up.
        if name.startswith ("__") and name.endswith ("__") :
            ### Placate inspect.unwrap of Python 3.5,
            ### which accesses `__wrapped__` and eventually throws `ValueError`
            return getattr (self.__super, name)
        etype = self._etype
        try :
            return getattr (etype, name)
        except Exception :
            try :
                return etype.attributes [name]
            except KeyError :
                raise AttributeError
    # end def __getattr__
    def __instancecheck__ (self, instance) :
        return isinstance (instance, self.E_Type.Essence)
    # end def __instancecheck__
    def __subclasscheck__ (self, subclass) :
        return issubclass (subclass, self.E_Type.Essence)
    # end def __subclasscheck__
    def __repr__ (self) :
        return "<E_Type_Manager for %s of scope %s>" % \
            (self._etype.type_name, self.home_scope.name)
    # end def __repr__
# end class Entity
class An_Entity (Entity) :
    """Scope-specific manager for a specific type of anonymous entities."""
    def example (self, full = True) :
        ### Build a raw example instance from the E_Type's example attributes.
        return self (raw = True, ** self._etype.example_attrs (full))
    # end def example
    def query (self, * args, ** kw) :
        ### we need to define this function to hide the `query` attribute of
        ### the entities (which is a list of all attributes with the kind
        ### `Query`)
        ### Anonymous entities are not stored on their own: always empty.
        return TFL.Q_Result (())
    # end def query
# end class An_Entity
class Id_Entity (Entity) :
    """Scope-specific manager for a specific essential object- or link-type."""
    def __call__ (self, * args, ** kw) :
        ### Create the entity, then register it with the scope (optionally
        ### reusing a caller-supplied pid).
        pid = kw.pop ("__pid", None)
        result = self.__super.__call__ (* args, ** kw)
        self.home_scope.add (result, pid = pid)
        return result
    # end def __call__
    @property
    @getattr_safe
    def count (self) :
        """Return the transitive count of objects or links."""
        return self.ems.count (self._etype, strict = False)
    # end def count
    @property
    @getattr_safe
    def count_strict (self) :
        """Return the strict count of objects or links."""
        result = self.ems.count (self._etype, strict = True)
        assert (not self.E_Type.is_partial) or result == 0
        return result
    # end def count_strict
    def cooked_epk (self, epk, kw) :
        """Normalize `epk`/`kw` into cooked primary-key values; returns
           (epk-tuple, remaining kw, responsible E_Type manager).
        """
        (epk, kw), this = self._epkified (* epk, ** kw)
        raw = kw.get ("raw", False)
        epk_iter = (this._raw_epk_iter if raw else this._cooked_epk_iter)
        return tuple (epk_iter (epk)), kw, this
    # end def cooked_epk
    def example (self, full = False) :
        """Create an example instance in a sandbox scope; returns None when
           the type is partial or example creation fails.
        """
        with self.home_scope.example_etm (self) as x_etm :
            try :
                return x_etm.instance_or_new \
                    (raw = True, ** x_etm._etype.example_attrs (full))
            except MOM.Error.Partial_Type as exc :
                pass
            except Exception as exc :
                if __debug__ :
                    logging.exception ("\n    %s.example", self.type_name)
    # end def example
    def exists (self, * epk, ** kw) :
        """Return true if an object or link with primary key `epk` exists."""
        epk, kw, this = self.cooked_epk (epk, kw)
        kw.pop ("on_error", None)
        if kw :
            raise TypeError (kw)
        return this.ems.exists (this._etype, epk)
    # end def exists
    def instance (self, * epk, ** kw) :
        """Return the object or link with primary key `epk` or None."""
        epk, kw, this = self.cooked_epk (epk, kw)
        return this.ems.instance (this._etype, epk)
    # end def instance
    def instance_or_new (self, * epk, ** kw) :
        """Return the existing instance for `epk`, creating it if necessary."""
        try :
            result = self.instance (* epk, ** kw)
        except MOM.Error.Error :
            ### let MOM.Entity handle this case
            result = None
        if result is None :
            result = self (* epk, ** kw)
        return result
    # end def instance_or_new
    def pid_query (self, pid) :
        """Return entity with persistent id `pid`."""
        return self.ems.pid_query (pid, self._etype)
    # end def pid_query
    def query_s (self, * filters, ** kw) :
        """Return `self.query (* filters, ** kw)`
        sorted by `kw.get ("sort_key", Type.sort_key)`.
        """
        ### Need to use `Q_Result_Composite` because `Type.sort_key` doesn't
        ### work with some backends (SQL, I am looking at you)
        Type = self._etype
        sort_key = kw.pop ("sort_key", Type.sort_key)
        result = self.query (* filters, ** kw)
        result = self.ems.Q_Result_Composite ([result], sort_key)
        return result
    # end def query_s
    def query_1 (self, * filters, ** kw) :
        """Return the number of matches and the one single entity, if any,
        for the conditions in `filters` and `kw`.
        """
        q = self.query (* filters, ** kw).limit (2)
        c = q.count ()
        return c, q.first () if c == 1 else None
    # end def query_1
    def _epkified (self, * epk, ** kw) :
        ### Resolve an optional trailing type-name argument to the proper
        ### E_Type manager, then normalize the remaining primary key.
        this = self
        etype = self._etype
        if epk and isinstance (epk [-1], etype.Type_Name_Type) :
            this = self.home_scope [epk [-1]]
            epk = epk [:-1]
            etype = this._etype
        ### Don't pass `on_error` through here to avoid `Link.__call__`
        ### ending up with doubled error messages in case of
        ### `MOM.Error.Required_Missing`
        kw = pkw = dict (kw)
        kw.pop ("on_error", None)
        if etype.args_as_kw and kw.get ("raw", False) :
            pkw = etype._kw_polished \
                ( etype.epk_as_kw (* epk, ** kw)
                , on_error = lambda * args, ** kw : False
                )
            epk = ()
        return etype.epkified (* epk, ** pkw), this
    # end def _epkified
# end class Id_Entity
class MD_Entity (Entity) :
"""Scope-specific manager for a specific type of meta-data entities."""
# end class MD_Entity
class Object (Id_Entity) :
"""Scope-specific manager for essential object-types."""
def ac_query_auto_split (self, text) :
result = []
et = self._etype
AQ = et.AQ
epk_aqc = [getattr (AQ, en).AC for en in et.epk_sig]
for epks in et.epk_splitter (text) :
single_value_queries = []
for v in epks :
acqs = [acq (v) for acq in epk_aqc]
single_value_queries.append (TFL.Filter_Or (* acqs))
result.append (self.query (* single_value_queries))
return result
# end def ac_query_auto_split
@property
@getattr_safe
def singleton (self) :
Type = self._etype
if Type.max_count == 1 :
try :
return first (self.query ().limit (2))
except IndexError :
pass
# end def singleton
def _cooked_epk_iter (self, epk) :
for (pka, v) in zip (self._etype.primary, epk) :
if v is not None :
try :
yield pka.cooked (v)
except MOM.Error.No_Such_Entity :
yield None
else :
yield None
# end def _cooked_epk_iter
def _raw_epk_iter (self, epk) :
for (pka, v) in zip (self._etype.primary, epk) :
if v is not None :
try :
yield pka.from_string (v)
except MOM.Error.No_Such_Entity :
yield None
else :
yield None
# end def _raw_epk_iter
# end class Object
class Link (Id_Entity) :
"""Scope-specific manager for essential link-types."""
def __call__ (self, * args, ** kw) :
try :
(args, kw), this = self._epkified (* args, ** kw)
self._checked_roles (* args, ** kw)
if not kw.get ("raw", False) :
args = tuple \
(self._role_to_cooked_iter (args, auto_create = True))
except MOM.Error.Required_Missing :
### let MOM.Entity handle this case
pass
else :
E_Type = self.E_Type
if E_Type.is_partial :
### try to find non-partial child fitting e-types of `roles`
roles = args [:E_Type.number_of_roles]
scope = self.home_scope
BT = scope.MOM.Id_Entity.E_Type
if all (isinstance (r, BT) for r in roles) :
CT = E_Type.child_np (roles)
if CT is not None :
return scope [CT.type_name] (* args, ** kw)
return self.__super.__call__ (* args, ** kw)
# end def __call__
def applicable_objects (self, objects) :
"""Returns all `objects` not refusing to be linked by `self._etype`."""
type_name = self._etype.Essence.type_name
return [o for o in objects if type_name not in o.refuse_links]
# end def applicable_objects
def r_query (self, * filters, ** kw) :
"""Return all links matching the conditions in `filters` and `kw`.
`r_query` behaves similar to `query` but provides the additional
features:
- if `kw` contains role names or other id-entity-attributes,
* the name can be a generic or a specific role name (`query`
only allows generic role names)
* the values passed can be `epk` in cooked or raw form (for
`query`, objects must be passed)
* the returned links are restricted to those linking the
specified objects
"""
Type = self._etype
map = getattr (Type, "role_map", None)
rkw = {}
if map :
for k in list (kw) :
aie = None
if k in map :
aie = Type.Roles [map [k]]
elif k in Type.attributes :
a = Type.attributes [k]
if isinstance (a.attr, MOM.Attr.A_Id_Entity) :
aie = a.attr
if aie is not None :
try :
obj = self._cooked_role (aie, kw.pop (k))
if not isinstance (obj, aie.P_Type) :
return []
rkw [aie.name] = obj
except MOM.Error.No_Such_Entity :
return TFL.Q_Result (())
if rkw :
kw = dict (kw, ** rkw)
result = self.query (* filters, ** kw)
return result
# end def r_query
def r_query_s (self, * filters, ** kw) :
"""Return `self.r_query (* filters, ** kw)`
sorted by `kw.get ("sort_key", Type.sort_key)`.
"""
### Need to use `Q_Result_Composite` because `Type.sort_key` doesn't
### work with some backends (SQL, I am looking at you)
sort_key = kw.pop ("sort_key", self._etype.sort_key)
result = self.r_query (* filters, ** kw)
result = self.ems.Q_Result_Composite ([result], sort_key)
return result
# end def r_query_s
def links_of (self, obj, * filters, ** kw) :
"""Return all links to `obj` (considers `obj` for each of the roles)."""
queries = []
r_query = self.ems.r_query
sort_key = kw.pop ("sort_key", False)
strict = kw.pop ("strict", False)
Type = self._etype
for r in Type.Roles :
if isinstance (obj, r.role_type) :
pk = self._cooked_role (r, obj)
queries.append \
(r_query (r.assoc, {r.name : pk}, strict = strict))
result = self.ems.Q_Result_Composite (queries)
if sort_key is not None :
result = result.order_by (Type.sort_key_pm (sort_key))
return result
# end def links_of
def _checked_roles (self, * epk, ** kw) :
if kw.get ("raw", False) :
epk = tuple (self._raw_epk_iter (epk))
else :
epk = tuple (self._role_to_cooked_iter (epk))
etype = self._etype
errors = []
r_query = self.ems.r_query
for r, pk in zip (etype.Roles, epk) :
if r.max_links >= 0 :
links = r_query (r.assoc, {r.name : pk}, strict = True)
nol = links.count ()
if nol >= r.max_links :
errors.append \
(MOM.Error.Multiplicity (etype, r, pk, epk, * links))
if errors :
exc = errors [0] if len (errors) == 1 else \
MOM.Error.Multiplicity_Errors (_T (etype.ui_name), errors)
raise exc
# end def _checked_roles
def _cooked_role (self, r, v) :
result = v
if v is not None and not isinstance (result, MOM.Entity) :
if not isinstance (v, (dict, tuple, list, int)) :
if not (v.startswith ("(") and v.endswith (")")) :
v = (v, )
result = r.from_string (v)
return result
# end def _cooked_role
def _raw_epk_iter (self, epk) :
for (pka, v) in zip (self._etype.primary, epk) :
try :
if getattr (pka, "role_type", None) :
### Allow role attributes to be passed as objects even if
### `raw` is specified
v = self._cooked_role (pka, v)
elif v is not None :
v = pka.from_string (v)
except MOM.Error.No_Such_Entity :
v = None
yield v
# end def _raw_epk_iter
def _role_to_cooked_iter (self, epk, auto_create = False) :
for (r, (pka, v)) in paired \
(self._etype.Roles, zip (self._etype.primary, epk)) :
if r is not None :
### Allow role attributes to be passed as raw values even if
### `raw` is not specified
try :
v = self._cooked_role (r, v)
except MOM.Error.No_Such_Entity :
if auto_create :
scope = self.home_scope
et = scope [r.role_type.type_name]
if et.is_partial and et.default_child :
et = et.default_child
v = et (* v, implicit = True, raw = True)
else :
v = None
elif v is not None :
try :
v = pka.cooked (v)
except MOM.Error.No_Such_Entity :
v = None
yield v
# end def _role_to_cooked_iter
_cooked_epk_iter = _role_to_cooked_iter
# end class Link
class Link1 (Link) :
"""Scope-specific manager for essential unary link-types."""
# end class Link1
class Link2 (Link) :
"""Scope-specific manager for essential binary link-types."""
### XXX dfc_synthesizer
# end class Link2
class Link3 (Link) :
"""Scope-specific manager for essential ternary link-types."""
# end class Link3
__doc__ = """
`MOM.E_Type_Manager` provides classes implementing scope-specific managers
for essential object and link types.
For each essential object and link type, a scope provides a
`E_Type_Manager` that is accessible under the `type_name` of the essential
type in question.
For instance, the `E_Type_Manager` for an essential
object type `BMT.Mouse` of a scope `s` can be accessed as::
s.BMT.Mouse
and provides methods to create and query instances of `BMT.Mouse`. A new
mouse named `mickey` is created by::
s.BMT.Mouse ("mickey")
The transitive extension of mice, i.e., the extension of `BMT.Mouse` and
all classes derived from it, is computed by the query::
s.BMT.Mouse.query ()
"""
if __name__ != "__main__" :
MOM._Export_Module ()
### __END__ MOM.E_Type_Manager
| xiaochang91/tapyr | _MOM/E_Type_Manager.py | E_Type_Manager.py | py | 20,532 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "_TFL.TFL.Meta",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "_TFL.TFL",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "_TFL.TFL.Meta",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "_TFL.TFL",
"line_n... |
19581520317 | import os
import time
from collections import defaultdict
from os.path import join as osjoin
import csv
from pyspark.sql import SparkSession
import pyspark.sql.types as T
from util.file_manager import file_manager
from util.cosine_similarity import calculate_cosine_similarity
from core.directory import (
src_embeddings_dir, susp_embeddings_dir, susp_stats_dir, csv_dir,
parquet_train_classifier_dir, train_classifier_log_file
)
spark = SparkSession.builder.appName('test_csv').getOrCreate()
schema = T.StructType([
T.StructField('cosine_similarity', T.FloatType(), False),
T.StructField('is_plagiarism', T.IntegerType(), False)
])
def convert_from_csv_to_parquet(
csv_dir, csv_file, parquet_root_dir, parquet_filename
):
df = spark.read.csv(osjoin(csv_dir, csv_file), header=False, schema=schema)
df.write.format('parquet').save(osjoin(parquet_root_dir, parquet_filename))
print(f'done\t', end='')
# stats for a single suspicious file
# convert susp json stats file to stats that can be use for compare susp file with src files
# stats = {'src_name.txt': [{ 'src': set(), 'susp': set() }]
def get_stats_for_a_susp_file(file):
raw_susp_stats = file_manager.read_json(file)
stats = defaultdict(list)
for item in raw_susp_stats['file_stats']:
para_len = item['paragraph_length']
start_index_in_src = item['src_start_index']
insert_index_in_susp = item['susp_insert_index']
stats[item['src_file']].append({
'src': set(range(start_index_in_src, start_index_in_src+para_len)),
'susp': set(range(insert_index_in_susp, insert_index_in_susp+para_len))
})
return stats
# main_stats = {
# 'src_name.txt': [{'src': set(), 'susp': set()}],
# 'src_name.txt': [{'src': set(), 'susp': set()}]
# }
def is_plagiarism_sentence(src_index, susp_index, src_name, main_stats):
if src_name in main_stats:
for index, item in enumerate(main_stats[src_name]):
if src_index in item['src'] and susp_index in item['susp']:
main_stats[src_name][index]['src'].remove(src_index)
main_stats[src_name][index]['susp'].remove(susp_index)
return 1, main_stats
return 0, main_stats
def read_embeddings(dir, file):
return file_manager.pickle_load(osjoin(dir, file))
def stream_source_embeddings_from_pickle(num_of_file=3):
src_embeddings_files = os.listdir(src_embeddings_dir)
for start_index in range(0, len(src_embeddings_files), num_of_file):
source_embeddings = []
for src_emb in src_embeddings_files[start_index: start_index+num_of_file]:
source_embeddings.extend(
file_manager.pickle_load(osjoin(src_embeddings_dir, src_emb))
)
yield source_embeddings
susp_list_file = osjoin('..', 'stats_about_files', 'susp_for_train_model.txt')
susp_list = file_manager.read_line_by_line(susp_list_file)
susp_list = [f'embddings_{file}.pk' for file in susp_list]
for susp_embeddings_file in susp_list:
start = time.time()
suspicious_embeddings = read_embeddings(susp_embeddings_dir, susp_embeddings_file)
susp_file_name = susp_embeddings_file[:-7]
main_stats = get_stats_for_a_susp_file(osjoin(susp_stats_dir, susp_file_name + '.json'))
csv_file = osjoin(csv_dir, susp_file_name + '.csv')
print(f'Convert {susp_file_name}...', end='')
for source_embeddings in stream_source_embeddings_from_pickle():
result = []
for susp_row in suspicious_embeddings:
for src_row in source_embeddings:
sim = calculate_cosine_similarity(susp_row['embedding'], src_row['embedding'])
is_plg, main_stats = is_plagiarism_sentence(
src_row['index'], susp_row['index'], src_row['filename'], main_stats
)
result.append((sim, is_plg))
with open(csv_file, 'a') as f:
writer = csv.writer(f)
writer.writerows(result)
# for performace in read/write dataframe and disk storage
# convert csv to parquet format and then remove csv file
convert_from_csv_to_parquet(csv_dir, csv_file, parquet_train_classifier_dir, susp_file_name)
os.remove(osjoin(csv_dir, csv_file))
execute_time = round(time.time() - start, 2) / 60
log_content = f'{susp_embeddings_file} {execute_time} mins'
file_manager.append_single_line(train_classifier_log_file, log_content)
print(execute_time, 'mins') | oldguard69/lvtn | server/core/4_make_data_for_training_classifier.py | 4_make_data_for_training_classifier.py | py | 4,560 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pyspark.sql.SparkSession.builder.appName",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.SparkSession.builder",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "pyspark.sql.SparkSession",
"line_number": 18,
"usage_type"... |
35260443444 | import logging
from typing import List, Optional
import gspread
from oauth2client.service_account import ServiceAccountCredentials
from debunkbot.models import (
Claim,
GoogleSheetCredentials,
MessageTemplate,
MessageTemplateSource,
)
logger = logging.getLogger(__name__)
class GoogleSheetHelper(object):
"""Helper class for getting data from google sheet"""
def __init__(self) -> None:
"""Instance method to initialize Google Drive API
:param self:
:return: None
"""
self.__scope = [
"https://spreadsheets.google.com/feeds",
"https://www.googleapis.com/auth/drive",
]
credentials = GoogleSheetCredentials.objects.first()
if credentials:
google_credentials = GoogleSheetCredentials.objects.first().credentials
else:
raise Exception("Google credentials have not been set up.")
self.__credentials = ServiceAccountCredentials.from_json_keyfile_dict(
google_credentials, scopes=self.__scope
)
self.__client = gspread.authorize(self.__credentials)
def get_sheet(self, sheet_key):
return self.__client.open_by_key(sheet_key)
def open_work_sheet(self, sheet_id, work_sheet_name) -> Optional[List[dict]]:
"""Instance method to open a worksheet and get the data
in Space Allocation sheet
:param self: Instance of GoogleSheetHelper
:return: Sheet Record as dict or None
"""
sheet = self.get_sheet(sheet_id)
worksheet = sheet.worksheet(work_sheet_name)
try:
return worksheet.get_all_records()
except gspread.exceptions.SpreadsheetNotFound:
return None
def get_claims(self) -> Optional[List[dict]]:
"""
Instance method that loads the claims either from the
cache or directly from google's servers depending on whether
we have a saved version in our cache or not
:param self: Instance of GoogleSheetHelper
:return: Claims
"""
claims = Claim.objects.all()
return claims
def fetch_response_messages(self):
# Delete all existing messages and create new ones.
MessageTemplate.objects.all().delete()
message_template_sources = MessageTemplateSource.objects.all()
message_templates = []
for message_template_source in message_template_sources:
try:
sheet = self.get_sheet(
message_template_source.spreadsheet_id
).worksheet(message_template_source.worksheet)
response_message_templates = sheet.get_all_records()
for response_message_template in response_message_templates:
message_template = response_message_template.get(
message_template_source.column
)
if message_template and message_template != "":
message_template_category = message_template_source.worksheet
message_templage = MessageTemplate(
message_template=message_template,
message_template_source=message_template_source,
message_template_category=message_template_category,
)
message_templates.append(message_templage)
except Exception:
continue
MessageTemplate.objects.bulk_create(message_templates)
| CodeForAfrica/DebunkBot | debunkbot/utils/gsheet/helper.py | helper.py | py | 3,569 | python | en | code | 8 | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "debunkbot.models.GoogleSheetCredentials.objects.first",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "debunkbot.models.GoogleSheetCredentials.objects",
"line_number": 29,
... |
74959952508 | from django.db import models
from django.core.validators import RegexValidator
from django.contrib.auth.models import AbstractUser
from django.db import models
from libgravatar import Gravatar
# Create your models here.
class User(AbstractUser):
"""User model used for authentication."""
class Experience(models.TextChoices):
BEGINNER = 'B'
INTERMEDIATE = 'I'
ADVANCED = 'A'
MASTER = 'M'
GRANDMASTER = 'G'
username = models.CharField(
max_length=30,
unique=True,
validators=[
RegexValidator(
regex='^[a-z0-9]([._-](?![._-])|[a-z0-9])*[a-z0-9]$',
message='Usernames may only contain lowercase characters '
'and . _ - but not as '
'the first or last character.',
code='invalid_username'
)
]
)
"""Attributes of Users."""
name = models.CharField(max_length=100, blank=False)
email = models.EmailField(unique=True, blank=False)
public_bio = models.CharField(max_length=250, blank=False)
chess_experience = models.CharField(max_length=1, choices=Experience.choices, default=Experience.BEGINNER)
def gravatar(self, size=120):
"""Return a URL to the user's gravatar."""
gravatar_object = Gravatar(self.email)
gravatar_url = gravatar_object.get_image(size=size, default='mp')
return gravatar_url
| amir-rahim/ChessClubManagementSystem | clubs/models/users.py | users.py | py | 1,456 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "django.contrib.auth.models.AbstractUser",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "django.db.models.TextChoices",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 12,
"usage_type": "name"
},
... |
10260578739 | import json
import heapq
import math
#get texts
with open('10k_tokenized_texts.json', 'r') as file:
tokenized_texts = json.load(file)
#count word frequency and create vocabulary
wordfreq = {}
for text in tokenized_texts:
for token in text:
if token not in wordfreq.keys():
wordfreq[token] = 1
else:
wordfreq[token] += 1
#get 10k most frequent words
import heapq
most_freq = heapq.nlargest(10000, wordfreq, key=wordfreq.get)
#count document occurence (= in how many different documents a word appears)
document_occurence = [0] * len(most_freq)
for i in range(len(most_freq)):
for text in tokenized_texts:
if most_freq[i] in text:
document_occurence[i] += 1
#get inverse document frequency (idf) for each word
idf = [0] * len(most_freq)
for i in range(len(most_freq)):
idf[i] = (math.log(len(tokenized_texts)/document_occurence[i]))
#create bag of words vectors with tf-idf weighting
tfidf_vecs = []
for i in range(len(tokenized_texts)):
tfidf_vec = [0] * len(most_freq)
for j in range(len(most_freq)):
tf = tokenized_texts[i].count(most_freq[j])/(len(tokenized_texts[i])+1) #weighs document length
tfidf_vec[j] = tf * idf[j]
tfidf_vecs.append(tfidf_vec)
#dump to files
with open('10k_bow_tfidf_embeds.json', 'w') as file:
json.dump(tfidf_vecs, file)
| iwillemse/pre-uni | code/bow-tfidf.py | bow-tfidf.py | py | 1,368 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "json.load",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "heapq.nlargest",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "math.log",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 45,
... |
35727586260 | #!/usr/bin/python
import pygame, sys, game
from pygame.locals import *
WIDTH = 640
HEIGHT = 480
DRAWSTEP = 3
TICK = 30
VOLATILITY = 0.8
TIMESTEP = float(TICK)/1000
if len(sys.argv) < 2:
ORDER = 2
else:
ORDER = int(sys.argv[1])
BLACK = pygame.Color(0,0,0)
WHITE = pygame.Color(255,255,255)
pygame.init()
fpsClock = pygame.time.Clock()
font = pygame.font.Font(None, 36)
window = pygame.display.set_mode((WIDTH,HEIGHT))
pygame.display.set_caption('Deriv')
drawX = range(0, WIDTH/2, DRAWSTEP)
drawY = [HEIGHT/2] * len(drawX)
numDraw = len(drawX)
cDerivatives = [0] * (ORDER+1)
pDerivatives = cDerivatives
paused = True
game = game.Game(ORDER, len(drawX))
while True:
for event in pygame.event.get():
if event.type == QUIT:
pygame.quit()
sys.exit()
elif event.type == MOUSEMOTION:
mouseX, mouseY = event.pos
elif event.type == MOUSEBUTTONUP:
paused = not paused
elif event.type == KEYDOWN:
if event.key == K_ESCAPE:
pygame.quit()
sys.exit()
if not paused:
mouseX, mouseY = pygame.mouse.get_pos()
game.tick(VOLATILITY * (1-2*float(mouseY)/HEIGHT), TIMESTEP)
#cDerivatives[ORDER] = VOLATILITY * (1 - 2*float(mouseY)/HEIGHT)
#for i in range(ORDER,0,-1):
#cDerivatives[i-1] = pDerivatives[i-1] + 0.5*TIMESTEP*(pDerivatives[i] + cDerivatives[i])
#pDerivatives = cDerivatives
#drawY.append(int(0.5*HEIGHT*(1-cDerivatives[0])))
drawY.append(int(0.5*HEIGHT*(1-game.history[-1])))
drawY.pop(0)
window.fill(BLACK)
if paused:
text = font.render("Paused", True, WHITE)
textpos = text.get_rect(centerx = WIDTH/2)
textpos.top = 50
window.blit(text, textpos)
for i in range(0, min(len(drawY),numDraw)-1):
pygame.draw.line(window, WHITE, (drawX[i],drawY[i]), (drawX[i+1],drawY[i+1]))
pygame.display.update()
fpsClock.tick(TICK)
| TheBB/deriv | deriv.py | deriv.py | py | 1,999 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "sys.argv",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "pygame.Color",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "pygame.Color",
"line_num... |
9756222638 | import theano
from theano import tensor as T
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
from theano.tensor.signal import pool
from theano.tensor.nnet import conv3d2d
import numpy as np
from collections import OrderedDict
from .. import config
from .numpy_backend import get_random_magic_seed, get_random_magic_seed
_FLOATX = config.floatX()
_EPSILON = config.epsilon()
# ===========================================================================
# INTERNAL UTILS
# ===========================================================================
theano.config.floatX = _FLOATX
def _on_gpu():
'''Return whether the session is set to
run on GPU or not (i.e. on CPU).
'''
return theano.config.device[:3] == 'gpu' or theano.sandbox.cuda.cuda_enabled
if _on_gpu():
'''Import cuDNN only if running on GPU:
not having Cuda installed should not
prevent from running the present code.
'''
from theano.sandbox.cuda import dnn
def get_session():
return _on_gpu()
# ===========================================================================
# VARIABLE MANIPULATION
# ===========================================================================
def variable(value, dtype=_FLOATX, name=None, broadcastable=None):
'''Instantiate a tensor variable.
'''
value = np.asarray(value, dtype=dtype)
if broadcastable:
return theano.shared(value=value, name=name, strict=False,
broadcastable=broadcastable)
return theano.shared(value=value, name=name, strict=False)
def zeros_var(shape, dtype=_FLOATX, name=None):
'''Instantiate an all-zeros variable.
'''
return variable(np.zeros(shape), dtype, name)
def ones_var(shape, dtype=_FLOATX, name=None):
'''Instantiate an all-ones variable.
'''
return variable(np.ones(shape), dtype, name)
def is_variable(v):
return isinstance(v, theano.compile.SharedVariable)
_PLACEHOLDER_ID = 0
_PLACEHOLDER_SHAPE = {}
def placeholder(shape=None, ndim=None, dtype=_FLOATX, name=None):
'''Instantiate an input data placeholder variable.
'''
if shape is None and ndim is None:
raise Exception('Specify either a shape or ndim value.')
if shape is not None:
ndim = len(shape)
broadcast = (False,) * ndim
# ====== Modify add name prefix ====== #
global _PLACEHOLDER_ID
name_prefix = 'ID.%02d.' % _PLACEHOLDER_ID
_PLACEHOLDER_ID += 1
if name is None:
name = ''
name = name_prefix + name
placeholder = T.TensorType(dtype, broadcast)(name)
# store the predefined shape of placeholder
_PLACEHOLDER_SHAPE[name] = \
[None for _ in range(ndim)] if shape is None else shape
return placeholder
def is_expression(v):
'''placeholder also is an expression'''
return isinstance(v, theano.tensor.TensorVariable)
def is_placeholder(v):
if is_expression(v) and v.name in _PLACEHOLDER_SHAPE:
return True
return False
def eval(x):
'''Run a graph.
'''
# just a hack to return placeholder shape when eval
if x in _PLACEHOLDER_SHAPE:
return _PLACEHOLDER_SHAPE[x]
return x.eval()
# ===========================================================================
# Shape operator
# ===========================================================================
def shape(x):
'''Return the shape of a tensor.
Warning: type returned will be different for
Theano backend (Theano tensor type) and TF backend (TF TensorShape).
'''
shape = x.shape
# little to eval the shape of placeholder
if hasattr(x, 'name'):
if x.name in _PLACEHOLDER_SHAPE:
_PLACEHOLDER_SHAPE[shape] = _PLACEHOLDER_SHAPE[x.name]
return shape
def int_shape(x):
return x.shape.eval()
def ndim(x):
return x.ndim
def broadcastable(x):
return x.broadcastable
def addbroadcast(x, *axes):
return T.addbroadcast(x, *axes)
# ===========================================================================
# Predefined data
# ===========================================================================
def zeros(shape, dtype=_FLOATX, name=None):
'''Instantiate an all-zeros variable.
'''
return T.zeros(shape=shape, dtype=dtype)
def ones(shape, dtype=_FLOATX, name=None):
'''Instantiate an all-ones variable.
'''
return T.ones(shape=shape, dtype=dtype)
def ones_like(x):
return T.ones_like(x)
def zeros_like(x):
return T.zeros_like(x)
def count_params(x):
'''Return number of scalars in a tensor.
Return: numpy integer.
'''
return np.prod(x.shape.eval())
def cast(x, dtype):
if 'theano.' in str(x.__class__):
return T.cast(x, dtype)
return np.cast[dtype](x)
def castX(x):
return cast(x, _FLOATX)
# LINEAR ALGEBRA
'''
Assumed overridden:
+, -, /, *, +=, -=, *=, /=
'''
def dot(x, y):
return T.dot(x, y)
def transpose(x):
return T.transpose(x)
def gather(reference, indices):
'''reference: a tensor.
indices: an int tensor of indices.
Return: a tensor of same type as reference.
'''
return reference[indices]
# ===========================================================================
# ELEMENT-WISE OPERATIONS
# ===========================================================================
def var(x, axis=None, keepdims=False):
return T.var(x, axis=axis, keepdims=keepdims)
def max(x, axis=None, keepdims=False):
return T.max(x, axis=axis, keepdims=keepdims)
def min(x, axis=None, keepdims=False):
return T.min(x, axis=axis, keepdims=keepdims)
def sum(x, axis=None, keepdims=False):
'''Sum of the values in a tensor, alongside the specified axis.
'''
return T.sum(x, axis=axis, keepdims=keepdims)
def prod(x, axis=None, keepdims=False):
'''Multiply the values in a tensor, alongside the specified axis.
'''
return T.prod(x, axis=axis, keepdims=keepdims)
def mean(x, axis=None, keepdims=False):
dtype = None
if 'int' in x.dtype:
dtype = _FLOATX
return T.mean(x, axis=axis, keepdims=keepdims, dtype=dtype)
def std(x, axis=None, keepdims=False):
return T.std(x, axis=axis, keepdims=keepdims)
def any(x, axis=None, keepdims=False):
'''Bitwise reduction (logical OR).
'''
return T.any(x, axis=axis, keepdims=keepdims)
def argmax(x, axis=-1):
return T.argmax(x, axis=axis, keepdims=False)
def argsort(x, axis=-1):
return T.argsort(x, axis)
def argtop_k(x, k=1):
# top-k accuracy
top = T.argsort(x, axis=-1)
# (Theano cannot index with [..., -top_k:], we need to simulate that)
top = top[[slice(None) for _ in range(top.ndim - 1)] +
[slice(-k, None)]]
top = top[(slice(None),) * (top.ndim - 1) + (slice(None, None, -1),)]
return top
def argmin(x, axis=-1):
return T.argmin(x, axis=axis, keepdims=False)
def square(x):
return T.sqr(x)
def abs(x):
return T.abs_(x)
def sqrt(x):
x = T.clip(x, 0., np.inf)
return T.sqrt(x)
def exp(x):
return T.exp(x)
def log(x):
return T.log(x)
def round(x):
return T.round(x)
def pow(x, a):
return T.pow(x, a)
def clip(x, min_value, max_value):
if max_value < min_value:
max_value = min_value
return T.clip(x, min_value, max_value)
def maximum(x, y):
return T.maximum(x, y)
def minimum(x, y):
return T.minimum(x, y)
# ===========================================================================
# SHAPE OPERATIONS
# ===========================================================================
def reverse(x, axis=-1):
'''Apply [::-1] to appropriate axis'''
if axis < 0:
axis += x.ndim
return x[(slice(None),) * axis + (slice(None, None, -1),)]
def concatenate(tensors, axis=-1):
return T.concatenate(tensors, axis=axis)
def reshape(x, shape):
return T.reshape(x, shape)
def dimshuffle(x, pattern):
'''Transpose dimensions.
pattern should be a tuple or list of
dimension indices, e.g. [0, 2, 1].
'''
pattern = tuple(pattern)
return x.dimshuffle(pattern)
def repeat_elements(x, rep, axis):
'''Repeat the elements of a tensor along an axis, like np.repeat.
If x has shape (s1, s2, s3) and axis=1, the output
will have shape (s1, s2 * rep, s3).
'''
return T.repeat(x, rep, axis=axis)
def resize_images(X, height_factor, width_factor, dim_ordering):
'''Resize the images contained in a 4D tensor of shape
- [batch, channels, height, width] (for 'th' dim_ordering)
- [batch, height, width, channels] (for 'tf' dim_ordering)
by a factor of (height_factor, width_factor). Both factors should be
positive integers.
'''
if dim_ordering == 'th':
output = repeat_elements(X, height_factor, axis=2)
output = repeat_elements(output, width_factor, axis=3)
return output
elif dim_ordering == 'tf':
output = repeat_elements(X, height_factor, axis=1)
output = repeat_elements(output, width_factor, axis=2)
return output
else:
raise Exception('Invalid dim_ordering: ' + dim_ordering)
def repeat(x, n):
'''Repeat a 2D tensor.
If x has shape (samples, dim) and n=2,
the output will have shape (samples, 2, dim).
'''
assert x.ndim == 2
x = x.dimshuffle((0, 'x', 1))
return T.extra_ops.repeat(x, n, axis=1)
def tile(x, n):
return T.tile(x, n)
def flatten(x, outdim=2):
return T.flatten(x, outdim)
def expand_dims(x, dim=-1):
'''Add a 1-sized dimension at index "dim".
'''
pattern = [i for i in range(x.type.ndim)]
if dim < 0:
if x.type.ndim == 0:
dim = 0
else:
dim = dim % x.type.ndim + 1
pattern.insert(dim, 'x')
return x.dimshuffle(pattern)
def squeeze(x, axis):
'''Remove a 1-dimension from the tensor at index "axis".
'''
x = T.addbroadcast(x, axis)
return T.squeeze(x)
def temporal_padding(x, padding=1):
'''Pad the middle dimension of a 3D tensor
with "padding" zeros left and right.
Appologies for the inane API, but Theano makes this
really hard.
'''
input_shape = x.shape
output_shape = (input_shape[0],
input_shape[1] + 2 * padding,
input_shape[2])
output = T.zeros(output_shape)
return T.set_subtensor(output[:, padding:x.shape[1] + padding, :], x)
def spatial_2d_padding(x, padding=(1, 1), dim_ordering='th'):
'''Pad the 2nd and 3rd dimensions of a 4D tensor
with "padding[0]" and "padding[1]" (resp.) zeros left and right.
'''
input_shape = x.shape
if dim_ordering == 'th':
output_shape = (input_shape[0],
input_shape[1],
input_shape[2] + 2 * padding[0],
input_shape[3] + 2 * padding[1])
output = T.zeros(output_shape)
indices = (slice(None),
slice(None),
slice(padding[0], input_shape[2] + padding[0]),
slice(padding[1], input_shape[3] + padding[1]))
elif dim_ordering == 'tf':
output_shape = (input_shape[0],
input_shape[1] + 2 * padding[0],
input_shape[2] + 2 * padding[1],
input_shape[3])
output = T.zeros(output_shape)
indices = (slice(None),
slice(padding[0], input_shape[1] + padding[0]),
slice(padding[1], input_shape[2] + padding[1]),
slice(None))
else:
raise Exception('Invalid dim_ordering: ' + dim_ordering)
return T.set_subtensor(output[indices], x)
def stack(*x):
return T.stack(*x)
# ===========================================================================
# VALUE MANIPULATION
# ===========================================================================
def get_value(x, borrow=False):
if not hasattr(x, 'get_value'):
raise Exception("'get_value() can only be called on a variable. " +
"If you have an expression instead, use eval().")
return x.get_value(borrow=borrow)
def set_value(x, value):
    # Assign `value` to a shared variable, cast to the variable's dtype.
    x.set_value(np.asarray(value, dtype=x.dtype))
def set_subtensor(x, y):
    # Symbolic equivalent of `x[...] = y`; `x` must be a subtensor expression.
    return T.set_subtensor(x, y)
# ===========================================================================
# GRAPH MANIPULATION
# ===========================================================================
# Pending (variable -> new_value) updates collected via add_global_updates();
# merged into the next compiled Function and then cleared.
_GLOBALS_UPDATES = OrderedDict()
def add_global_updates(variable, value):
    '''Register a pending update `variable <- value` to be merged into the
    next compiled function. The registry is reset each time a function is
    created.
    '''
    _GLOBALS_UPDATES[variable] = value
def reset_global_updates():
    # Clear all pending global updates (called after each Function build).
    global _GLOBALS_UPDATES
    _GLOBALS_UPDATES = OrderedDict()
class Function(object):
    """Wrapper around ``theano.function`` that also merges the globally
    registered updates (see ``add_global_updates``) into the compiled
    function, then resets that registry."""

    def __init__(self, inputs, outputs, updates=None, **kwargs):
        """Compile the theano function.

        Parameters
        ----------
        inputs, outputs : symbolic variables forwarded to theano.function
        updates : list of (shared_var, new_value) pairs or OrderedDict, optional

        BUGFIX: previously `updates=[]` was a shared mutable default that
        accumulated global updates across calls, and on Python 3
        `odict_items += list` raised TypeError when an OrderedDict was given.
        """
        if updates is None:
            updates = []
        elif isinstance(updates, OrderedDict):
            updates = list(updates.items())
        else:
            updates = list(updates)  # copy so the caller's list is never mutated
        # ====== add and reset global update ====== #
        updates += list(_GLOBALS_UPDATES.items())
        reset_global_updates()
        self.function = theano.function(
            inputs, outputs,
            updates=updates,
            on_unused_input='ignore', # TODO: remove this when stop testing
            allow_input_downcast=True, **kwargs)

    def __call__(self, *inputs):
        return self.function(*inputs)
def function(inputs, outputs, updates=None):
    """Compile and return a callable ``Function``.

    BUGFIX: `updates` previously defaulted to a shared mutable list.
    """
    return Function(inputs, outputs,
                    updates=[] if updates is None else updates)
def grad_clip(x, clip):
    '''
    Clip the gradient of `x` to the range [-clip, clip] on the backward
    pass while leaving the forward value untouched.
    This is an elemwise operation.

    Parameters
    ----------
    x: expression
        the variable we want its gradient inputs clipped
    clip: float
        gradients are clipped element-wise to [-clip, clip]

    Example
    -------
    >>> x = theano.tensor.scalar()
    >>>
    >>> z = theano.tensor.grad(grad_clip(x, 1)**2, x)
    >>> z2 = theano.tensor.grad(x**2, x)
    >>>
    >>> f = theano.function([x], outputs = [z, z2])
    >>>
    >>> print(f(2.0)) # output (1.0, 4.0)

    Note
    ----
    Theano registers an opt in tensor/opt.py that removes the GradClip,
    so it has 0 cost in the forward pass and only does work in the grad.
    '''
    return theano.gradient.grad_clip(x, -clip, clip)
def gradients(loss, variables, consider_constant=None, known_grads=None):
    """
    Return symbolic gradients for one or more variables with respect to some
    cost.

    For more information about how automatic differentiation works in Theano,
    see :mod:`gradient`. For information on how to implement the gradient of
    a certain Op, see :func:`grad`.

    Parameters
    ----------
    loss : scalar (0-dimensional) tensor variable or None
        Value with respect to which we are differentiating. May be
        `None` if known_grads is provided.
    variables : variable or list of variables
        term[s] for which we want gradients
    consider_constant : list of expressions(variables)
        expressions not to backpropagate through
    known_grads : dict, optional
        A dictionary mapping variables to their gradients. This is
        useful in the case where you know the gradient on some
        variables but do not know the original cost.

    Returns
    -------
    variable or list/tuple of variables (matches `variables`)
        symbolic expression of gradient of `loss` with respect to each
        of the `variables`. If an element of `variables` is not
        differentiable with respect to the output, then a zero
        variable is returned.

    Example
    -------
    >>> # For consider_constant:
    >>> a = T.variable(1.2)
    >>> b = T.variable(1.3)
    >>> x = a * b
    >>>
    >>> y = T.variable(2.)
    >>> z = T.variable(1.)
    >>>
    >>> z_pred = x * y
    >>> loss = T.pow((z - z_pred), 2)
    >>>
    >>> G = T.gradients(loss, [a, b, y], consider_constant=[x])
    >>>
    >>> for g in G:
    >>>    print(g.eval())
    >>> # a_grad=0. b_grad=0. y_grad=6.614
    """
    # disconnected_inputs='warn': warn (instead of raising) for variables
    # not on the computation path of `loss`.
    return T.grad(loss, variables,
                  consider_constant=consider_constant, known_grads=known_grads,
                  disconnected_inputs='warn')
def jacobian(loss, variables):
    # Jacobian of `loss` w.r.t. `variables`; warn (not fail) on disconnected inputs.
    return theano.gradient.jacobian(loss, variables, disconnected_inputs='warn')

def hessian(loss, variables):
    # Hessian of `loss` w.r.t. `variables`; warn (not fail) on disconnected inputs.
    return theano.gradient.hessian(loss, variables, disconnected_inputs='warn')
# ===========================================================================
# CONTROL FLOW
# ===========================================================================
def scan(step_fn, sequences=None, outputs_info=None, non_sequences=None,
         n_steps=None, truncate_gradient=-1, go_backwards=False):
    """Thin wrapper over ``theano.scan`` with strict=False (the step
    function may use shared variables that are not passed explicitly)."""
    return theano.scan(step_fn,
                       sequences=sequences,
                       outputs_info=outputs_info,
                       non_sequences=non_sequences,
                       n_steps=n_steps, truncate_gradient=truncate_gradient,
                       go_backwards=go_backwards,
                       strict=False)
def loop(step_fn, n_steps,
         sequences=None, outputs_info=None, non_sequences=None,
         go_backwards=False):
    """
    Helper function to unroll for loops. Can be used to unroll theano.scan.
    The parameter names are identical to theano.scan, please refer to here
    for more information.

    Note that this function does not support the truncate_gradient
    setting from theano.scan.

    Parameters
    ----------
    step_fn : function
        Function that defines calculations at each step.
    sequences : TensorVariable or list of TensorVariables
        List of TensorVariable with sequence data. The function iterates
        over the first dimension of each TensorVariable.
    outputs_info : list of TensorVariables
        List of tensors specifying the initial values for each recurrent
        value. Specify output_info to None for non-arguments to
        the step_function
    non_sequences: list of TensorVariables
        List of theano.shared variables that are used in the step function.
    n_steps: int
        Number of steps to unroll.
    go_backwards: bool
        If true the recursion starts at sequences[-1] and iterates
        backwards.

    Returns
    -------
    List of TensorVariables. Each element in the list gives the recurrent
    values at each time step.
    """
    if not isinstance(sequences, (list, tuple)):
        sequences = [] if sequences is None else [sequences]
    # When backwards reverse the recursion direction
    counter = range(n_steps)
    if go_backwards:
        counter = counter[::-1]
    output = []
    # ====== check if outputs_info is None ====== #
    if outputs_info is not None:
        prev_vals = outputs_info
    else:
        prev_vals = []
    # Positions of recurrent outputs that feed back into step_fn.
    # NOTE(review): output_idx is computed against outputs_info but after the
    # first step it indexes into output[-1] (ALL step outputs) — confirm the
    # two line up when outputs_info contains None entries.
    output_idx = [i for i in range(len(prev_vals)) if prev_vals[i] is not None]
    # ====== check if non_sequences is None ====== #
    if non_sequences is None:
        non_sequences = []
    # ====== Main loop ====== #
    for i in counter:
        # step arguments: current sequence slices, fed-back outputs, constants
        step_input = [s[i] for s in sequences] + \
                     [prev_vals[idx] for idx in output_idx] + \
                     non_sequences
        out_ = step_fn(*step_input)
        # The returned values from step can be either a TensorVariable,
        # a list, or a tuple. Below, we force it to always be a list.
        if isinstance(out_, T.TensorVariable):
            out_ = [out_]
        if isinstance(out_, tuple):
            out_ = list(out_)
        output.append(out_)
        prev_vals = output[-1]
    # iterate over each scan output and convert it to same format as scan:
    # [[output11, output12,...output1n],
    # [output21, output22,...output2n],...]
    output_scan = []
    for i in range(len(output[0])):
        l = map(lambda x: x[i], output)
        output_scan.append(T.stack(*l))
    return output_scan
def rnn(step_function, inputs, initial_states,
        go_backwards=False, mask=None, constants=None):
    '''Iterates over the time dimension of a tensor.

    Parameters
    ----------
    inputs: tensor of temporal data of shape (samples, time, ...)
        (at least 3D).
    step_function:
        Parameters:
            input: tensor with shape (samples, ...) (no time dimension),
                representing input for the batch of samples at a certain
                time step.
            states: list of tensors.
        Returns:
            output: tensor with shape (samples, ...) (no time dimension),
            new_states: list of tensors, same length and shapes
                as 'states'.
    initial_states: tensor with shape (samples, ...) (no time dimension),
        containing the initial values for the states used in
        the step function.
    go_backwards: boolean. If True, do the iteration over
        the time dimension in reverse order.
    mask: binary tensor with shape (samples, time),
        with a zero for every element that is masked.
    constants: a list of constant values passed at each step.

    Returns
    -------
    A tuple (last_output, outputs, new_states).
        last_output: the latest output of the rnn, of shape (samples, ...)
        outputs: tensor with shape (samples, time, ...) where each
            entry outputs[s, t] is the output of the step function
            at time t for sample s.
        new_states: list of tensors, latest states returned by
            the step function, of shape (samples, ...).
    '''
    ndim = inputs.ndim
    assert ndim >= 3, 'Input should be at least 3D.'
    # move the time axis to the front: (time, samples, ...) for scan
    axes = [1, 0] + list(range(2, ndim))
    inputs = inputs.dimshuffle(axes)
    if mask is not None:
        if mask.ndim == ndim - 1:
            mask = expand_dims(mask)
        assert mask.ndim == ndim
        mask = mask.dimshuffle(axes)
        if constants is None:
            constants = []
        # build an all-zero tensor of shape (samples, output_dim)
        initial_output = step_function(inputs[0], initial_states + constants)[0] * 0
        # Theano gets confused by broadcasting patterns in the scan op
        initial_output = T.unbroadcast(initial_output, 0, 1)

        def _step(input, mask, output_tm1, *states):
            output, new_states = step_function(input, states)
            # output previous output if masked.
            output = T.switch(mask, output, output_tm1)
            return_states = []
            # likewise, carry the previous state forward where masked
            for state, new_state in zip(states, new_states):
                return_states.append(T.switch(mask, new_state, state))
            return [output] + return_states

        results, _ = theano.scan(
            _step,
            sequences=[inputs, mask],
            outputs_info=[initial_output] + initial_states,
            non_sequences=constants,
            go_backwards=go_backwards)
    else:
        def _step(input, *states):
            output, new_states = step_function(input, states)
            return [output] + new_states

        results, _ = theano.scan(
            _step,
            sequences=inputs,
            outputs_info=[None] + initial_states,
            non_sequences=constants,
            go_backwards=go_backwards)
    # deal with Theano API inconsistency
    if type(results) is list:
        outputs = results[0]
        states = results[1:]
    else:
        outputs = results
        states = []
    outputs = T.squeeze(outputs)
    last_output = outputs[-1]
    # move time back to axis 1: (samples, time, ...)
    axes = [1, 0] + list(range(2, outputs.ndim))
    outputs = outputs.dimshuffle(axes)
    states = [T.squeeze(state[-1]) for state in states]
    return last_output, outputs, states
def switch(condition, then_expression, else_expression):
    '''condition: scalar tensor.

    Symbolic element-wise if/else (theano.tensor.switch).
    '''
    return T.switch(condition, then_expression, else_expression)
# ===========================================================================
# NN OPERATIONS
# ===========================================================================
def relu(x, alpha=0., max_value=None):
    """Rectified linear unit with optional leak `alpha` and ceiling `max_value`."""
    assert hasattr(T.nnet, 'relu'), ('It looks like like your version of '
                                     'Theano is out of date. '
                                     'Install the latest version with:\n'
                                     'pip install git+git://github.com/Theano/Theano.git --upgrade --no-deps')
    x = T.nnet.relu(x, alpha)
    if max_value is not None:
        # clip the activation from above
        x = T.minimum(x, max_value)
    return x
def softmax(x):
    """Softmax activation (theano.tensor.nnet.softmax)."""
    return T.nnet.softmax(x)

def softplus(x):
    """Softplus activation, log(1 + exp(x)) element-wise."""
    return T.nnet.softplus(x)

def linear(x):
    """Identity (no-op) activation."""
    return x
def categorical_crossentropy(output, target, from_logits=False):
    """Categorical crossentropy between an output distribution and a target.

    When `from_logits` is true the output is passed through softmax first;
    otherwise each sample's class probabilities are renormalized to sum to 1.
    """
    if from_logits:
        output = T.nnet.softmax(output)
    else:
        # scale preds so that the class probas of each sample sum to 1
        output = output / output.sum(axis=-1, keepdims=True)
    # avoid numerical instability with _EPSILON clipping
    output = T.clip(output, _EPSILON, 1.0 - _EPSILON)
    return T.nnet.categorical_crossentropy(output, target)
def binary_crossentropy(output, target, from_logits=False):
    """Element-wise binary crossentropy; applies sigmoid first when
    `from_logits` is true."""
    if from_logits:
        output = T.nnet.sigmoid(output)
    # avoid numerical instability with _EPSILON clipping
    clipped = T.clip(output, _EPSILON, 1.0 - _EPSILON)
    return T.nnet.binary_crossentropy(clipped, target)
def sigmoid(x):
    """Logistic sigmoid activation (theano.tensor.nnet.sigmoid)."""
    return T.nnet.sigmoid(x)

def hard_sigmoid(x):
    """Piecewise-linear, faster approximation of the sigmoid."""
    return T.nnet.hard_sigmoid(x)

def tanh(x):
    """Hyperbolic tangent activation."""
    return T.tanh(x)
def dropout(x, level, rescale=True, noise_shape=None,
            seed=None, rng=None):
    """Computes dropout.

    With probability `keep_prob`, outputs the input element scaled up by
    `1 / keep_prob`, otherwise outputs `0`. The scaling is so that the expected
    sum is unchanged.

    By default, each element is kept or dropped independently. If `noise_shape`
    is specified, it must be
    [broadcastable](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
    to the shape of `x`, and only dimensions with `noise_shape[i] == shape(x)[i]`
    will make independent decisions. For example, if `shape(x) = [k, l, m, n]`
    and `noise_shape = [k, 1, 1, n]`, each batch and channel component will be
    kept independently and each row and column will be kept or not kept together.

    Parameters
    ----------
    x: A tensor.
    level: float(0.-1.)
        probability dropout values in given tensor
    rescale: bool
        whether rescale the outputs by dividing the retain probablity
    noise_shape: A 1-D `Tensor` of type `int32`, representing the
        shape for randomly generated keep/drop flags.
    seed: int
        A Python integer. Used to create random seeds. See
    rng: `tensor.rng`
        random generator from tensor class
    """
    # ====== Validate arguments ====== #
    if seed is None:
        seed = get_random_magic_seed()
    if rng is None:
        rng = _RandomWrapper(RandomStreams(seed=seed),
                             np.random.RandomState(seed=seed))
    elif isinstance(rng, RandomStreams):
        rng = _RandomWrapper(rng, np.random.RandomState(seed=seed))
    # ====== Dropout ====== #
    retain_prob = 1. - level  # probability of keeping a unit
    if noise_shape is None:
        # element-wise independent keep/drop mask
        x = x * rng.binomial(shape=x.shape, p=retain_prob, dtype=x.dtype)
    else:
        # validate remove all None or -1 dimension
        noise_shape = tuple([x.shape[i] if j is None or j < 0 else j
                             for i, j in enumerate(noise_shape)])
        # auto select broadcast shape
        broadcast = [i for i, j in enumerate(noise_shape) if j == 1]
        if len(broadcast) > 0:
            # mark size-1 noise axes broadcastable so the mask tiles over them
            x = x * T.addbroadcast(
                rng.binomial(shape=noise_shape, p=retain_prob, dtype=x.dtype),
                *broadcast)
        else:
            x = x * rng.binomial(shape=noise_shape, p=retain_prob, dtype=x.dtype)
    if rescale:
        # divide by the keep-probability so the expected sum is unchanged
        x /= retain_prob
    return x
# ==================== Regularizations ==================== #
def l2_normalize(x, axis):
    """Normalize `x` along `axis` to unit L2 norm."""
    return x / T.sqrt(T.sum(T.square(x), axis=axis, keepdims=True))

def l2_regularize(x):
    """L2 penalty: sum of squared elements."""
    return T.sum(T.square(x))

def l1_regularize(x):
    """L1 penalty: sum of absolute values."""
    return T.sum(T.abs_(x))
def jacobian_regularize(hidden, params):
    ''' Computes the jacobian of the hidden layer with respect to
    the input; the expand_dims are necessary for broadcasting the
    element-wise product on the right axis.
    '''
    # derivative of a sigmoid hidden unit: h * (1 - h)
    grad_h = hidden * (1 - hidden)
    jac = expand_dims(grad_h, 1) * expand_dims(params, 0)
    # Compute the jacobian and average over the number of samples/minibatch
    penalty = T.sum(T.pow(jac, 2)) / grad_h.shape[0]
    return T.mean(penalty)
def kl_gaussian(mean, logsigma,
                prior_mean=0., prior_logsigma=0.):
    ''' KL-divergence between two gaussians.
    Useful for Variational AutoEncoders. Use this as an activation regularizer

    Parameters:
    -----------
    mean, logsigma: parameters of the input distributions
    prior_mean, prior_logsigma: paramaters of the desired distribution (note the
        log on logsigma)

    Note
    ----
    origin implementation from seya:
    https://github.com/Philip-Bachman/ICML-2015/blob/master/LogPDFs.py
    Copyright (c) Philip Bachman
    '''
    # variance ratio plus squared mean difference, both scaled by prior variance
    ratio = (T.exp(logsigma) + (mean - prior_mean)**2.0) / T.exp(prior_logsigma)
    kld = 0.5 * (prior_logsigma - logsigma + ratio - 1.0)
    return T.mean(kld)
def correntropy_regularize(x, sigma=1.):
    '''Correntropy-based activation regularizer.

    Note
    ----
    origin implementation from seya:
    https://github.com/EderSantana/seya/blob/master/seya/regularizers.py
    Copyright (c) EderSantana
    '''
    kernel_sum = T.sum(T.mean(T.exp(x**2 / sigma), axis=0))
    return -kernel_sum / T.sqrt(2 * np.pi * sigma)
# ===========================================================================
# CONVOLUTIONS
# ===========================================================================
def conv2d(x, kernel, strides=(1, 1),
           border_mode='valid', dim_ordering='th',
           image_shape=None, filter_shape=None):
    '''2D convolution.

    Run on cuDNN if available.
    border_mode: string, "same" or "valid".
    '''
    if dim_ordering not in {'th', 'tf'}:
        raise Exception('Unknown dim_ordering ' + str(dim_ordering))
    if dim_ordering == 'tf':
        # TF uses the last dimension as channel dimension,
        # instead of the 2nd one.
        # TH input shape: (samples, input_depth, rows, cols)
        # TH kernel shape: (depth, input_depth, rows, cols)
        # TF input shape: (samples, rows, cols, input_depth)
        # TF kernel shape: (rows, cols, input_depth, depth)
        x = x.dimshuffle((0, 3, 1, 2))
        kernel = kernel.dimshuffle((3, 2, 0, 1))
        if image_shape:
            image_shape = (image_shape[0], image_shape[3],
                           image_shape[1], image_shape[2])
        if filter_shape:
            filter_shape = (filter_shape[3], filter_shape[2],
                            filter_shape[0], filter_shape[1])
    if _on_gpu() and dnn.dnn_available():
        if border_mode == 'same':
            np_kernel = kernel.eval()
            # mode same and even filter
            if len([s for s in np_kernel.shape[2:] if s % 2 == 0]) > 0.:
                assert strides[0] <= np_kernel.shape[2], \
                    'strides should be smaller than the convolution window.'
                assert strides[1] <= np_kernel.shape[3], \
                    'strides should be smaller than the convolution window.'
                # emulate 'same' with a 'full' conv followed by center cropping
                conv_out = dnn.dnn_conv(img=x,
                                        kerns=kernel,
                                        border_mode='full')
                shift_x = (np_kernel.shape[2] - strides[0]) // 2
                shift_y = (np_kernel.shape[3] - strides[1]) // 2
                expected_width = (x.shape[2] + strides[0] - 1) // strides[0]
                expected_height = (x.shape[3] + strides[1] - 1) // strides[1]
                conv_out = conv_out[:, :,
                                    shift_x: shift_x + expected_width,
                                    shift_y: shift_y + expected_height]
            else: # same mode and odd filter
                # half-padding yields 'same' output directly for odd kernels
                border_mode = tuple(s // 2 for s in np_kernel.shape[2:])
                conv_out = dnn.dnn_conv(img=x,
                                        kerns=kernel,
                                        border_mode=border_mode,
                                        subsample=strides)
        else:
            conv_out = dnn.dnn_conv(img=x,
                                    kerns=kernel,
                                    border_mode=border_mode,
                                    subsample=strides)
    else:
        if border_mode == 'same' or border_mode == 'full':
            th_border_mode = 'full'
            np_kernel = kernel.eval()
            assert strides[0] <= np_kernel.shape[2], 'strides should be smaller than the convolution window.'
            assert strides[1] <= np_kernel.shape[3], 'strides should be smaller than the convolution window.'
        elif border_mode == 'valid':
            th_border_mode = 'valid'
        elif isinstance(border_mode, (tuple, list)):
            th_border_mode = border_mode
        else:
            raise Exception('Border mode not supported: ' + str(border_mode))
        conv_out = T.nnet.conv2d(x, kernel,
                                 border_mode=th_border_mode,
                                 subsample=strides,
                                 input_shape=image_shape,
                                 filter_shape=filter_shape)
        if border_mode == 'same':
            # crop the 'full' convolution output back down to 'same' size
            shift_x = (np_kernel.shape[2] - strides[0]) // 2
            shift_y = (np_kernel.shape[3] - strides[1]) // 2
            expected_width = (x.shape[2] + strides[0] - 1) // strides[0]
            expected_height = (x.shape[3] + strides[1] - 1) // strides[1]
            conv_out = conv_out[:, :,
                                shift_x: shift_x + expected_width,
                                shift_y: shift_y + expected_height]
    if dim_ordering == 'tf':
        # permute back to channels-last
        conv_out = conv_out.dimshuffle((0, 2, 3, 1))
    return conv_out
def conv3d(x, kernel, strides=(1, 1, 1),
           border_mode='valid', dim_ordering='th',
           image_shape=None, filter_shape=None):
    '''3D convolution.

    Run on cuDNN if available.
    border_mode: string, "same" or "valid".
    conv_mode: string, "conv" or "cross".
    '''
    if dim_ordering not in {'th', 'tf'}:
        raise Exception('Unknown dim_ordering ' + str(dim_ordering))
    if dim_ordering == 'tf':
        # TF uses the last dimension as channel dimension,
        # instead of the 2nd one.
        # TH input shape: (samples, input_depth, rows, cols, time)
        # TH kernel shape: (depth, input_depth, rows, cols, time)
        # TF input shape: (samples, rows, cols, time, input_depth)
        # TF kernel shape: (rows, cols, time, input_depth, depth)
        x = x.dimshuffle((0, 4, 1, 2, 3))
        kernel = kernel.dimshuffle((4, 3, 0, 1, 2))
        if image_shape:
            image_shape = (image_shape[0], image_shape[4],
                           image_shape[1], image_shape[2],
                           image_shape[3])
        if filter_shape:
            filter_shape = (filter_shape[4], filter_shape[3],
                            filter_shape[0], filter_shape[1],
                            filter_shape[2])
    if _on_gpu() and dnn.dnn_available():
        if border_mode == 'same':
            # half-padding on each spatial/temporal axis gives 'same' output
            np_kernel = kernel.eval()
            border_mode = tuple(s // 2 for s in np_kernel.shape[2:])
        conv_out = dnn.dnn_conv3d(img=x,
                                  kerns=kernel,
                                  border_mode=border_mode,
                                  subsample=strides)
    else:
        if border_mode == 'same':
            # emulate 'same' by zero-padding, then running a 'valid' conv
            assert(strides == (1, 1, 1))
            pad_dim1 = (kernel.shape[2] - 1)
            pad_dim2 = (kernel.shape[3] - 1)
            pad_dim3 = (kernel.shape[4] - 1)
            output_shape = (x.shape[0], x.shape[1],
                            x.shape[2] + pad_dim1,
                            x.shape[3] + pad_dim2,
                            x.shape[4] + pad_dim3)
            output = T.zeros(output_shape)
            indices = (slice(None), slice(None),
                       slice(pad_dim1 // 2, x.shape[2] + pad_dim1 // 2),
                       slice(pad_dim2 // 2, x.shape[3] + pad_dim2 // 2),
                       slice(pad_dim3 // 2, x.shape[4] + pad_dim3 // 2))
            x = T.set_subtensor(output[indices], x)
            border_mode = 'valid'
        border_mode_3d = (border_mode, border_mode, border_mode)
        conv_out = conv3d2d.conv3d(signals=x.dimshuffle(0, 2, 1, 3, 4),
                                   filters=kernel.dimshuffle(0, 2, 1, 3, 4),
                                   border_mode=border_mode_3d)
        conv_out = conv_out.dimshuffle(0, 2, 1, 3, 4)
        # support strides by manually slicing the output
        if strides != (1, 1, 1):
            conv_out = conv_out[:, :, ::strides[0], ::strides[1], ::strides[2]]
    if dim_ordering == 'tf':
        # BUGFIX: the output is 5D, so all five axes must be permuted back to
        # channels-last; the previous pattern (0, 2, 3, 1) was the 4D conv2d one.
        conv_out = conv_out.dimshuffle((0, 2, 3, 4, 1))
    return conv_out
def pool2d(x, pool_size, strides=(1, 1), border_mode='valid',
           dim_ordering='th', pool_mode='max'):
    """2D pooling ('max' or given `pool_mode`), cuDNN-accelerated when available."""
    # ====== dim ordering ====== #
    if dim_ordering not in {'th', 'tf'}:
        raise Exception('Unknown dim_ordering ' + str(dim_ordering))
    if dim_ordering == 'tf':
        x = x.dimshuffle((0, 3, 1, 2))
    # ====== border mode ====== #
    if border_mode == 'same':
        # derive per-axis padding from the pool window size
        w_pad = pool_size[0] - 2 if pool_size[0] % 2 == 1 else pool_size[0] - 1
        h_pad = pool_size[1] - 2 if pool_size[1] % 2 == 1 else pool_size[1] - 1
        padding = (w_pad, h_pad)
    elif border_mode == 'valid':
        padding = (0, 0)
    elif isinstance(border_mode, (tuple, list)):
        padding = tuple(border_mode)
    else:
        raise Exception('Invalid border mode: ' + str(border_mode))
    # ====== pooling ====== #
    if _on_gpu() and dnn.dnn_available():
        pool_out = dnn.dnn_pool(x, pool_size,
                                stride=strides,
                                mode=pool_mode,
                                pad=padding)
    else: # CPU veresion support by theano
        pool_out = pool.pool_2d(x, ds=pool_size, st=strides,
                                ignore_border=True,
                                padding=padding,
                                mode=pool_mode)
    if dim_ordering == 'tf':
        # permute back to channels-last
        pool_out = pool_out.dimshuffle((0, 2, 3, 1))
    return pool_out
def pool3d(x, pool_size, strides=(1, 1, 1), border_mode='valid',
           dim_ordering='th', pool_mode='max'):
    """3D pooling; on CPU it is emulated with two successive 2D pools."""
    # ====== dim ordering ====== #
    if dim_ordering not in {'th', 'tf'}:
        raise Exception('Unknown dim_ordering ' + str(dim_ordering))
    if dim_ordering == 'tf':
        x = x.dimshuffle((0, 4, 1, 2, 3))
    # ====== border mode ====== #
    if border_mode == 'same':
        # derive per-axis padding from the pool window size
        w_pad = pool_size[0] - 2 if pool_size[0] % 2 == 1 else pool_size[0] - 1
        h_pad = pool_size[1] - 2 if pool_size[1] % 2 == 1 else pool_size[1] - 1
        d_pad = pool_size[2] - 2 if pool_size[2] % 2 == 1 else pool_size[2] - 1
        padding = (w_pad, h_pad, d_pad)
    elif border_mode == 'valid':
        padding = (0, 0, 0)
    elif isinstance(border_mode, (tuple, list)):
        padding = tuple(border_mode)
    else:
        raise Exception('Invalid border mode: ' + str(border_mode))
    # ====== pooling ====== #
    if _on_gpu() and dnn.dnn_available():
        pool_out = dnn.dnn_pool(x, pool_size,
                                stride=strides,
                                mode=pool_mode,
                                pad=padding)
    else:
        # NOTE(review): the 2D padding (first two axes) is reused verbatim for
        # the second pooling pass over conv_dim3 — confirm that is intended.
        padding = padding[:2]
        # pooling over conv_dim2, conv_dim1 (last two channels)
        output = pool.pool_2d(input=x.dimshuffle(0, 1, 4, 3, 2),
                              ds=(pool_size[1], pool_size[0]),
                              st=(strides[1], strides[0]),
                              ignore_border=True,
                              padding=padding,
                              mode=pool_mode)
        # pooling over conv_dim3
        pool_out = pool.pool_2d(input=output.dimshuffle(0, 1, 4, 3, 2),
                                ds=(1, pool_size[2]),
                                st=(1, strides[2]),
                                ignore_border=True,
                                padding=padding,
                                mode=pool_mode)
    # ====== output ====== #
    if dim_ordering == 'tf':
        pool_out = pool_out.dimshuffle((0, 2, 3, 4, 1))
    return pool_out
# ===========================================================================
# RANDOMNESS
# ===========================================================================
class _RandomWrapper(object):
    """Pairs a Theano RandomStreams (symbolic sampling ops) with a numpy
    RandomState (host-side integers), both seeded by the caller."""

    def __init__(self, rng, state):
        super(_RandomWrapper, self).__init__()
        self._rng = rng      # Theano RandomStreams
        self._state = state  # numpy RandomState

    def randint(self):
        """Return a host-side random int in [0, 10_000_000).

        BUGFIX: the bound was the float ``10e6``; modern numpy rejects
        float bounds for ``randint`` — pass the equivalent int instead.
        """
        return self._state.randint(10000000)

    def normal(self, shape, mean, std, dtype=_FLOATX):
        return self._rng.normal(size=shape, avg=mean, std=std, dtype=dtype)

    def uniform(self, shape, low, high, dtype=_FLOATX):
        return self._rng.uniform(size=shape, low=low, high=high, dtype=dtype)

    def binomial(self, shape, p, dtype=_FLOATX):
        # n=1: Bernoulli keep/drop mask with keep-probability p
        return self._rng.binomial(size=shape, n=1, p=p, dtype=dtype)
def rng(seed=None):
    """Create a _RandomWrapper combining Theano RandomStreams (symbolic) and
    a numpy RandomState (host-side), both seeded identically."""
    if seed is None:
        seed = get_random_magic_seed()
    return _RandomWrapper(RandomStreams(seed=seed),
                          np.random.RandomState(seed=seed))
def random_normal(shape, mean=0.0, std=1.0, dtype=_FLOATX, seed=None):
    """Symbolic tensor of normally distributed samples."""
    if seed is None:
        seed = get_random_magic_seed()
    rng = RandomStreams(seed=seed)
    return rng.normal(size=shape, avg=mean, std=std, dtype=dtype)

def random_uniform(shape, low=0.0, high=1.0, dtype=_FLOATX, seed=None):
    """Symbolic tensor of uniformly distributed samples in [low, high)."""
    if seed is None:
        seed = get_random_magic_seed()
    rng = RandomStreams(seed=seed)
    return rng.uniform(shape, low=low, high=high, dtype=dtype)

def random_binomial(shape, p, dtype=_FLOATX, seed=None):
    """Symbolic tensor of Bernoulli(p) samples (binomial with n=1)."""
    if seed is None:
        seed = get_random_magic_seed()
    rng = RandomStreams(seed=seed)
    return rng.binomial(size=shape, n=1, p=p, dtype=dtype)
'''
more TODO:
tensordot -> soon to be introduced in TF
batched_tensordot -> reimplement
'''
# ===========================================================================
# Comparator
# ===========================================================================
def neq(a, b):
    """Element-wise a != b"""
    return T.neq(a, b)

def eq(a, b):
    """Element-wise a == b"""
    return T.eq(a, b)

def gt(a, b):
    """Element-wise a > b"""
    return T.gt(a, b)

def ge(a, b):
    """Element-wise a >= b"""
    return T.ge(a, b)

def lt(a, b):
    """Element-wise a < b"""
    return T.lt(a, b)

def le(a, b):
    """Element-wise a <= b"""
    return T.le(a, b)
def one_hot(x, nb_class):
    ''' x: 1D-integer vector.

    Returns a (len(x), nb_class) matrix with a single 1 per row.
    '''
    zeros = T.zeros((x.shape[0], nb_class), dtype=_FLOATX)
    return T.set_subtensor(zeros[T.arange(x.shape[0]), x], 1)
def one_hot_max(x, axis=-1):
    '''Return a tensor with 1.0 at the argmax position along `axis` and
    0.0 elsewhere.

    NOTE(review): the ``[None, :]`` broadcast assumes a 2D input with
    axis=-1 — confirm before using with other ranks or axes.

    Example
    -------
    >>> Input: [[0.0, 0.0, 0.5],
    >>>         [0.0, 0.3, 0.1],
    >>>         [0.6, 0.0, 0.2]]
    >>> Output: [[0.0, 0.0, 1.0],
    >>>          [0.0, 1.0, 0.0],
    >>>          [1.0, 0.0, 0.0]]
    '''
    return T.cast(
        T.eq(T.arange(x.shape[axis])[None, :],
             T.argmax(x, axis=axis, keepdims=True)),
        _FLOATX
    )
def apply_mask(x, mask):
    '''Multiply `x` by `mask` broadcast over the last axis.

    x : 3D tensor
    mask : 2D tensor

    Example
    -------
    >>> Input: [128, 500, 120]
    >>> Mask:  [1, 1, 0]
    >>> Output: [128, 500, 0]
    '''
    return T.mul(x, expand_dims(mask, -1))
| trungnt13/odin_old | odin/tensor/theano_backend.py | theano_backend.py | py | 43,646 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "theano.config",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "theano.config",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "theano.sandbox",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "numpy.asarr... |
13522158009 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Aug  7 10:37:20 2019

Download tweets matching SEARCH_TERM from the Twitter premium full-archive
search API (paging with the 'next' token, 500 tweets per request) and append
every page of results to a CSV file.

@author: paul
"""
# import relevant packages
from TwitterAPI import TwitterAPI
import pandas as pd
import json

i = 0  # counter of API calls made so far
requestlist = []  # list for storing each call from the api (500 tweets at a time)

# search criteria
SEARCH_TERM = ''
PRODUCT = 'fullarchive'
LABEL = 'Research'

# API keys to authorise and access the API (fill in before running)
consumerKey = ""
consumerSecret = ""
accessToken = ""
accessSecret = ""

# Code to initiate API
api = TwitterAPI(consumerKey, consumerSecret,
                 accessToken, accessSecret)

# loop which makes successive api calls; the first call carries no 'next'
# token, later calls pass the token from the previous page, and the loop
# stops when the previous response has no 'next' token left
while True:
    params = {'query': SEARCH_TERM,
              'fromDate': '201408220000',
              'toDate': '201408310000',
              'maxResults': 500}
    if i > 0:
        next_token = requestlist[i - 1].json().get('next')
        if next_token is None:
            break
        params['next'] = next_token
    requestlist.append(api.request('tweets/search/%s/:%s' % (PRODUCT, LABEL),
                                   params))
    i += 1

# save each payload to csv.
# BUGFIX: write the CSV header only for the first payload — previously every
# appended page re-emitted the header, interleaving duplicate header rows.
for page, payload in enumerate(requestlist):
    df = pd.read_json(json.dumps(payload.json()['results']))
    df.to_csv("acsvfile.csv", mode='a', header=(page == 0))
| prgeddes/TwitterDataExtraction | Search_Save_Tweets.py | Search_Save_Tweets.py | py | 1,563 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "TwitterAPI.TwitterAPI",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "pandas.read_json",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 53,
"usage_type": "call"
}
] |
4341271396 | from __future__ import annotations
import logging
import os
from time import sleep
from typing import List, Optional, Union, ClassVar, Dict, Type, Optional, Iterable
from queue import Queue, Empty
from easyflow.common.logger import setupLogger
from easyflow.common.utils import Timer
import threading
logger = setupLogger(__name__)
class ProcessorFactory:
    """Registry mapping a processor type name to its Processor class."""
    processorDict: Dict[str, Type[Processor]] = {}

    @classmethod
    def getProcessor(cls: Type['ProcessorFactory'], processorType: str) -> Type[Processor]:
        """Look up a registered Processor subclass by its `type` name.

        Raises
        ------
        Exception
            If `processorType` was never registered.
        """
        if processorType in cls.processorDict:
            return cls.processorDict[processorType]
        # BUGFIX: previously raised a bare Exception() with no message;
        # mirror DataFactory.getData's informative error text.
        raise Exception(f"No such processorType: {processorType}")

    @classmethod
    def register(cls, class_: Type[Processor]) -> Type[Processor]:
        """Class decorator: record `class_` under its `type` key and return it."""
        cls.processorDict[class_.type] = class_
        return class_
class Processor:
    """Base class for units of work executed by a Module.

    Subclasses set `type` (the ProcessorFactory registry key) and
    override `run`.
    """
    # registry key for ProcessorFactory; subclasses override
    type: ClassVar[str] = ""

    def __init__(self, name) -> None:
        self.name = name

    def run(self) -> None:
        """Execute the work; the base implementation is a no-op."""
        pass
@ProcessorFactory.register
class EmptyProcessor(Processor):
    """Processor that does nothing — a placeholder/no-op module body."""
    type: ClassVar[str] = "EmptyProcessor"

    def run(self) -> None:
        """No-op."""
        return
@ProcessorFactory.register
class CommandProcessor(Processor):
    """Processor that executes shell command(s) via ``os.system``."""
    type: ClassVar[str] = "CommandProcessor"

    def __init__(self, name, command: Union[list, str]):
        """A list of commands is joined with ' && ' so they run sequentially."""
        super().__init__(name)
        self.command: str
        self.command = " && ".join(command) if isinstance(command, list) else command

    def run(self) -> None:
        # NOTE(review): os.system on an untrusted string is shell-injectable;
        # keep commands trusted, or migrate to subprocess.run with a list.
        os.system(self.command)
class Module:
    """A workflow step: waits for its input Data nodes to exist, runs its
    Processor, then schedules downstream modules of its output nodes."""

    def __init__(self, name: str,
                 processor: Processor,
                 inputs: Optional[List[Data]] = None,
                 outputs: Optional[List[Data]] = None,
                 checkInterval: int = 10) -> None:
        self.name = name
        self.inputs: List[Data] = []
        if inputs:
            for inputNode in inputs:
                self.addInput(inputNode)
        self.outputs: List[Data] = []
        if outputs:
            for outputNode in outputs:
                self.addOutput(outputNode)
        self.processor = processor
        # seconds to sleep between retries while inputs are missing
        self.checkInterval = checkInterval
        # To avoid this module ran by multiple inputNode.
        self.running = False

    def addInput(self, inputNode: Data) -> None:
        """Register an input node and link this module as its downstream."""
        self.inputs.append(inputNode)
        inputNode.addDownStream(self)

    def addOutput(self, outputNode: Data) -> None:
        self.outputs.append(outputNode)

    def setWorkflow(self, workflow) -> None:
        # back-reference used by run() to enqueue downstream modules
        self.workflow = workflow

    def _run(self, reportError: bool = False, *args, **kwargs) -> int:
        """Single attempt: return 0 on success, -1 when any input is
        missing (or raise if `reportError` is True)."""
        notExists: List[Data] = []
        for inputNode in self.inputs:
            if not inputNode.checkExists():
                notExists.append(inputNode)
        if notExists:
            if reportError:
                raise Exception(f"The following inputs are detected as nonexisting node: {notExists}")
            else:
                print(f"Module {self.name} failed to run, errorCode: -1")
                return -1
        self.processor.run()
        return 0

    def run(self, *args, **kwargs) -> int:
        """Retry `_run` (sleeping `checkInterval` between tries) until it
        succeeds, then enqueue downstream modules and return 0.

        NOTE(review): the inner `break` stops after scheduling only the FIRST
        non-running downstream module of each output node — confirm this is
        intentional rather than meant to schedule all of them.
        """
        verbose = kwargs.get('verbose', True)
        errorCode = -1
        while True:
            errorCode = self._run(*args, **kwargs)
            if errorCode != 0:
                sleep(self.checkInterval)
            else:
                if verbose:
                    print(f"Module: {self.name} ran successfully!")
                for node in self.outputs:
                    for module in node.downstreamModules:
                        if not module.running:
                            self.workflow.addNodeToQueue(module)
                            module.running = True
                            break
                return errorCode
class DataFactory:
    """Registry mapping a Data node type name to its class."""
    dataTypes: ClassVar[Dict[str, Type[Data]]] = {}

    @classmethod
    def getData(cls, dataNodeType: str) -> Type[Data]:
        """Look up a registered Data subclass by its `type` name."""
        try:
            return cls.dataTypes[dataNodeType]
        except KeyError:
            raise Exception(f"No such dataNodeType: {dataNodeType}")

    @classmethod
    def register(cls, dataClass: Type[Data]) -> Type[Data]:
        """Class decorator: record `dataClass` under its `type` key and return it."""
        cls.dataTypes[dataClass.type] = dataClass
        return dataClass
class Data:
    """Base class for data nodes in the workflow graph.

    A Data node tracks which Modules consume it (`downstreamModules`) and
    reports whether the underlying data exists yet (`checkExists`).
    """
    # registry key for DataFactory; subclasses override
    type: ClassVar[str] = ""

    def __init__(self, name: str):
        self.name = name
        # NOTE(review): initialized to -1 and never updated here —
        # presumably a timestamp placeholder; confirm intended use.
        self.time: int = -1
        self.downstreamModules: List[Module] = []

    def addDownStream(self, downStream: Module):
        """Register a Module that consumes this node."""
        self.downstreamModules.append(downStream)

    def checkExists(self) -> bool:
        """Return True when the underlying data is available.

        BUGFIX: the base implementation silently returned None (falsy),
        making a node that forgot to override this appear permanently
        missing; now it fails loudly instead.
        """
        raise NotImplementedError(
            f"{type(self).__name__} must implement checkExists()")
@DataFactory.register
class NormalFileData(Data):
    """Data node backed by a path on the local filesystem."""
    type: ClassVar[str] = "NormalFileData"

    def __init__(self, name: str, path: str) -> None:
        super().__init__(name)
        # filesystem location whose existence marks this data as ready
        self.path = path

    def checkExists(self) -> bool:
        """True when the backing path exists."""
        return os.path.exists(self.path)
def func(node, pool):
    # Thin wrapper: run `node`, forwarding the worker pool as a keyword arg.
    node.run(pool=pool)
class Workflow:
    """Holds the module/data graph and runs it with a pool of Worker threads
    fed through a shared job Queue."""

    def __init__(self,
                 modules: Optional[Dict[str, Module]]=None,
                 datas: Optional[Dict[str, Data]]=None,
                 processors: Optional[Dict[str, Processor]]=None,
                 startNodes: Optional[List[Module]]=None) -> None:
        super().__init__()
        self.modules: Dict[str, Module] = {}
        # number of modules that have completed; incremented by Workers
        self.nFinished = 0
        if modules:
            for node in modules.values():
                self.addNode(node)
        self.datas: Dict[str, Data] = {} if not datas else datas
        self.startNodes: List[Module] = [] if not startNodes else startNodes
        self.processors: Dict[str, Processor] = {} if not processors else processors
        self.queue = Queue() # type:ignore

    def setStartNode(self, moduleNode: Module) -> None:
        self.startNodes.append(moduleNode)

    def addNode(self, node: Union[Module, Data]) -> None:
        """Register a node in the matching registry; Modules get a back-ref."""
        if isinstance(node, Data):
            self.datas[node.name] = node
        if isinstance(node, Module):
            self.modules[node.name] = node
            node.setWorkflow(self)

    def addNodes(self, nodes: Iterable[Union[Module, Data]]) -> None:
        for node in nodes:
            self.addNode(node)

    def addNodeToQueue(self, node: Module):
        # enqueue as a (callable, args, kwargs) triple — the shape Worker.run expects
        self.queue.put((lambda node: node.run(), (node,), {}))

    def run(self, *args, **kwargs) -> None:
        """Start 10 workers, seed the queue with `startNodes`, join workers."""
        logger.info("Workflow start!")

        class Logger:
            # file-like adapter so Timer can write through the module logger
            def write(self, messages: str):
                for mess in messages.strip('\n').split('\n'):
                    logger.info(mess)

        with Timer(stdout=Logger()):
            workers = []
            for i in range(10):
                worker = Worker(i, self)
                workers.append(worker)
                worker.start()
            logger.debug("All workers started!")
            for node in self.startNodes:
                self.addNodeToQueue(node)
            for worker in workers:
                worker.join()
        logger.info("Workflow finished!")
class Worker(threading.Thread):
    """Thread that pulls (func, args, kwargs) jobs off the workflow queue."""

    def __init__(self, i: int, workflow: Workflow):
        super().__init__()
        # Worker index, used only in log prefixes.
        self.i = i
        self.workflow = workflow
        # Jobs finished by *this* worker (the workflow holds the global count).
        self.nFinished = 0

    def log(self, message, severity=logging.INFO):
        """Log *message* with a [WorkerN] prefix at INFO or DEBUG level."""
        if severity == logging.INFO:
            logger.info(f"[Worker{self.i}]{message}")
        else:
            logger.debug(f"[Worker{self.i}]{message}")

    def debug(self, message):
        """Shorthand for DEBUG-level logging."""
        self.log(message, severity=logging.DEBUG)

    def run(self):
        """Consume jobs until every workflow module has finished."""
        self.debug(f"Starts to work")
        while self.workflow.nFinished != len(self.workflow.modules):
            # Re-check inside the loop body: another worker may have finished
            # the last module between the while-test and here.
            if self.workflow.nFinished == len(self.workflow.modules):
                self.log(f"[{self.nFinished}/{self.workflow.nFinished}] jobs are finished!")
                break
            try:
                # Time how long we wait for (and fetch) the next job.
                with Timer(descStart="Job start to run!", descEnd="Job end to run!") as timeUsed:
                    func, args, kwargs = self.workflow.queue.get(timeout=5)
                self.debug(f"func:{func}\nargs: {args}\nkwargs: {kwargs}")
                self.debug(f"Time used: {timeUsed}")
            except Empty:
                # Queue was empty for 5s; loop back and poll the finish count.
                self.debug("Wait to get job")
                continue
            except Exception as e:
                raise Exception(f'[Worker{self.i}]Bad execution: %s' % str(e))
            try:
                func(*args,**kwargs)
            except Exception as e:
                raise Exception(f'[Worker{self.i}]Bad execution: %s' % str(e))
            else:
                # NOTE(review): `+=` on a shared attribute is not atomic; with
                # many workers the counters could theoretically drop updates.
                self.workflow.nFinished += 1
                self.nFinished += 1
| catwang01/easyflow | easyflow/obj.py | obj.py | py | 8,548 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "easyflow.common.logger.setupLogger",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "typing.Dict",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "typing.Type",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "typing.Typ... |
14241805756 | from sys import exit
from time import sleep, time
from random import randint
import pygame
from pygame.constants import RESIZABLE
# Tetramino definitions on a 4x4 grid. 1 means the tile exists.
TETRAMINO_I = (((0, 0, 0, 0), (0, 0, 0, 0), (1, 1, 1, 1), (0, 0, 0, 0)),
((0, 1, 0, 0), (0, 1, 0, 0), (0, 1, 0, 0), (0, 1, 0, 0)),
((0, 0, 0, 0), (0, 0, 0, 0), (1, 1, 1, 1), (0, 0, 0, 0)),
((0, 1, 0, 0), (0, 1, 0, 0), (0, 1, 0, 0), (0, 1, 0, 0)))
TETRAMINO_J = (((0, 0, 0, 0), (0, 0, 0, 0), (1, 1, 1, 0), (0, 0, 1, 0)),
((0, 0, 0, 0), (0, 1, 1, 0), (0, 1, 0, 0), (0, 1, 0, 0)),
((0, 0, 0, 0), (1, 0, 0, 0), (1, 1, 1, 0), (0, 0, 0, 0)),
((0, 0, 0, 0), (0, 1, 0, 0), (0, 1, 0, 0), (1, 1, 0, 0)))
TETRAMINO_L = (((0, 0, 0, 0), (0, 0, 0, 0), (1, 1, 1, 0), (1, 0, 0, 0)),
((0, 0, 0, 0), (0, 1, 0, 0), (0, 1, 0, 0), (0, 1, 1, 0)),
((0, 0, 0, 0), (0, 0, 1, 0), (1, 1, 1, 0), (0, 0, 0, 0)),
((0, 0, 0, 0), (1, 1, 0, 0), (0, 1, 0, 0), (0, 1, 0, 0)))
TETRAMINO_O = (((0, 0, 0, 0), (0, 0, 0, 0), (0, 1, 1, 0), (0, 1, 1, 0)),
((0, 0, 0, 0), (0, 0, 0, 0), (0, 1, 1, 0), (0, 1, 1, 0)),
((0, 0, 0, 0), (0, 0, 0, 0), (0, 1, 1, 0), (0, 1, 1, 0)),
((0, 0, 0, 0), (0, 0, 0, 0), (0, 1, 1, 0), (0, 1, 1, 0)))
TETRAMINO_S = (((0, 0, 0, 0), (0, 0, 0, 0), (0, 1, 1, 0), (1, 1, 0, 0)),
((0, 0, 0, 0), (0, 1, 0, 0), (0, 1, 1, 0), (0, 0, 1, 0)),
((0, 0, 0, 0), (0, 0, 0, 0), (0, 1, 1, 0), (1, 1, 0, 0)),
((0, 0, 0, 0), (0, 1, 0, 0), (0, 1, 1, 0), (0, 0, 1, 0)))
TETRAMINO_T = (((0, 0, 0, 0), (0, 0, 0, 0), (1, 1, 1, 0), (0, 1, 0, 0)),
((0, 0, 0, 0), (0, 1, 0, 0), (1, 1, 0, 0), (0, 1, 0, 0)),
((0, 0, 0, 0), (0, 1, 0, 0), (1, 1, 1, 0), (0, 0, 0, 0)),
((0, 0, 0, 0), (0, 1, 0, 0), (0, 1, 1, 0), (0, 1, 0, 0)))
TETRAMINO_Z = (((0, 0, 0, 0), (0, 0, 0, 0), (1, 1, 0, 0), (0, 1, 1, 0)),
((0, 0, 0, 0), (0, 0, 1, 0), (0, 1, 1, 0), (0, 1, 0, 0)),
((0, 0, 0, 0), (0, 0, 0, 0), (1, 1, 0, 0), (0, 1, 1, 0)),
((0, 0, 0, 0), (0, 0, 1, 0), (0, 1, 1, 0), (0, 1, 0, 0)))
# Array used for randomly picking tetraminos
TETRAMINOS = [(TETRAMINO_I, (0xFF, 0xFF, 0x00)), (TETRAMINO_J, (0xFF, 0x00, 0x00)),
(TETRAMINO_L, (0xFF, 0x00, 0xFF)), (TETRAMINO_O, (0x00, 0xFF, 0x00)),
(TETRAMINO_S, (0x00, 0xFF, 0xFF)), (TETRAMINO_T, (0x00, 0x00, 0xFF)),
(TETRAMINO_Z, (0x01, 0x82, 0x50))]
# Constant colors
COLOR_BACKGROUND = (0x22, 0x22, 0x22)
COLOR_SHADOW = (0x44, 0x44, 0x44)
COLOR_BORDER = (0xAA, 0xAA, 0xAA)
COLOR_FLASH = (0xFF, 0xFF, 0xFF)
COLOR_PAUSE = (0x00, 0x00, 0x00)
COLOR_TEXT = (0xFF, 0xFF, 0xFF)
# Max framerate
FRAMERATE = 1 / 60
# Time to show that a line has been cleared
FLASH_TIME = 0.5
PREVIEW_OFFSET = 4
KEYDOWN_TIME_CONST = 0.036
# Definition for a tile
class TetrisTile(pygame.Rect):
    """One board cell: a pygame Rect that also tracks occupancy and color."""

    def __init__(self, left, top, width, height, empty, color):
        super().__init__(left, top, width, height)
        # Fill color used when the cell is drawn.
        self.color = color
        # True while no placed tetramino occupies this cell.
        self.empty = empty
class TetrisGame:
def __init__(self):
self.width = 500
self.height = 500
self.rows = 22
self.cols = 10
self.speed = 0.7
self.scale = 11
self.tile_length = 15
self.fallspeed = 1
pygame.init()
self.screen = pygame.display.set_mode(
(self.width, self.height), RESIZABLE)
# Loop gameplay until the player closes the window
# Initialize grid
welcome = True
while True:
self.grid = self.grid = [
[None] * self.cols for _ in range(self.rows)
]
for y in range(self.rows):
for x in range(self.cols):
dy = y * self.tile_length + self.tile_length
dx = x * self.tile_length + self.tile_length
self.grid[y][x] = TetrisTile(
dx, dy, self.tile_length, self.tile_length, True, COLOR_BACKGROUND
)
# Create the grid for the tetris tile preview
self.preview_grid = [[None] * 4 for _ in range(4)]
for y in range(4):
for x in range(4):
dy = y * self.tile_length
dx = x * self.tile_length + \
(self.cols + PREVIEW_OFFSET) * self.tile_length
self.preview_grid[y][x] = pygame.Rect(
dx, dy, self.tile_length, self.tile_length)
# Draw the board
self.draw_everything(init=True, resize=True, welcome=welcome)
pygame.display.flip()
# Initial wait for user to start the game
if welcome:
welcome = False
new_game = False
while not new_game:
frame_time = time()
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.sysexit()
elif event.type == pygame.WINDOWRESIZED:
self.draw_everything(resize=True, welcome=True, init=True)
pygame.display.flip()
elif event.type == pygame.KEYDOWN and event.key == pygame.K_RETURN:
new_game = True
delta = time() - frame_time
if delta < FRAMERATE:
sleep(FRAMERATE - delta)
self.draw_everything(init=True)
# Start the game
self.eventloop()
self.draw_everything(gameover=True)
pygame.display.flip()
new_game = False
while not new_game:
frame_time = time()
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.sysexit()
elif event.type == pygame.WINDOWRESIZED:
self.draw_everything(resize=True, gameover=True)
pygame.display.flip()
elif event.type == pygame.KEYDOWN and event.key == pygame.K_RETURN:
new_game = True
delta = time() - frame_time
if delta < FRAMERATE:
sleep(FRAMERATE - delta)
    # Main event loop. Will block until the game ends.
    def eventloop(self):
        """Run one game: handle input each frame, apply gravity, spawn pieces.

        Returns when a freshly spawned piece cannot be placed (game over).
        """
        self.next_tetramino = None
        self.cur_keydown = None
        self.keydown_time = None
        self.do_next_tetramino()
        # Initial render: shadow, preview, and the piece itself.
        self.draw(color=COLOR_SHADOW, y=self.lowest_y())
        self.draw(next=True)
        self.draw()
        pygame.display.flip()
        gravity_time = time()
        while True:
            frame_time = time()
            self.handle_events()
            # Gravity tick: every self.fallspeed seconds the piece drops one row.
            if time() - gravity_time >= self.fallspeed:
                if self.can_be_placed(y=self.y + 1):
                    # Erase shadow and piece, move down, redraw both.
                    self.draw(color=COLOR_BACKGROUND, y=self.lowest_y())
                    self.draw(color=COLOR_BACKGROUND)
                    self.y += 1
                    self.draw(color=COLOR_SHADOW, y=self.lowest_y())
                    self.draw()
                    pygame.display.flip()
                else:
                    # Piece landed: lock it in and spawn the next one.
                    self.place()
                    self.do_next_tetramino()
                    self.draw(next=True)
                    self.draw(color=COLOR_SHADOW, y=self.lowest_y())
                    self.draw()
                    pygame.display.flip()
                    if not self.can_be_placed():
                        return
                gravity_time = time()
            # Cap the frame rate.
            delta = time() - frame_time
            if delta < FRAMERATE:
                sleep(FRAMERATE - delta)
# Handle game and window controls
def handle_events(self):
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.sysexit()
elif event.type == pygame.WINDOWRESIZED:
self.draw_everything(resize=True)
elif event.type == pygame.KEYUP:
self.cur_keydown = None
self.keydown_time = None
elif event.type == pygame.KEYDOWN:
self.cur_keydown = event.key
self.keydown_time = time()
if event.key in (pygame.K_UP, pygame.K_DOWN, pygame.K_LEFT, pygame.K_RIGHT):
self.move(event.key)
if event.key == pygame.K_SPACE:
self.autoplace()
if event.key == pygame.K_RETURN:
self.pause()
if self.cur_keydown == pygame.K_DOWN and self.keydown_time and time() - self.keydown_time >= KEYDOWN_TIME_CONST:
self.keydown_time = time()
if self.cur_keydown == pygame.K_DOWN:
self.move(self.cur_keydown)
def pause(self):
self.draw_everything(paused=True)
pygame.display.flip()
while True:
frame_time = time()
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.sysexit()
elif event.type == pygame.WINDOWRESIZED:
self.draw_everything(resize=True, paused=True)
pygame.display.flip()
elif event.type == pygame.KEYDOWN and event.key == pygame.K_RETURN:
self.draw_everything()
pygame.display.flip()
return
delta = time() - frame_time
if delta < FRAMERATE:
sleep(FRAMERATE - delta)
# Move the current tetramino in a given direction based on user input
def move(self, direction):
newx = self.x
newy = self.y
newr = self.r
if direction == pygame.K_DOWN:
newy += 1
elif direction == pygame.K_UP:
newr = (self.r + 1) % 4
elif direction == pygame.K_LEFT:
newx -= 1
elif direction == pygame.K_RIGHT:
newx += 1
if self.can_be_placed(x=newx, y=newy, r=newr):
self.draw(color=COLOR_BACKGROUND, y=self.lowest_y())
self.draw(color=COLOR_BACKGROUND)
self.x, self.y, self.r = newx, newy, newr
self.draw(color=COLOR_SHADOW, y=self.lowest_y())
self.draw()
pygame.display.flip()
    # Draw the current tetramino
    # kwargs modify x, y, r drawn
    def draw(self, **kwargs):
        """Draw either the preview piece (next=True) or the falling piece.

        Keyword overrides: x, y select the board position, r the rotation,
        and color overrides the piece color (used for erasing / the shadow).
        """
        if 'next' in kwargs and kwargs['next']:
            # Preview: only the bottom two rows of the 4x4 template are shown.
            dy = 2
            dx = 0
            for row in self.next_tetramino[0][2:]:
                for i in row:
                    if i:
                        pygame.draw.rect(
                            self.screen, self.next_color, self.preview_grid[dy][dx])
                    else:
                        pygame.draw.rect(
                            self.screen, COLOR_BACKGROUND, self.preview_grid[dy][dx])
                    dx += 1
                dy += 1
                dx = 0
        elif not None in kwargs.values():
            # NOTE(review): any None kwarg value silently skips drawing;
            # presumably a guard against callers passing None -- confirm.
            dy = self.y if 'y' not in kwargs else kwargs['y']
            dx = self.x if 'x' not in kwargs else kwargs['x']
            color = self.color if 'color' not in kwargs else kwargs['color']
            for row in self.tetramino[self.r if 'r' not in kwargs else kwargs['r']]:
                for i in row:
                    if i:
                        pygame.draw.rect(self.screen, color, self.grid[dy][dx])
                    dx += 1
                dy += 1
                dx = self.x if 'x' not in kwargs else kwargs['x']
# Place the current tetramino
def place(self):
dy = self.y
dx = self.x
for row in self.tetramino[self.r]:
for i in row:
if i:
self.grid[dy][dx].empty = False
self.grid[dy][dx].color = self.color
dx += 1
dy += 1
dx = self.x
self.lineclear()
    # Place the current tetramino immediately (pressing spacebar)
    def autoplace(self):
        """Hard drop: erase, teleport to the shadow position, lock, respawn."""
        self.draw(color=COLOR_BACKGROUND)
        self.y = self.lowest_y()
        self.draw()
        self.place()
        self.do_next_tetramino()
        # Redraw preview, shadow, and the newly spawned piece.
        self.draw(next=True)
        self.draw(color=COLOR_SHADOW, y=self.lowest_y())
        self.draw()
        pygame.display.flip()
    # Clear filled rows
    def lineclear(self):
        """Find fully occupied rows, flash them, then collapse the board."""
        # Partition rows into full ones (to_clear) and the rest.
        to_clear = []
        not_to_clear = []
        for row in self.grid:
            if any(tile.empty for tile in row):
                not_to_clear.append(row)
            else:
                to_clear.append(row)
        # Return if nothing to do
        if len(to_clear) == 0:
            return
        # Do a flash "animation"
        for row in to_clear:
            pygame.draw.rect(self.screen, COLOR_FLASH, pygame.Rect(
                row[0].left, row[0].top, row[-1].left +
                self.tile_length - row[0].left, self.tile_length
            ))
        pygame.display.flip()
        sleep(FLASH_TIME / 3)
        # Middle of the blink: restore the original tile colors.
        for row in to_clear:
            for tile in row:
                pygame.draw.rect(self.screen, tile.color, tile)
        pygame.display.flip()
        sleep(FLASH_TIME / 3)
        # Second white flash.
        for row in to_clear:
            pygame.draw.rect(self.screen, COLOR_FLASH, pygame.Rect(
                row[0].left, row[0].top, row[-1].left +
                self.tile_length - row[0].left, self.tile_length
            ))
        pygame.display.flip()
        sleep(FLASH_TIME / 3)
        # self.grid is now a reference to to_clear
        # rows in not_to_clear will be added later
        self.grid = to_clear
        amt_rows_cleared = len(to_clear)
        # Reset rows in to_clear to blank and move them to the top
        for y in range(len(to_clear)):
            for x in range(self.cols):
                dy = y * self.tile_length + self.tile_length
                dx = x * self.tile_length + self.tile_length
                to_clear[y][x].empty = True
                to_clear[y][x].color = COLOR_BACKGROUND
                to_clear[y][x].update(
                    dx, dy, self.tile_length, self.tile_length)
        # Update the existing rows
        for i in range(len(not_to_clear)):
            for x in range(self.cols):
                dy = (i + amt_rows_cleared) * \
                    self.tile_length + self.tile_length
                dx = x * self.tile_length + self.tile_length
                not_to_clear[i][x].update(
                    dx, dy, self.tile_length, self.tile_length)
            self.grid.append(not_to_clear[i])
        # Finally, redraw everything
        for row in self.grid:
            for tile in row:
                pygame.draw.rect(self.screen, tile.color, tile)
        pygame.display.flip()
# Select a new random tetramino
def do_next_tetramino(self):
if self.next_tetramino:
self.tetramino = self.next_tetramino
self.color = self.next_color
else:
i = randint(0, len(TETRAMINOS) - 1)
self.tetramino = TETRAMINOS[i][0]
self.color = TETRAMINOS[i][1]
i = randint(0, len(TETRAMINOS) - 1)
self.next_tetramino = TETRAMINOS[i][0]
self.next_color = TETRAMINOS[i][1]
self.x = (self.cols - 1) // 2 - 1
self.y = 0
self.r = 0
if self.fallspeed > 0.1:
self.fallspeed -= 0.005
elif self.fallspeed > 0.05:
self.fallspeed -= 0.0001
# Calculate the lowest (greatest) possible y value for the current tetramino
def lowest_y(self):
dy = self.y + 1
while self.can_be_placed(y=dy):
dy += 1
dy -= 1
return dy
# Return True/False if the current tetramino can/can't be place in its current position
# Modify x, y, or the rotation depending on kwargs
def can_be_placed(self, **kwargs):
dy = self.y if not 'y' in kwargs else kwargs['y']
dx = self.x if not 'x' in kwargs else kwargs['x']
dr = self.r if not 'r' in kwargs else kwargs['r']
for row in self.tetramino[dr]:
for i in row:
if i:
if (dy not in range(0, self.rows) or dx not in range(0, self.cols)) or not self.grid[dy][dx].empty:
return False
dx += 1
dy += 1
dx = self.x if not 'x' in kwargs else kwargs['x']
return True
    def draw_everything(self, **kwargs):
        """Repaint the whole screen.

        Keyword flags: resize recomputes tile sizes from the window size;
        paused draws the pause curtain; init skips the piece/shadow/preview;
        gameover/welcome overlay the corresponding banner text.
        """
        if kwargs.get('resize'):
            # Pick the largest tile size that fits both the board and preview.
            width, height = self.screen.get_size()
            t_h = height // (self.rows + 2)
            t_w = width // (self.cols + PREVIEW_OFFSET + 6)
            new_tile_length = min(t_h, t_w)
            if new_tile_length != self.tile_length:
                self.tile_length = new_tile_length
                for y in range(self.rows):
                    for x in range(self.cols):
                        dy = y * self.tile_length + self.tile_length
                        dx = x * self.tile_length + self.tile_length
                        self.grid[y][x].update(
                            dx, dy, self.tile_length, self.tile_length
                        )
                for y in range(4):
                    for x in range(4):
                        dy = y * self.tile_length
                        dx = x * self.tile_length + \
                            (self.cols + PREVIEW_OFFSET) * self.tile_length
                        self.preview_grid[y][x].update(
                            dx, dy, self.tile_length, self.tile_length
                        )
        self.screen.fill(COLOR_BACKGROUND)
        # One-tile-wide border around the playfield.
        border = pygame.Rect(0, 0,
                             self.tile_length * (self.cols + 2),
                             self.tile_length * (self.rows + 2))
        pygame.draw.rect(self.screen, COLOR_BORDER, border)
        if kwargs.get('paused'):
            # Black curtain hides the board while paused.
            curtain = pygame.Rect(
                self.tile_length,
                self.tile_length,
                self.cols * self.tile_length,
                self.rows * self.tile_length
            )
            pygame.draw.rect(self.screen, COLOR_PAUSE, curtain)
            font1 = pygame.font.Font(
                'freesansbold.ttf',
                int(self.tile_length * 1.7)
            )
            font2 = pygame.font.Font(
                'freesansbold.ttf',
                int(self.tile_length * 1.3)
            )
            s1 = font1.render("PAUSED", True, COLOR_TEXT)
            s2 = font2.render("PRESS ENTER", True, COLOR_TEXT)
            s3 = font2.render("TO UNPAUSE", True, COLOR_TEXT)
            self.screen.blit(s1, (
                (self.tile_length * (self.cols // 2) + self.tile_length) -
                s1.get_size()[0] // 2,
                (self.tile_length * (self.rows // 2)) - + s1.get_size()[1]
            ))
            self.screen.blit(s2, (
                (self.tile_length * (self.cols // 2) + self.tile_length)
                - s2.get_size()[0] // 2,
                (self.tile_length * (self.rows // 2)) + s2.get_size()[1] // 2
            ))
            self.screen.blit(s3, (
                (self.tile_length * (self.cols // 2) + self.tile_length)
                - s3.get_size()[0] // 2,
                (self.tile_length * (self.rows // 2)) +
                s2.get_size()[1] // 2 + s3.get_size()[1]
            ))
        else:
            # Draw every board tile in its current color.
            for row in self.grid:
                for tile in row:
                    pygame.draw.rect(self.screen, tile.color, tile)
            if not kwargs.get('init'):
                self.draw(color=COLOR_SHADOW, y=self.lowest_y())
                self.draw()
                self.draw(next=True)
            if kwargs.get('gameover') or kwargs.get('welcome'):
                font1 = pygame.font.Font(
                    'freesansbold.ttf',
                    int(self.tile_length * 1.5)
                )
                font2 = pygame.font.Font(
                    'freesansbold.ttf',
                    int(self.tile_length * 0.9)
                )
                s1 = font1.render(
                    "GAME OVER" if kwargs.get('gameover') else "WELCOME",
                    True,
                    COLOR_TEXT
                )
                s2 = font2.render("PRESS ENTER TO", True, COLOR_TEXT)
                s3 = font2.render("START A NEW GAME", True, COLOR_TEXT)
                text_begin = (self.tile_length * (self.rows // 2)
                              ) - + s1.get_size()[1]
                text_end = (self.tile_length * (self.rows // 2)) + \
                    s2.get_size()[1] // 2 + s3.get_size()[1]
                # Dark backdrop behind the banner text.
                background = pygame.Rect(
                    self.tile_length,
                    text_begin - self.tile_length,
                    self.cols * self.tile_length,
                    (text_end + s3.get_size()[1] + self.tile_length) -
                    (text_begin - self.tile_length)
                )
                pygame.draw.rect(self.screen, COLOR_PAUSE, background)
                self.screen.blit(s1, (
                    (self.tile_length * (self.cols // 2) + self.tile_length) -
                    s1.get_size()[0] // 2,
                    text_begin
                ))
                self.screen.blit(s2, (
                    (self.tile_length * (self.cols // 2) + self.tile_length)
                    - s2.get_size()[0] // 2,
                    (self.tile_length * (self.rows // 2)) +
                    s2.get_size()[1] // 2
                ))
                self.screen.blit(s3, (
                    (self.tile_length * (self.cols // 2) + self.tile_length)
                    - s3.get_size()[0] // 2,
                    text_end
                ))
        # The "NEXT" label above the preview grid is always drawn.
        font = pygame.font.Font(
            'freesansbold.ttf', int(self.tile_length * 1.5))
        text_next = font.render("NEXT", True, COLOR_TEXT)
        self.screen.blit(text_next,
                         (self.tile_length * (self.cols + PREVIEW_OFFSET), self.tile_length // 2))
    def sysexit(self):
        """Shut pygame down cleanly and terminate the process."""
        pygame.quit()
        exit()
if __name__ == "__main__":
TetrisGame()
| dmcdo/Pygame-Games | tetris.pyw | tetris.pyw | pyw | 22,293 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pygame.Rect",
"line_number": 62,
"usage_type": "attribute"
},
{
"api_name": "pygame.init",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "pygame.display.set_mode",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "pygame.constants... |
14703280517 | from datetime import datetime
from os.path import dirname, join
import pytest
from city_scrapers_core.constants import COMMISSION
from city_scrapers_core.utils import file_response
from freezegun import freeze_time
from city_scrapers.spiders.sf_planning import SfPlanningSpider
# Load a saved copy of the event page so the spider runs offline.
test_response = file_response(
    join(dirname(__file__), "files", "sf_planning.html"),
    url="https://sfplanning.org/event/planning-commission-151",
)
spider = SfPlanningSpider()
# Freeze "now" so status/date-dependent assertions are deterministic.
freezer = freeze_time("2021-10-27")
freezer.start()
parsed_items = [item for item in spider.parse_meeting(test_response)]
freezer.stop()
def test_title():
    """The scraped meeting title matches the fixture page."""
    assert parsed_items[0]["title"] == "Hearing for SF Planning Commission"
def test_description():
    """The description is long; pin only its length, not its full text."""
    assert len(parsed_items[0]["description"]) == 7212
def test_start():
    """The meeting start time is parsed from the fixture page."""
    assert parsed_items[0]["start"] == datetime(2021, 10, 28, 13, 0)
def test_end():
    """The source page publishes no end time."""
    assert parsed_items[0]["end"] is None
def test_time_notes():
    """No additional time notes are expected."""
    assert parsed_items[0]["time_notes"] == ""
def test_id():
    """The id combines spider slug, start timestamp, and slugified title."""
    assert (
        parsed_items[0]["id"]
        == "sf_planning/202110281300/x/hearing_for_sf_planning_commission"
    )
def test_status():
    """With time frozen to 2021-10-27, the 10-28 meeting is still tentative."""
    assert parsed_items[0]["status"] == "tentative"
def test_location():
    """The remote-meeting stream/phone details are parsed into the address."""
    assert parsed_items[0]["location"] == {
        "address": "Stream at https://sfgovtv.org/planning – Public Comment:"
        " (415) 655-0001 / Access Code: 2486 151 4664",
        "name": "SF Planning Commission",
    }
def test_source():
    """The source URL round-trips from the fixture response."""
    assert (
        parsed_items[0]["source"]
        == "https://sfplanning.org/event/planning-commission-151"
    )
def test_links():
    """Agenda PDF and supporting-documents links are both extracted."""
    assert parsed_items[0]["links"] == [
        {
            "href": "https://sfplanning.org/sites/default/files/agendas/"
            "2021-10/20211028_cal.pdf",
            "title": "Meeting/Agenda Information",
        },
        {
            "href": "https://sfplanning.org/resource/"
            "planning-commission-packet-october-28-2021",
            "title": "Supporting Documents",
        },
    ]
def test_classification():
    """Planning Commission meetings are classified as COMMISSION."""
    assert parsed_items[0]["classification"] == COMMISSION
@pytest.mark.parametrize("item", parsed_items)
def test_all_day(item):
    """No parsed meeting is an all-day event."""
    assert item["all_day"] is False
| washabstract/city-scrapers-ca | tests/test_sf_planning.py | test_sf_planning.py | py | 2,234 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "city_scrapers_core.utils.file_response",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "c... |
5125409200 | from heatmappy import Heatmapper
from PIL import Image
import database_func as db
import img_lib
def percent_to_diameter(percent):
    """Map a gaze percentage to a heat-point diameter in pixels.

    0 maps to 0 (no point drawn). Each successive 10%-band adds 50px to a
    150px base, capping at 600 for anything above 90%.
    """
    if percent == 0:
        return 0
    base = 150
    for band, upper in enumerate(range(10, 100, 10)):
        if percent <= upper:
            return base + 50 * band
    return base + 450
def heatmap_creaate(user):
    """Render per-image gaze heatmaps for *user*.

    (Function name typo is kept for existing callers.)
    Exports the user's stored images to data/<user>_<n>.png, then overlays a
    heat point for each of six fixed screen regions, sized by gaze percentage.
    """
    # Dump the user's stored base64-encoded images to disk.
    img_tup = db.select_user_imgstr(user)
    num = 1
    for img_str in img_tup:
        img = img_lib.str_to_img(img_str[0])
        img_lib.img_save(img, user, num)
        num = num + 1
    # Anchor points for the six screen regions (3x2 layout).
    points = [(320, 270), (960, 270), (1600, 270), (320, 810), (960, 810), (1660, 810)]
    info = db.select_user_info(user)
    num = 1
    for gaze in info:
        img_path = 'data/' + user + '_' + str(num) + '.png'
        img = Image.open(img_path)
        for i in range(0, 6):
            point = [points[i]]
            percent = gaze[i + 2]
            diameter = percent_to_diameter(percent)
            if diameter == 0:
                continue
            # A fresh Heatmapper per point because the diameter varies.
            heatmapper = Heatmapper(
                point_diameter=diameter,  # the size of each point to be drawn
                point_strength=1,  # the strength, between 0 and 1, of each point
                opacity=0.6,  # the opacity of the heatmap layer
                colours='default',
                grey_heatmapper='PIL'
            )
            # Fixed: accumulate onto the latest image. The original drew every
            # point onto the untouched base image, so only the last point
            # survived in the saved file.
            img = heatmapper.heatmap_on_img(point, img)
            img.save(img_path)
        num = num + 1
| jinho17/eye_tracking_project | eye_tracking/database/heatmap.py | heatmap.py | py | 2,434 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "database_func.select_user_imgstr",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "img_lib.str_to_img",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "img_lib.img_save",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "... |
5308409788 | ## adsbib.py
## A tool for collecting BibTeX records from NASA ADS.
##
## Call with reference to a plaintext list of bibcodes,
## separated by newlines. Output will be to the same
## filename, appended with .bib
## >> python3 ads-bib.py bibcodes
##
## Note : To strip an existing BibTeX file down to bibcodes with vim,
## :v/^@/d
## :%s/@.*{//g
## :%s/,//g
import ads

## Setup the argument parser
import argparse
parser = argparse.ArgumentParser(description='bibcode to import')
parser.add_argument('bibcode', help='A bibcode for input')
args = parser.parse_args()

## Read the newline-separated bibcodes from the named file
## (the `with` statement closes the file; explicit close() was redundant)
with open(args.bibcode) as f:
    bibcode = f.read().splitlines()

## Query ADS with the set of bibcodes
q = ads.ExportQuery(bibcodes=bibcode, format='bibtex')
bibtex = q.execute()

## Append BibTeX entries to <input>.bib
with open(args.bibcode + '.bib', 'a') as bibfile:
    print(bibtex, file=bibfile)
| lowderchris/ads-bib | ads-bib.py | ads-bib.py | py | 959 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "ads.ExportQuery",
"line_number": 29,
"usage_type": "call"
}
] |
13119405899 | from django.conf.urls import url, include
from . import views
from .models import *
from rest_framework import routers, permissions, serializers, viewsets
from oauth2_provider.ext.rest_framework import TokenHasReadWriteScope, TokenHasScope
class UserProfileSerializer(serializers.HyperlinkedModelSerializer):
    """Serializes UserProfile objects for the REST API."""
    class Meta:
        model = UserProfile
        # Explicit field selection is required by DRF >= 3.3; '__all__'
        # preserves the old implicit behaviour of serializing every field.
        fields = '__all__'
class UserProfileViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for user profiles; requires an authenticated OAuth2 token."""
    queryset = UserProfile.objects.all()
    serializer_class = UserProfileSerializer
    permission_classes = [permissions.IsAuthenticated, TokenHasReadWriteScope]
router = routers.DefaultRouter()
router.register(r'users', UserProfileViewSet)
# router.register(r'leagues', views.league_list)
urlpatterns = [
url(r'^index', views.index),
url(r'^', include(router.urls)),
url(r'^o/', include('oauth2_provider.urls', namespace='oauth2_provider')),
url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
url(r'^leagues/$', views.LeagueList.as_view()),
url(r'^leagues/(?P<pk>[0-9]+)/$', views.LeagueDetail.as_view()),
]
| dammahom/matchpredict | gameapi/urls.py | urls.py | py | 1,075 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "rest_framework.serializers.HyperlinkedModelSerializer",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.serializers",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "rest_framework.viewsets.ModelViewSet",
"line_number": 13,... |
21959415638 | from fastapi import APIRouter, HTTPException
from init_system import system
from schemas.customer_shcema import SignIn, SignUp, SetCart, Email
from models.Cart import CartItem
router = APIRouter(prefix="/customer")
@router.post("/sign_in")
async def customer_login(body: SignIn):
try:
return {
"detail": "Successfully Sign-in",
"data": system.sign_in(body.email, body.password),
}
except ValueError as e:
raise HTTPException(status_code=400, detail=str(e))
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
@router.post("/sign_up")
async def customer_register(body: SignUp):
try:
email = body.email
password = body.password
firstname = body.firstname
lastname = body.lastname
customer = system.create_customer(email, password, firstname, lastname)
return {"detail": "Successfully Sign-up", "data": customer.email}
except ValueError as e:
raise HTTPException(status_code=400, detail=str(e))
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
@router.post("/set_cart_item")
async def add_cart_item(body: SetCart):
try:
email = body.email
product_list = body.product_list
customer = system.get_customer_by_email(email)
if not customer:
raise ValueError("There is no customer with this email.")
cart = customer.cart
cart_items = []
for item in product_list:
category = system.get_category_by_name(item.category)
product = category.get_product_by_id(item.id)
cart_items.append(CartItem(product, item.quantity))
cart.cart_items = cart_items
return {"detail": "Successfully added."}
except ValueError as e:
raise HTTPException(status_code=400, detail=str(e))
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
@router.post("/get_cart_detail")
async def view_cart(body: Email):
try:
email = body.email
customer = system.get_customer_by_email(email)
if not customer:
raise ValueError("There is no customer with this email.")
cart = customer.cart
return {"data": cart.get_detail()}
except ValueError as e:
raise HTTPException(status_code=400, detail=str(e))
except Exception as e:
raise HTTPException(status_code=500, detail=str(e))
| Dope21/python-oop | controllers/customer_ctrl.py | customer_ctrl.py | py | 2,486 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "fastapi.APIRouter",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "schemas.customer_shcema.SignIn",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "init_system.system.sign_in",
"line_number": 14,
"usage_type": "call"
},
{
"api_na... |
34892241691 | from logging import raiseExceptions
from flask import Flask, request, make_response, jsonify
from flask_cors import CORS, cross_origin
import hashlib
from controller import *
app = Flask(__name__)
CORS(app)
Controller = Controller()
@app.route("/ong", methods=["GET", "POST", "PUT"])
@cross_origin()
def ong():
    """
    Create (POST), update (PUT) or list (GET) ONGs.
    """
    if request.method == "POST":
        try:
            payload = request.get_json()
            # Store only an MD5 digest of the password (matches login()).
            # NOTE(review): MD5 is cryptographically weak; migrate to
            # bcrypt/argon2 together with the login path.
            hashed_senha = hashlib.md5(payload['senha'].encode('utf-8')).hexdigest()
            response = Controller.create_ong(
                cnpj=payload['cnpj'] if 'cnpj' in payload else '',
                nome=payload['nome'],
                descricao=payload['descricao'],
                tipo=payload['tipo'] if 'tipo' in payload else '',
                telefone=payload['telefone'] if 'telefone' in payload else '',
                email=payload['email'],
                endereco_cep=payload['endereco_cep'] if 'endereco_cep' in payload else '',
                endereco_num=payload['endereco_num'] if 'endereco_num' in payload else -1,
                # Fixed: the guard used to test the misspelled key
                # 'endereco_completo', so the complement was always dropped.
                endereco_complemento=payload['endereco_complemento'] if 'endereco_complemento' in payload else '',
                senha=hashed_senha
            )
            return jsonify(response)
        except Exception as e:
            # jsonify cannot serialize exception objects; send the message.
            return make_response(jsonify({"Erro": str(e)}), 400)
    elif request.method == "PUT":
        try:
            payload = request.get_json()
            if not set(['id_ong']).issubset(payload):
                raise Exception('Id obrigatórios')
            if 'senha' in payload:
                payload['senha'] = hashlib.md5(payload['senha'].encode('utf-8')).hexdigest()
            response = Controller.update_ong(
                payload=payload
            )
            return jsonify(response)
        except Exception as e:
            return make_response(jsonify({"Erro": str(e)}), 400)
    elif request.method == "GET":
        try:
            response = Controller.get_all_ongs()
            return make_response(jsonify(response), 200)
        except Exception as e:
            # Fixed: was a bare `except:` that then referenced an undefined
            # name `e`, raising NameError instead of returning an error reply.
            return make_response(jsonify({"Erro": str(e)}), 400)
@app.route("/ong/<id>", methods=["GET"])
@cross_origin()
def get_ong(id):
    """
    Return the ONG with the given id.
    """
    try:
        response = Controller.get_ong(id)
        return make_response(jsonify(response), 200)
    except Exception as e:
        # Fixed: was a bare `except:` that then referenced an undefined
        # name `e`, raising NameError instead of returning an error reply.
        return make_response(jsonify({"Erro": str(e)}), 400)
@app.route("/ong/<id>", methods=["DELETE"])
@cross_origin()
def delete_ong(id):
    """Delete the ONG with the given id."""
    try:
        Controller.delete_ong(id)
        # Fixed: the success payload was a *set* literal
        # ({"Sucesso: ..."}), which jsonify cannot serialize.
        response = jsonify({"Sucesso": "ONG has been deleted"})
        return make_response(response, 200)
    except Exception as e:
        # str(e): exception objects are not JSON-serializable.
        return make_response(jsonify({"Erro": str(e)}), 400)
@app.route("/login", methods=["POST"])
@cross_origin()
def login():
    """Authenticate a user of the given type and return the login payload."""
    payload = request.get_json()
    email = payload["email"]
    senha = payload["senha"]
    tipo = payload["tipo"]
    try:
        response = Controller.login(email, senha, tipo)
        return jsonify(response)
    except Exception as e:
        # str(e): exception objects are not JSON-serializable.
        return make_response(jsonify({"Erro": str(e)}), 400)
@app.route("/searchong", methods=["POST"])
@cross_origin()
def search_ong():
    """Search ONGs by optional 'causa' and/or 'nome' filters."""
    payload = request.get_json()
    causa = payload.get("causa")
    nome = payload.get("nome")
    return Controller.search_ong(causa, nome)
if __name__ == "__main__":
    # Development entry point only; debug=True must not be used in production.
    app.run(debug=True)
| BrunoTaufner/RPII | server/app.py | app.py | py | 4,960 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "flask.Flask",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "flask_cors.CORS",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "flask.request.method",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
... |
12211334459 | '''Some helper functions for PyTorch, including:
- get_mean_and_std: calculate the mean and std value of dataset.
- msr_init: net parameter initialization.
- progress_bar: progress bar mimic xlua.progress.
'''
import os
import sys
import time
import math
import torch
import torchvision
import torch.nn as nn
import torch.nn.init as init
import torchvision.transforms as transforms
from TinyImageNetDataset import TinyImageNetDataset
def get_mean_and_std(dataset):
    '''Compute the per-channel mean and std of a dataset.

    Args:
        dataset: a torch Dataset yielding ``(image, target)`` pairs where
            ``image`` is a 3-channel tensor.

    Returns:
        (mean, std): two tensors of shape (3,), averaged over all samples.
    '''
    # One sample per batch; shuffling and worker processes are pointless for a
    # deterministic full pass, and workers can hang in constrained environments.
    dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=False, num_workers=0)
    mean = torch.zeros(3)
    std = torch.zeros(3)
    print('==> Computing mean and std..')
    for inputs, targets in dataloader:
        for i in range(3):
            mean[i] += inputs[:, i, :, :].mean()
            std[i] += inputs[:, i, :, :].std()
    mean.div_(len(dataset))
    std.div_(len(dataset))
    return mean, std
def init_params(net):
    '''Initialize layer parameters of ``net`` in place.

    Conv2d weights get Kaiming-normal init, BatchNorm2d gets weight=1/bias=0,
    Linear weights get a small-std normal; all biases are zeroed when present.
    '''
    for m in net.modules():
        if isinstance(m, nn.Conv2d):
            # Use the non-deprecated in-place initializers (trailing
            # underscore); the old names are removed in recent PyTorch.
            init.kaiming_normal_(m.weight, mode='fan_out')
            # ``if m.bias:`` raises "bool of Tensor with more than one
            # element is ambiguous" -- compare against None instead.
            if m.bias is not None:
                init.constant_(m.bias, 0)
        elif isinstance(m, nn.BatchNorm2d):
            init.constant_(m.weight, 1)
            init.constant_(m.bias, 0)
        elif isinstance(m, nn.Linear):
            init.normal_(m.weight, std=1e-3)
            if m.bias is not None:
                init.constant_(m.bias, 0)
# Terminal width used by progress_bar. ``stty size`` prints nothing when
# stdout is not a terminal (CI, piped output), which used to crash the module
# import with a ValueError on the unpack -- fall back to 80 columns instead.
try:
    _, term_width = os.popen('stty size', 'r').read().split()
    term_width = int(term_width)
except ValueError:
    term_width = 80

TOTAL_BAR_LENGTH = 65.
last_time = time.time()
begin_time = last_time
def progress_bar(current, total, msg=None):
    """Render an in-place textual progress bar on stdout (xlua.progress style).

    Args:
        current: zero-based index of the step that just finished.
        total: total number of steps.
        msg: optional extra text appended after the timing info.
    """
    global last_time, begin_time
    if current == 0:
        begin_time = time.time() # Reset for new bar.
    cur_len = int(TOTAL_BAR_LENGTH*current/total)
    rest_len = int(TOTAL_BAR_LENGTH - cur_len) - 1
    sys.stdout.write(' [')
    for i in range(cur_len):
        sys.stdout.write('=')
    sys.stdout.write('>')
    for i in range(rest_len):
        sys.stdout.write('.')
    sys.stdout.write(']')
    cur_time = time.time()
    step_time = cur_time - last_time
    last_time = cur_time
    tot_time = cur_time - begin_time
    L = []
    L.append(' Step: %s' % format_time(step_time))
    L.append(' | Tot: %s' % format_time(tot_time))
    if msg:
        L.append(' | ' + msg)
    msg = ''.join(L)
    sys.stdout.write(msg)
    # Pad with spaces so leftovers from a longer previous render are erased.
    for i in range(term_width-int(TOTAL_BAR_LENGTH)-len(msg)-3):
        sys.stdout.write(' ')
    # Go back to the center of the bar.
    for i in range(term_width-int(TOTAL_BAR_LENGTH/2)+2):
        sys.stdout.write('\b')
    sys.stdout.write(' %d/%d ' % (current+1, total))
    if current < total-1:
        # More steps to come: carriage return so the next call overwrites us.
        sys.stdout.write('\r')
    else:
        sys.stdout.write('\n')
    sys.stdout.flush()
def format_time(seconds):
    """Format a duration in seconds as a compact string such as ``'1D2h'``.

    At most the two largest non-zero units are shown, chosen from days (D),
    hours (h), minutes (m), seconds (s) and milliseconds (ms); a zero
    duration yields ``'0ms'``.
    """
    remaining = seconds
    days = int(remaining / 3600 / 24)
    remaining -= days * 3600 * 24
    hours = int(remaining / 3600)
    remaining -= hours * 3600
    minutes = int(remaining / 60)
    remaining -= minutes * 60
    whole_seconds = int(remaining)
    millis = int((remaining - whole_seconds) * 1000)

    parts = []
    for value, suffix in ((days, 'D'), (hours, 'h'), (minutes, 'm'),
                          (whole_seconds, 's'), (millis, 'ms')):
        if value > 0 and len(parts) < 2:
            parts.append('%d%s' % (value, suffix))
    return ''.join(parts) if parts else '0ms'
def save_checkpoint(state, is_best, filename):
    """Serialize ``state`` to ``<filename>.pth.tar``.

    When ``is_best`` is true the checkpoint is additionally copied to
    ``<filename>_best.pth.tar``.
    """
    # Local import: shutil is not imported at module level, so the original
    # code raised NameError the first time is_best was true.
    import shutil

    torch.save(state, filename + ".pth.tar")
    if is_best:
        shutil.copyfile(filename + ".pth.tar", filename + "_best.pth.tar")
def load_checkpoint(path, model, optimizer=None):
    """Restore model (and optionally optimizer) state from a checkpoint.

    Args:
        path: checkpoint filename produced by ``save_checkpoint``.
        model: module whose ``state_dict`` is restored (non-strict).
        optimizer: if given, its state is restored as well.

    Returns:
        ``(best_prec, last_epoch)`` when ``optimizer`` is provided and the
        file exists; ``None`` otherwise (including when ``path`` is missing).
    """
    # Local import: logging is not imported at module level, so the original
    # code raised NameError whenever the checkpoint file existed.
    import logging

    if os.path.isfile(path):
        logging.info("=== loading checkpoint '{}' ===".format(path))
        checkpoint = torch.load(path)
        model.load_state_dict(checkpoint["state_dict"], strict=False)
        if optimizer is not None:
            best_prec = checkpoint["best_prec"]
            last_epoch = checkpoint["last_epoch"]
            optimizer.load_state_dict(checkpoint["optimizer"])
            logging.info(
                "=== done. also loaded optimizer from "
                + "checkpoint '{}' (epoch {}) ===".format(path, last_epoch + 1)
            )
            return best_prec, last_epoch
def get_data_loader(transform_train, transform_test, config):
    """Build train/test DataLoaders for the dataset named in ``config.dataset``.

    Supported datasets: ``cifar10``, ``cifar100`` and ``tiny-imagenet``;
    data is downloaded into ``config.data_path`` when absent.

    Returns:
        (train_loader, test_loader)

    Raises:
        ValueError: if ``config.dataset`` is not one of the supported names.
    """
    if config.dataset == "cifar10":
        trainset = torchvision.datasets.CIFAR10(
            root=config.data_path, train=True, download=True, transform=transform_train
        )
        testset = torchvision.datasets.CIFAR10(
            root=config.data_path, train=False, download=True, transform=transform_test
        )
    elif config.dataset == "cifar100":
        trainset = torchvision.datasets.CIFAR100(
            root=config.data_path, train=True, download=True, transform=transform_train
        )
        testset = torchvision.datasets.CIFAR100(
            root=config.data_path, train=False, download=True, transform=transform_test
        )
    elif config.dataset == "tiny-imagenet":
        trainset = TinyImageNetDataset(
            root=config.data_path, download=True, mode='train', task='classification', transform=transform_train
        )
        testset = TinyImageNetDataset(
            root=config.data_path, download=True, mode='val', task='classification', transform=transform_test
        )
    else:
        # Fail fast with a clear message instead of hitting an opaque
        # NameError on ``trainset`` below.
        raise ValueError("Unsupported dataset: %r" % (config.dataset,))
    train_loader = torch.utils.data.DataLoader(
        trainset, batch_size=config.batch_size, shuffle=True, num_workers=config.workers
    )
    test_loader = torch.utils.data.DataLoader(
        testset, batch_size=config.test_batch, shuffle=False, num_workers=config.workers
    )
    return train_loader, test_loader
def data_augmentation(config, is_train=True):
    """Build the list of torchvision transforms described by ``config``.

    Training mode optionally adds random crop / horizontal flip before
    ``ToTensor`` and cutout after normalization; normalization statistics are
    chosen per dataset name.
    """
    transform_list = []

    if is_train:
        if config.augmentation.random_crop:
            transform_list.append(transforms.RandomCrop(config.input_size, padding=4))
        # horizontal flip (config key spelling follows the project config)
        if config.augmentation.random_horizontal_filp:
            transform_list.append(transforms.RandomHorizontalFlip())

    transform_list.append(transforms.ToTensor())

    # normalize [- mean / std] with per-dataset statistics
    if config.augmentation.normalize:
        normalize_stats = {
            "cifar10": ((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
            "cifar100": ((0.5071, 0.4867, 0.4408), (0.2675, 0.2565, 0.2761)),
            "tiny-imagenet": ((0.4775, 0.4806, 0.4805), (0.1592, 0.1611, 0.1653)),
        }
        if config.dataset in normalize_stats:
            mean, std = normalize_stats[config.dataset]
            transform_list.append(transforms.Normalize(mean, std))

    if is_train and config.augmentation.cutout:
        transform_list.append(
            Cutout(n_holes=config.augmentation.holes, length=config.augmentation.length)
        )
    return transform_list
| zarekxu/QuadraLib | image_classification/utils.py | utils.py | py | 7,113 | python | en | code | 6 | github-code | 6 | [
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "torch.utils",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "torch.zeros",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "torch.zeros"... |
26109711840 | """
The customers resource is a representation of the customer accounts.
All the REST API calls to the Customer or the Address Database are housed here.
Customers Service with Swagger and Flask RESTX
Paths:
------
GET / - Displays a UI for Selenium testing
GET /customers - Lists a list all of Customers
GET /customers/{customer_id} - Reads the Customer with given Customer ID
POST /customers - Creates a new Customer in the database
PUT /customers/{customer_id} - Updates a Customer with given customer ID
DELETE /customers/{customer_id} - Deletes a Customer with given ID
GET /customers/{customer_id}/addresses - Lists all the addresses of the customer with given ID
GET /customers/{customer_id}/addresses/{address_id} - Reads the Address with given ID of the customer with given ID
POST /customers/{customer_id}/addresses - Creates a new address of the customer with given Customer ID
PUT /customers/{customer_id}/addresses/{address_id} - Updates the address with given address ID of customer with given ID
DELETE /customers/{customer_id}/addresses/{address_id} - Deletes the address with given address ID of customer with given ID
PUT /customers/{customer_id}/activate - Activates a Customer with given Customer ID
PUT /customers/{customer_id}/deactivate - Deactivates a Customer with given Customer ID
"""
# pylint: disable=cyclic-import
from flask import jsonify
# from flask_restx import Api, Resource
from flask_restx import fields, reqparse, inputs, Resource
from service.common import status # HTTP Status Codes
from service.models import Customer, Address
# Import Flask application
from . import app, api
# ---------------------------------------------------------------------------
# Swagger / Flask-RESTX API models: the request and response schemas consumed
# by the ``@api.expect`` / ``@api.marshal_with`` decorators below.
# ---------------------------------------------------------------------------
# Payload schema for creating an Address (no server-assigned id yet).
create_address_model = api.model('Address', {
    'street': fields.String(required=True, description='The address street'),
    'city': fields.String(required=True, description='The address city'),
    'state': fields.String(required=True, description='The address state'),
    'country': fields.String(description='The address country'),
    'pin_code': fields.String(required=True, description='The address pin code'),
    'customer_id': fields.Integer(required=True, description='The customer ID corresponding to the Address')
})
# Response schema: the create model plus the server-assigned address_id.
address_model = api.inherit(
    'AddressModel',
    create_address_model,
    {
        'address_id': fields.Integer(readOnly=True, description='The unique id assigned internally by service')
    }
)
# Payload schema for creating a Customer (addresses optional).
create_customer_model = api.model('Customer', {
    'first_name': fields.String(required=True, description='The First Name of the customer'),
    'last_name': fields.String(required=True, description='The Last Name of the customer'),
    'password': fields.String(required=True, description='The password of the customer'),
    'email': fields.String(required=True, description='The email of the customer'),
    'active': fields.Boolean(required=True, description='The active/inactive state of the customer'),
    'addresses': fields.List(fields.Nested(address_model,
                                           required=False,
                                           description='List of addresses that the customer has'))
})
# Response schema: the create model plus the server-assigned id.
customer_model = api.inherit(
    'CustomerModel',
    create_customer_model,
    {
        'id': fields.Integer(readOnly=True, description='The unique id assigned internally by service'),
    }
)
# query string arguments
# Optional query-string filters accepted by GET /customers.
customer_args = reqparse.RequestParser()
customer_args.add_argument('first_name', type=str, location='args', required=False, help='Find Customers by First Name')
customer_args.add_argument('last_name', type=str, location='args', required=False, help='Find Customers by Last Name')
customer_args.add_argument('email', type=str, location='args', required=False, help='Find Customers by Email')
customer_args.add_argument('active', type=inputs.boolean, location='args', required=False, help='Is the Customer active?')
customer_args.add_argument('street', type=str, location='args', required=False, help='Find Customers by Address street')
customer_args.add_argument('city', type=str, location='args', required=False, help='Find Customers by Address city')
customer_args.add_argument('state', type=str, location='args', required=False, help='Find Customers by Address state')
customer_args.add_argument('country', type=str, location='args', required=False, help='Find Customers by Address country')
customer_args.add_argument('pin_code', type=str, location='args', required=False, help='Find Customers by Address Pin Code')
############################################################
# Health Endpoint
############################################################
@app.route("/health")
def health():
    """Health check endpoint; always reports the service as up."""
    payload = jsonify({"status": "OK"})
    return payload, status.HTTP_200_OK
######################################################################
# GET INDEX
######################################################################
@app.route('/')
def index():
    """Root URL response"""
    # Serves the static single-page UI (used for Selenium testing per the
    # module docstring).
    app.logger.info("Request for Root URL")
    return app.send_static_file('index.html')
######################################################################
# R E S T A P I E N D P O I N T S
######################################################################
######################################################################
# PATH: /customers/{customer_id}
######################################################################
@api.route('/customers/<int:customer_id>')
@api.param('customer_id', 'The Customer identifier')
class CustomerResource(Resource):
    """
    CustomerResource class
    Allows the manipulation of a single customer
    GET /customer{customer_id} - Returns a Customer with the customer_id
    PUT /customer{customer_id} - Update a Customer with the customer_id
    DELETE /customer{customer_id} - Deletes a Customer with the customer_id
    """
    # ------------------------------------------------------------------
    # RETRIEVE A CUSTOMER
    # ------------------------------------------------------------------
    @api.doc('get_customers')
    @api.response(404, 'Customer not found')
    @api.marshal_with(customer_model)
    def get(self, customer_id):
        """
        Retrieve a single Customer
        This endpoint will return a Customer based on its ID.
        """
        app.logger.info("Request to Retrieve a Customer with id [%s]", customer_id)
        customer = Customer.find(customer_id)
        if not customer:
            # ``abort`` is the logging helper defined at the bottom of this
            # module (it raises via api.abort).
            abort(status.HTTP_404_NOT_FOUND, f"Customer with id '{customer_id}' was not found.")
        app.logger.info('Returning customer: %s', customer.id)
        return customer.serialize(), status.HTTP_200_OK
    # ------------------------------------------------------------------
    # UPDATE AN EXISTING CUSTOMER
    # ------------------------------------------------------------------
    @api.doc('update_customers')
    @api.response(404, 'Customer not found')
    @api.response(400, 'The posted Customer data was not valid')
    @api.expect(customer_model)
    @api.marshal_with(customer_model)
    def put(self, customer_id):
        """
        Update a Customer
        This endpoint will update a Customer based on the body that is posted.
        """
        app.logger.info('Request to Update a Customer with id [%s]', customer_id)
        customer = Customer.find(customer_id)
        original_password = None
        if not customer:
            abort(status.HTTP_404_NOT_FOUND, f"Customer with id '{customer_id}' was not found.")
        else:
            # Capture the stored password before the payload overwrites the
            # object; it is handed back to update() below.
            original_password = customer.password
        app.logger.debug('Payload = %s', api.payload)
        data = api.payload
        customer.deserialize(data)
        customer.id = customer_id
        # NOTE(review): update() is given the original password here, while
        # the activate/deactivate endpoints call update() with no argument --
        # confirm Customer.update supports both call forms.
        customer.update(original_password)
        app.logger.info('Customer with ID [%s] updated.', customer.id)
        return customer.serialize(), status.HTTP_200_OK
    # ------------------------------------------------------------------
    # DELETE A CUSTOMER
    # ------------------------------------------------------------------
    @api.doc('delete_customers')
    @api.response(204, 'Customer deleted')
    def delete(self, customer_id):
        """
        Delete a Customer
        This endpoint will delete a Customer based on the ID specified in the path.
        """
        app.logger.info('Request to Delete a Customer with id [%s]', customer_id)
        customer = Customer.find(customer_id)
        # Deletion is idempotent: a missing customer still yields 204.
        if customer:
            customer.delete()
            app.logger.info('Customer with id [%s] was deleted', customer_id)
        return '', status.HTTP_204_NO_CONTENT
######################################################################
# PATH: /customers
######################################################################
@api.route('/customers', strict_slashes=False)
class CustomerCollection(Resource):
    """ Handles all interactions with collections of Customers """
    # ------------------------------------------------------------------
    # LIST ALL CUSTOMERS
    # ------------------------------------------------------------------
    @api.doc('list_customers')
    @api.expect(customer_args, validate=True)
    @api.marshal_list_with(customer_model)
    def get(self):
        """
        Lists all of the Customers
        This endpoint will list all the customers.
        """
        app.logger.info('Request to list customers...')
        customers = []
        args = customer_args.parse_args()
        # Filters are mutually exclusive: the first non-empty query parameter
        # in the order below wins; with no filters, all customers are listed.
        if args['first_name']:
            app.logger.info('Filtering by first name: %s', args['first_name'])
            customers = Customer.find_by_first_name(args['first_name'])
        elif args['last_name']:
            app.logger.info('Filtering by last name: %s', args['last_name'])
            customers = Customer.find_by_last_name(args['last_name'])
        elif args['active'] is not None:
            app.logger.info('Filtering by active state: %s', args['active'])
            customers = Customer.find_by_active(args['active'])
        elif args['email']:
            app.logger.info('Filtering by email: %s', args['email'])
            customers = Customer.find_by_email(args['email'])
        elif args['street']:
            app.logger.info('Filtering by street: %s', args['street'])
            customers = Address.find_by_street(args['street'])
        elif args['city']:
            app.logger.info('Filtering by city: %s', args['city'])
            customers = Address.find_by_city(args['city'])
        elif args['state']:
            app.logger.info('Filtering by state: %s', args['state'])
            customers = Address.find_by_state(args['state'])
        elif args['country']:
            app.logger.info('Filtering by country: %s', args['country'])
            customers = Address.find_by_country(args['country'])
        elif args['pin_code']:
            app.logger.info('Filtering by pin code: %s', args['pin_code'])
            customers = Address.find_by_pin_code(args['pin_code'])
        else:
            app.logger.info('Returning unfiltered list.')
            customers = Customer.all()
        # app.logger.info('[%s] Customers returned', len(customers))
        # NOTE(review): the Address.find_by_* branches query the Address model
        # but the results are serialized and marshalled as customers --
        # confirm those queries return Customer-shaped rows.
        results = [customer.serialize() for customer in customers]
        return results, status.HTTP_200_OK
    # ------------------------------------------------------------------
    # ADD A NEW CUSTOMER
    # ------------------------------------------------------------------
    @api.doc('create_customers')
    @api.response(400, 'The posted data was not valid')
    @api.expect(create_customer_model)
    @api.marshal_with(customer_model, code=201)
    def post(self):
        """
        Creates a Customer
        This endpoint will create a Customer based on the data in the body that is posted.
        """
        app.logger.info('Request to Create a Customer')
        customer = Customer()
        app.logger.debug('Payload = %s', api.payload)
        customer.deserialize(api.payload)
        customer.create()
        app.logger.info('Customer with new id [%s] created!', customer.id)
        # The Location header points clients at the newly created resource.
        location_url = api.url_for(CustomerResource, customer_id=customer.id, _external=True)
        return customer.serialize(), status.HTTP_201_CREATED, {'Location': location_url}
######################################################################
# Activate / Deactivate Customer
######################################################################
######################################################################
# PATH: /customers/{customer_id}/activate
######################################################################
@api.route('/customers/<int:customer_id>/activate')
@api.param('customer_id', 'The Customer identifier')
class ActivateResource(Resource):
    """ Activate actions on a Customer """
    @api.doc('activate_customers')
    @api.response(404, 'Customer not found')
    def put(self, customer_id):
        """
        Activate a Customer
        This endpoint will activate a Customer.
        """
        app.logger.info(f'Request to Activate a Customer with ID: {customer_id}')
        customer = Customer.find(customer_id)
        if not customer:
            abort(status.HTTP_404_NOT_FOUND, f'Customer with id [{customer_id}] was not found.')
        # Re-assert the primary key, flip the flag, then persist.
        # NOTE(review): update() is called with no argument here, while
        # CustomerResource.put passes the original password -- confirm
        # Customer.update supports both call forms.
        customer.id = customer_id
        customer.active = True
        customer.update()
        app.logger.info('Customer with id [%s] has been activated!', customer.id)
        return customer.serialize(), status.HTTP_200_OK
######################################################################
# PATH: /customers/{customer_id}/deactivate
######################################################################
@api.route('/customers/<int:customer_id>/deactivate')
@api.param('customer_id', 'The Customer identifier')
class DeactivateResource(Resource):
    """ Deactivate actions on a Customer """
    @api.doc('deactivate_customers')
    @api.response(404, 'Customer not found')
    def put(self, customer_id):
        """
        Deactivate a Customer
        This endpoint will deactivate a Customer.
        """
        app.logger.info(f'Request to Deactivate a Customer with ID: {customer_id}')
        customer = Customer.find(customer_id)
        if not customer:
            abort(status.HTTP_404_NOT_FOUND, f'Customer with id [{customer_id}] was not found.')
        # Mirror image of ActivateResource.put: flip the flag off and persist.
        customer.id = customer_id
        customer.active = False
        customer.update()
        app.logger.info('Customer with id [%s] has been deactivated!', customer.id)
        return customer.serialize(), status.HTTP_200_OK
######################################################################
# PATH: /customers/{customer_id}/addresses/{address_id}
######################################################################
@api.route('/customers/<int:customer_id>/addresses/<int:address_id>')
@api.param('customer_id', 'The Customer identifier')
@api.param('address_id', 'The Address identifier')
class AddressResource(Resource):
    """
    AddressResource class
    Allows the manipulation of a single Address
    GET /customers/{customer_id}/addresses/{address_id} - Returns an Address with the id
    PUT /customers/{customer_id}/addresses/{address_id} - Update an Address with the id
    DELETE /customers/{customer_id}/addresses/{address_id} - Deletes an Address with the id
    """
    # ------------------------------------------------------------------
    # RETRIEVE AN ADDRESS
    # ------------------------------------------------------------------
    @api.doc('get_addresses')
    @api.marshal_with(address_model)
    @api.response(404, 'Address not found')
    def get(self, address_id, customer_id):
        """
        Retrieve an address
        This endpoint will return an address from a customer based on its ID.
        """
        app.logger.info('Request to retrieve an Address %s from Customer with id: %s', address_id, customer_id)
        customer = Customer.find(customer_id)
        if not customer:
            abort(
                status.HTTP_404_NOT_FOUND,
                f"Customer with id '{customer_id}' was not found.",
            )
        address = Address.find(address_id)
        # The address must both exist and belong to the requested customer.
        if not address or address.customer_id != customer.id:
            abort(
                status.HTTP_404_NOT_FOUND,
                f"Address with id '{address_id}' could not be found for the customer with id {customer.id}.",
            )
        app.logger.info('Returning address: %s', address.address_id)
        return address.serialize(), status.HTTP_200_OK
    # ------------------------------------------------------------------
    # UPDATE AN EXISTING ADDRESS
    # ------------------------------------------------------------------
    @api.doc('update_addresses')
    @api.response(404, 'Address not found')
    @api.expect(address_model)
    @api.marshal_with(address_model)
    def put(self, address_id, customer_id):
        """
        Update an address of a customer
        This endpoint will update an Address based on the body that is posted.
        """
        app.logger.info('Request to Address with address_id [%s] and customer_id [%s] ...', address_id, customer_id)
        customer = Customer.find(customer_id)
        if not customer:
            abort(status.HTTP_404_NOT_FOUND, f"Customer with id '{customer_id}' was not found.")
        # Find customer address with address_id
        addr_to_update = None
        for addr in customer.addresses:
            if addr.address_id == address_id:
                addr_to_update = addr
                break
        # if not found
        if not addr_to_update:
            abort(status.HTTP_404_NOT_FOUND, f"Address id '{address_id}' not found for customer '{customer_id}'.")
        data = api.payload
        addr_to_update.deserialize(data)
        # Re-assert both ids so the payload cannot re-parent the address.
        addr_to_update.address_id = address_id
        addr_to_update.customer_id = customer_id
        addr_to_update.update()
        app.logger.info('Address with address_id [%s] and customer_id [%s] updated.', address_id, customer.id)
        return addr_to_update.serialize(), status.HTTP_200_OK
    # ------------------------------------------------------------------
    # DELETE AN ADDRESS
    # ------------------------------------------------------------------
    @api.doc('delete_addresses')
    @api.response(204, 'Address deleted')
    def delete(self, address_id, customer_id):
        """
        Delete an address from a customer
        This endpoint will delete an Address based on the ID specified in the path.
        """
        app.logger.info('Request to delete address with address_id [%s] and customer_id [%s] ...', address_id, customer_id)
        address = Address.find(address_id)
        # Deletion is idempotent: a missing or foreign address still yields 204.
        if address and address.customer_id == customer_id:
            address.delete()
        app.logger.info('Address with ID [%s] and customer ID [%s] delete completed.', address_id, customer_id)
        return '', status.HTTP_204_NO_CONTENT
######################################################################
# PATH: /customers/{customer_id}/addresses
######################################################################
@api.route('/customers/<int:customer_id>/addresses', strict_slashes=False)
@api.param('customer_id', 'The Customer identifier')
class AddressCollection(Resource):
    """ Handles all interactions with collections of addresses """
    # ------------------------------------------------------------------
    # LIST ALL ADDRESSES FOR A CUSTOMER
    # ------------------------------------------------------------------
    @api.doc('list_addresses')
    @api.marshal_list_with(address_model)
    def get(self, customer_id):
        """
        List all of the addresses of a Customer
        This endpoint will list all addresses of a Customer.
        """
        app.logger.info('Request to list Addresses for Customer with id: %s', customer_id)
        customer = Customer.find(customer_id)
        if not customer:
            abort(status.HTTP_404_NOT_FOUND, f"Customer with id '{customer_id}' was not found.")
        results = [address.serialize() for address in customer.addresses]
        app.logger.info("Returning %d addresses", len(results))
        return results, status.HTTP_200_OK
    # ------------------------------------------------------------------
    # ADD A NEW ADDRESS FOR A CUSTOMER
    # ------------------------------------------------------------------
    @api.doc('create_addresses')
    @api.response(400, 'The posted data was not valid')
    @api.expect(create_address_model)
    @api.marshal_with(address_model, code=201)
    def post(self, customer_id):
        """
        Create an address for a customer
        This endpoint will add a new address for a customer.
        """
        app.logger.info('Request to create an address for customer with id: %s', customer_id)
        customer = Customer.find(customer_id)
        if not customer:
            abort(status.HTTP_404_NOT_FOUND, f"Customer with id '{customer_id}' was not found.")
        # Create an address instance for the customer = customer_id
        data = api.payload
        address = Address()
        address.deserialize(data)
        # Appending to the relationship and updating the parent persists the
        # new address; its address_id is available afterwards for the
        # Location header.
        customer.addresses.append(address)
        customer.update()
        location_url = api.url_for(AddressResource,
                                   customer_id=address.customer_id,
                                   address_id=address.address_id,
                                   _external=True)
        app.logger.info('Address with ID [%s] created for Customer: [%s].', address.address_id, customer.id)
        return address.serialize(), status.HTTP_201_CREATED, {"Location": location_url}
######################################################################
# U T I L I T Y F U N C T I O N S
######################################################################
def abort(error_code: int, message: str):
    """Log ``message`` at error level, then abort the request via Flask-RESTX.

    Module-level helper used by every route handler above so all early exits
    are logged consistently.
    """
    app.logger.error(message)
    api.abort(error_code, message)
| CSCI-GA-2820-SP23-003/customers | service/routes.py | routes.py | py | 21,967 | python | en | code | 3 | github-code | 6 | [
{
"api_name": "flask_restx.fields.String",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "flask_restx.fields",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "flask_restx.fields.String",
"line_number": 35,
"usage_type": "call"
},
{
"api_name":... |
30823116350 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render, redirect, get_object_or_404
# from django.views.decorators.http import require_POST
from shop.models import Product
from .models import Cart, CartItem
# from .forms import CartAddProductForm
from django.contrib.auth.decorators import login_required
# from .forms import CartAddProductForm
@login_required
def cart_add(request, product_id, product_qty=None):
    """Add one unit of ``product_id`` to the current user's cart.

    ``product_qty`` is accepted for URLconf compatibility but unused here.
    Redirects to the cart detail page.
    """
    cart, _ = Cart.objects.update_or_create(user=request.user)
    product = get_object_or_404(Product, id=product_id)
    item, item_created = CartItem.objects.update_or_create(
        cart=cart, product=product)
    item.price = product.price
    if not item_created:
        # Product already in the cart: bump the quantity instead of
        # leaving the stored value untouched.
        item.quantity = item.quantity + 1
    cart.items.add(item)
    item.save()
    cart.save()
    return redirect('cart:cart_detail')
@login_required
def cart_add_q(request, product_id, product_qty=None):
    """Set the cart quantity of ``product_id`` from the ``q`` query parameter.

    ``q == "0"`` removes the product from the cart; any other value replaces
    the stored quantity. ``product_qty`` is accepted for URLconf compatibility
    but unused. Redirects to the cart detail page.
    """
    cart, _ = Cart.objects.update_or_create(user=request.user)
    product = get_object_or_404(Product, id=product_id)
    item, _ = CartItem.objects.update_or_create(
        cart=cart, product=product)
    item.price = product.price
    item.quantity = request.GET['q']
    if request.GET['q'] == "0":
        # Zero quantity means "remove this product from the cart".
        item.delete()
    else:
        cart.items.add(item)
        item.save()
        cart.save()
    return redirect('cart:cart_detail')
# form = CartAddProductForm(request.POST)
# if form.is_valid():
# cd = form.cleaned_data
# item.quantity=cd['quantity'],
@login_required
def cart_remove(request, product_id):
    """Remove all cart items for ``product_id`` from the current user's cart.

    ``@login_required`` added for consistency with the other views: this view
    dereferences ``request.user``, which fails for anonymous users.
    """
    cart, _ = Cart.objects.update_or_create(user=request.user)
    product = get_object_or_404(Product, id=product_id)
    cart_items = CartItem.objects.filter(cart=cart, product=product)
    cart_items.delete()
    return redirect('cart:cart_detail')
@login_required
def cart_detail(request):
    """Render the current user's cart."""
    # get_or_create: a user who has never added an item gets an empty cart
    # page instead of a Cart.DoesNotExist 500 error.
    cart, _ = Cart.objects.get_or_create(user=request.user)
    return render(request, 'cart/cart_detail.html', {'cart': cart})
| studiosemicolon/onlineshop | cart/views.py | views.py | py | 2,091 | python | en | code | 23 | github-code | 6 | [
{
"api_name": "models.Cart.objects.update_or_create",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "models.Cart.objects",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "models.Cart",
"line_number": 15,
"usage_type": "name"
},
{
"api_nam... |
911107140 | from collections import Counter
import re
from xml.etree import ElementTree
from trapdoor import TrapdoorProgram, Message, run_command
# Lines matching any of these patterns are ignored by the coverage analysis.
exclusion_rules = [
    re.compile(r'^[\s]*raise NotImplementedError')
]


def excluded_from_coverage(source_line):
    """Determine if the given line should be excluded from the coverage analysis."""
    # The loop variable used to be named ``re``, shadowing the imported
    # ``re`` module for the rest of the function.
    for pattern in exclusion_rules:
        if pattern.match(source_line) is not None:
            return True
    return False
class CoverageTrapdoorProgram(TrapdoorProgram):
    """A trapdoor program running nosetests with coverage analysis."""
    def __init__(self):
        """Initialize the CoverageTrapdoorProgram."""
        TrapdoorProgram.__init__(self, 'coverage')
    def add_argparse_arguments(self, parser):
        """Add command-line arguments to the argument parser.
        Parameters
        ----------
        parser : argparse.ArgumentParser
            The parser to which arguments must be added.
        """
        TrapdoorProgram.add_argparse_arguments(self, parser)
        parser.add_argument('--nproc', type=int, default=1,
                            help='Number of parallel processes when running nose. '
                            '[default=%(default)s]')
    def get_stats(self, config, args):
        """Run tests using nosetests with coverage analysis.
        Parameters
        ----------
        config : dict
            The dictionary loaded from ``trapdoor.cfg``.
        args : argparse.Namespace
            The result of parsing the command line arguments.
        Returns
        -------
        counter : collections.Counter
            Counts of the number of messages of a specific type in a certain file.
        messages : Set([]) of strings
            All errors encountered in the current branch.
        """
        # Get version
        command = ['nosetests', '--version']
        print('USING :', run_command(command, verbose=False)[0].strip())
        command = ['coverage', '--version']
        print('USING :', run_command(command, verbose=False)[0].split('\n')[0])
        # Results will be stored in the following variables
        counter = Counter()
        messages = set([])
        # Run fast unit tests with nosetests, with coverage
        command = ['nosetests', '-v', '-A', 'not (slow or rt)',
                   '--with-coverage',
                   '--cover-erase',
                   '--cover-branches',
                   '--cover-package=%s' % ','.join(config['py_packages'])] + \
                   config['py_directories']
        if args.nproc > 1:
            command.extend(['--processes=%s' % args.nproc,
                            '--process-timeout=600'])
        output = run_command(command)[0]
        lines = [line.strip() for line in output.split('\n')]
        # Parse the output of the unit tests: stop at the first blank line and
        # record every FAIL / ERROR result line as a message.
        # NOTE(review): ``iline`` is incremented but never read -- dead
        # bookkeeping left over from an earlier version?
        iline = 0
        for line in lines:
            if len(line) == 0:
                break
            elif line.endswith('FAIL'):
                counter['unit_tests_failed'] += 1
                messages.add(Message(None, None, None, 'nosetests ' + line))
            elif line.endswith('ERROR'):
                counter['unit_tests_error'] += 1
                messages.add(Message(None, None, None, 'nosetests ' + line))
            iline += 1
        # Run the coverage program for a full report. This separate call is needed
        # since coverage-4.1.
        fn_coverage = '%s/coverage.xml' % self.qaworkdir
        command = ['coverage', 'xml', '-o', fn_coverage,
                   '--omit=%s' % ','.join(config['py_test_files'])]
        output = run_command(command)[0]
        # Parse coverage xml output: every line with zero hits (minus the
        # exclusion rules) becomes a "Missed line" message, and each missed
        # branch destination becomes a "Missed branch" message.
        et = ElementTree.parse(fn_coverage)
        for class_tag in et.getroot().iter('class'):
            filename = class_tag.attrib['filename']
            with open(filename) as fsource:
                source_lines = fsource.readlines()
            for line_tag in class_tag.iter('line'):
                if line_tag.attrib['hits'] == '0':
                    line = int(line_tag.attrib['number'])
                    if excluded_from_coverage(source_lines[line-1]):
                        continue
                    branch_ends = line_tag.get('missing-branches')
                    if branch_ends is not None:
                        for branch_end in branch_ends.split(','):
                            if branch_end.isdigit():
                                # Numeric destination: report it relative to
                                # the branching line.
                                delta = int(branch_end) - line
                                msg = Message(filename, line, None,
                                              'Missed branch to line %+i' % (delta))
                            else:
                                # Symbolic destination such as "exit".
                                msg = Message(filename, line, None,
                                              'Missed branch to %s' % branch_end)
                            messages.add(msg)
                            counter[filename] += 1
                    messages.add(Message(filename, line, None, 'Missed line'))
                    counter[filename] += 1
        return counter, messages
if __name__ == '__main__':
    # Command-line entry point: run the coverage trapdoor end to end.
    CoverageTrapdoorProgram().main()
| theochem/horton | tools/qa/trapdoor_coverage.py | trapdoor_coverage.py | py | 5,168 | python | en | code | 83 | github-code | 6 | [
{
"api_name": "re.compile",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "re.match",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "trapdoor.TrapdoorProgram",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "trapdoor.TrapdoorProgram.... |
646604887 | import copy
import logging
import os
from gunpowder.caffe.net_io_wrapper import NetIoWrapper
from gunpowder.ext import caffe
from gunpowder.nodes.generic_predict import GenericPredict
from gunpowder.volume import VolumeType, Volume
logger = logging.getLogger(__name__)
class StupidPredict(object):
    '''Standalone caffe predictor: loads a trained network once and exposes
    it as a callable that maps a dict of input-layer data to a dict of
    output-layer data (no gunpowder pipeline involved).
    Args:
        prototxt (string): Filename of the network prototxt.
        weights (string): Filename of the network weights.
        inputs (dict): Dictionary from the names of input layers in the
            network to :class:``VolumeType`` or batch attribute name as string.
        outputs (dict): Dictionary from the names of output layers in the
            network to :class:``VolumeType``. New volumes will be generated by
            this node for each entry (if requested downstream).
        volume_specs (dict, optional): An optional dictionary of
            :class:`VolumeType` to :class:`VolumeSpec` to set the volume specs
            generated volumes (``outputs``). This is useful to set the
            ``voxel_size``, for example, if they differ from the voxel size of
            the input volumes. Only fields that are not ``None`` in the given
            :class:`VolumeSpec` will be used.
            NOTE(review): currently accepted but never stored or used here.
        use_gpu (int): Which GPU to use. Set to ``None`` for CPU mode.
    '''
    def __init__(
            self,
            prototxt,
            weights,
            inputs,
            outputs,
            volume_specs=None,
            use_gpu=None):
        # Fail fast if either network file is missing.
        for f in [prototxt, weights]:
            if not os.path.isfile(f):
                raise RuntimeError("%s does not exist"%f)
        self.prototxt = prototxt
        self.weights = weights
        self.inputs = inputs
        self.outputs = outputs
        # Device selection must happen before the net is constructed.
        if use_gpu is not None:
            logger.debug("Predict process: using GPU %d"%use_gpu)
            caffe.enumerate_devices(False)
            caffe.set_devices((use_gpu,))
            caffe.set_mode_gpu()
            caffe.select_device(use_gpu, False)
        self.net = caffe.Net(self.prototxt, self.weights, caffe.TEST)
        # Wrapper that handles blob I/O for the requested output layers.
        self.net_io = NetIoWrapper(self.net, self.outputs.values())
    def __call__(self, input_data):
        """Run one forward pass.
        *input_data* maps input-layer names to arrays; returns a dict with
        one entry per requested output layer.
        """
        assert isinstance(input_data, dict)
        self.net_io.set_inputs({
            input_name: data
            for input_name, data in input_data.items()
        })
        self.net.forward()
        output = self.net_io.get_outputs()
        return output
| constantinpape/gunpowder-experiments | experiments/inference/stupid_predict.py | stupid_predict.py | py | 2,500 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 49,
"usage_type": "attribute"
},
{
"api_name": "gunpowder.ext.caffe.enu... |
19373198646 | import time
from selenium import webdriver
from selenium.webdriver.common.by import By
url = 'http://parsinger.ru/selenium/1/1.html'
# Values typed into the six form fields, in page order.
text = ['Name', 'Surname', 'Sursurname', 'Age', 'City', 'EMAIL']
with webdriver.Chrome() as browser:
    browser.get(url)
    # All form inputs on this training page share the CSS class "form".
    inputs = browser.find_elements(By.CLASS_NAME, 'form')
    button = browser.find_element(By.ID, 'btn')
    for i, item in enumerate(inputs):
        print(item.text)
        item.send_keys(text[i])
    button.click()
    # Keep the browser open long enough to inspect the result page.
    time.sleep(50)
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.CLASS_NAME",
"line_number": 11,
"usage_type": "attribute"
},... |
28774253567 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: Leonardo La Rocca
"""
import melopero_RV_3028 as mp
import datetime
import gpiozero as gpio
from signal import pause
def main():
    """Configure the RV-3028 RTC from the host clock, arm an alarm two
    minutes ahead, and print a message on every alarm interrupt (GPIO4,
    active-low). Blocks forever waiting for interrupts."""
    # First initialize and create the rtc device
    rtc = mp.RV_3028()
    # Set the device to use the 24hour format (default) instead of the 12 hour format
    rtc.set_12h_format(False)
    # Then set the date and time from the host's local clock.
    current_datetime = datetime.datetime.now()
    rtc.set_time(current_datetime.hour, current_datetime.minute, current_datetime.second)
    # The RTC stores a two-digit year, hence `% 2000`.
    rtc.set_date(current_datetime.weekday(), current_datetime.day, current_datetime.month, current_datetime.year % 2000)
    # First disable other sources of interrupts so only the alarm fires.
    rtc.enable_timer(enable=False, repeat=False, generate_interrupt=False)
    rtc.enable_periodic_time_update_interrupt(generate_interrupt=False)
    rtc.clear_interrupt_flags()
    # Set the alarm to trigger 2 minutes from now.
    # BUG FIX: the original `current_datetime.minute + 2 % 60` parses as
    # `minute + (2 % 60)` == minute + 2, which yields an invalid value of
    # 60 or 61 when run at minute 58/59. Wrap the whole sum into 0-59.
    minute_alarm = (current_datetime.minute + 2) % 60
    rtc.set_minute_alarm(minute_alarm)
    rtc.enable_alarm(enable=True, generate_interrupt=True)
    print("Alarm set to trigger two minutes from now...")
    print("The alarm will trigger every hour at minute {}".format(minute_alarm))
    # interrupt routine, invoked on each falling edge of the INT line
    def on_interrupt():
        print("Alarm: beep beep")
        print(rtc.get_time())
        print("Press CTRL + C to terminate program...")
        rtc.clear_interrupt_flags()
    # set the pin to listen to interrupts (RTC INT line is active-low)
    int_listener_pin = "GPIO4"
    interrupt = gpio.Button(int_listener_pin, pull_up=None, active_state=False)
    interrupt.when_pressed = on_interrupt
    pause()
if __name__ == "__main__":
    # Run the alarm demo when executed directly.
    main()
{
"api_name": "melopero_RV_3028.RV_3028",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": ... |
36242169771 | """
RUNBASE-IMP
HTML scraping bot for monitoring Adidas Runners events
Author: Francesco Ramoni
francesco[dot]ramoni@email.it
https://github.com/framoni/
"""
import json
from lxml import html
from selenium import webdriver
import time
from twilio.rest import Client
#-------------------------------------------------------------------------------
# PARAMETERS
# url to be scraped
ar_url = 'https://www.adidas.it/adidasrunners/community/milano'
# request header (desktop Chrome UA so the site serves the full page)
user_agent = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36'
# SMS body sent once signup is done
message = 'Le iscrizioni agli eventi Adidas Runners di questa settimana sono state effettuate. runbase-imp'
# twilio data: JSON object with email, pass, twilio_client, twilio_token,
# phone_to and phone_from keys (read line by line; the last line wins)
with open("runbase-imp-param.json") as j:
    for line in j:
        td = json.loads(line)
# set webdriver options: headless Chrome with the UA above
options = webdriver.ChromeOptions()
options.add_argument('headless')
options.add_argument(f'user-agent={user_agent}')
#-------------------------------------------------------------------------------
# FUNCTIONS
# function to scrape event url
def scrape_event_url(event_name):
    """Return the signup URL of the "SENZA spogliatoio" event titled
    *event_name*, located on the Adidas Runners community page."""
    browser.get(ar_url)
    link = browser.find_element_by_xpath(
        '//a[contains(div//div//h3, "{}") and contains(., "SENZA spogliatoio")]'.format(event_name))
    return link.get_attribute("href")
# function to sign up to an event
def event_signup(event_name, do_login):
    """Open the event page for *event_name* and click the signup button.

    When *do_login* is True, authenticate first and pause 10 seconds to
    work around a UI glitch that briefly shows a second login form.
    """
    print("Event: {}".format(event_name))
    target = scrape_event_url(event_name)
    browser.get(target)
    if do_login:
        login()
        time.sleep(10)
    signup = browser.find_element_by_xpath('//*[@title="Iscriviti"]')
    browser.execute_script("arguments[0].click();", signup)
# function to login to the portal
def login():
    """Authenticate on the current page with credentials from the config."""
    # Open the login form.
    trigger = browser.find_element_by_xpath('//*[@title="Accedi"]')
    browser.execute_script("arguments[0].click();", trigger)
    # Fill in the credentials and submit.
    browser.find_element_by_id('email').send_keys(td['email'])
    browser.find_element_by_id('password').send_keys(td['pass'])
    submit = browser.find_element_by_xpath('//*[@title="Invia"]')
    browser.execute_script("arguments[0].click();", submit)
#-------------------------------------------------------------------------------
# SCRAPING
# create a new driver
browser = webdriver.Chrome(chrome_options=options)
# Implicit wait applies to every element lookup below.
browser.implicitly_wait(60)
print("Signing up to the Adidas Runners events... ")
# sign up to events of interest (authenticate only on the first one;
# the session cookie carries over to the second)
event_signup('MONDAY HEROES', True)
event_signup('ROAD TO YOUR BEST', False)
# close the driver
browser.quit()
# send a SMS to notify that the signups were performed
print("Notifying via SMS... ")
client = Client(td['twilio_client'], td['twilio_token'])
client.messages.create(to=td['phone_to'], from_=td['phone_from'], body=message)
print("Job done. ")
| framoni/runbase-imp | main.py | main.py | py | 3,011 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "json.loads",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.ChromeOptions",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "time.s... |
1796292061 | from hashlib import md5
from typing import Union
def hash_encode(data: Union[str, bytes],
                return_bytes: bool = False) -> Union[str, bytes]:
    """Return the MD5 digest of *data*.

    Args:
        data: Input text or raw bytes; text is encoded to UTF-8 first.
        return_bytes: When True, return the raw 16-byte digest instead of
            the 32-character hexadecimal string.
    """
    payload = data.encode() if isinstance(data, str) else data
    digest = md5(payload)
    if return_bytes:
        return digest.digest()
    return digest.hexdigest()
| FZQ0003/Qi-Bot | utils/hash.py | hash.py | py | 307 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "typing.Union",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "hashlib.md5",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "typing.Union",
"line_number": 6,
"usage_type": "name"
}
] |
32583976944 | import json
import logging
import os
import threading
from time import sleep
from tqdm import tqdm
from logger import get_logger
# SSH aliases of every benchmark machine in the testbed.
machines = [
    '4GB-rpi-4B-alpha',
    '4GB-rpi-4B-beta',
    '2GB-rpi-4B-beta',
    '2GB-rpi-4B-alpha',
    'cloud1',
    'cloud2',
    'desktop-remote'
]
# SSH alias -> LAN IP address of each machine.
ips = {
    '4GB-rpi-4B-alpha': '10.0.0.101',
    '4GB-rpi-4B-beta': '10.0.0.102',
    '2GB-rpi-4B-beta': '10.0.0.104',
    '2GB-rpi-4B-alpha': '10.0.0.103',
    'cloud1': '10.0.0.201',
    'cloud2': '10.0.0.202',
    'desktop-remote': '10.0.0.1'}
# IP of the machine hosting the Master and RemoteLogger containers.
masterIP = '10.0.0.1'
# The Master waits for this many Actors before scheduling work.
minActors = len(machines)
class Experiment:
    """Drives repeated FogBus2 benchmark runs.

    All container management is done by shelling out (``os.system``) to
    docker-compose and helper shell scripts, both locally and — via ssh —
    on the remote machines listed in the module-level ``machines`` list.
    """
    def __init__(self):
        # Absolute path of the directory the script was launched from;
        # used to build absolute paths for log files below.
        self.currPath = os.path.abspath(os.path.curdir)
        self.logger = get_logger('Experiment', level_name=logging.DEBUG)
    def stopAllContainers(self):
        """Stop every container on the local machine."""
        self.logger.info(
            'Stopping all containers on where this script is running ...')
        os.system('./stopContainer.sh > /dev/null 2>&1')
        # self.logger.info('Stopped all containers on where this script is running')
    def runRemoteLogger(self):
        """Start the RemoteLogger container in the background (note the
        trailing '&' — the call returns immediately)."""
        global masterIP
        self.logger.info('Starting RemoteLogger ...')
        os.system(
            'cd ./newLogger && '
            'docker-compose run '
            '--rm '
            '--name RemoteLogger '
            'remote_logger '
            'RemoteLogger '
            '%s 5001 '
            '%s 5000 '
            '> /dev/null 2>&1 &' % (masterIP, masterIP))
        # self.logger.info('Ran RemoteLogger')
    def runMaster(self, schedulerName, initWithLog=False):
        """Start the Master container with the given scheduler.

        Args:
            schedulerName: scheduling algorithm name passed to the Master.
            initWithLog: when True, pass '--initWithLog True' so the Master
                initializes its estimation model from previous logs.
        """
        global masterIP, minActors
        self.logger.info('Starting Master ...')
        os.system(
            'cd ./newMaster && '
            'docker-compose run '
            '--rm '
            '--name Master '
            'master '
            'Master '
            '%s 5000 '
            '%s 5001 '
            '%s '
            '--minHosts %d '
            '%s '
            '> /dev/null 2>&1 &'
            % (
                masterIP,
                masterIP,
                schedulerName,
                minActors,
                '--initWithLog True' if initWithLog else ''))
        # self.logger.info('Ran Master')
    def runActor(self):
        """Start a local Actor container pointing at the Master."""
        global masterIP
        self.logger.info('Starting Actor ...')
        os.system(
            'cd ./newActor && '
            'docker-compose run '
            '--rm '
            '--name Actor '
            'Actor '
            'Actor '
            '%s '
            '%s 5000 '
            '%s 5001 '
            '> /dev/null 2>&1 &' % (
                masterIP,
                masterIP,
                masterIP))
        self.logger.info('Ran Actor')
    def runUser(self):
        """Start a User container running the GameOfLifePyramid workload
        (128 world size, headless)."""
        self.logger.info('Starting User ...')
        os.system(
            'cd ./newUser && '
            'docker-compose run '
            '--rm '
            '--name User '
            'user '
            'User '
            '%s '
            '%s 5000 '
            '%s 5001 '
            'GameOfLifePyramid '
            '128 '
            '--no-show '
            '> /dev/null 2>&1 &' % (
                masterIP,
                masterIP,
                masterIP))
        self.logger.info('Ran User')
    def stopUser(self):
        """Stop the User container on the local machine."""
        self.logger.info('Stopping User ...')
        os.system('./stopContainer.sh User > /dev/null 2>&1')
        self.logger.info('Stopped User')
    @staticmethod
    def readResponseTime(filename):
        """Read the first response time from a JSON mapping file, then
        delete the file. Returns 0 when the mapping is empty."""
        with open(filename, 'r') as f:
            responseTime = json.loads(f.read())
            f.close()
        os.system('rm -f %s' % filename)
        if len(responseTime):
            return list(responseTime.values())[0]
        return 0
    def removeLogs(self):
        """Delete cached profiler state (Logger and Master side) left over
        from previous experiment runs."""
        os.system(
            'rm -rf %s/newLogger/sources/profiler/medianPackageSize.json' % self.currPath)
        os.system(
            'rm -rf %s/newLogger/sources/profiler/nodeResources.json' % self.currPath)
        os.system(
            'rm -rf %s/newLogger/sources/profiler/imagesAndRunningContainers.json' % self.currPath)
        os.system(
            'rm -rf %s/newLogger/sources/profiler/medianProcessTime.json' % self.currPath)
        os.system(
            'rm -rf %s/newLogger/sources/profiler/medianDelay.json' % self.currPath)
        os.system(
            'rm -rf %s/newLogger/sources/profiler/medianResponseTime.json' % self.currPath)
        os.system(
            'rm -rf %s/newLogger/sources/profiler/medianPackageSize.json' % self.currPath)
        os.system(
            'rm -rf %s/newMaster/sources/profiler/nodeResources.json' % self.currPath)
        os.system(
            'rm -rf %s/newMaster/sources/profiler/imagesAndRunningContainers.json' % self.currPath)
        os.system(
            'rm -rf %s/newMaster/sources/profiler/medianProcessTime.json' % self.currPath)
        os.system(
            'rm -rf %s/newMaster/sources/profiler/medianDelay.json' % self.currPath)
        os.system(
            'rm -rf %s/newMaster/sources/profiler/medianResponseTime.json' % self.currPath)
        os.system('rm -f %s/newMaster/sources/decisions.json' % self.currPath)
        self.logger.info('Removed logs')
    def stopLocalTaskExecutor(self):
        """Stop TaskExecutor containers on the local machine."""
        self.logger.info('Stopping local TaskExecutors ...')
        os.system('./stopContainer.sh TaskExecutor > /dev/null 2>&1')
        # self.logger.info('Stopped local TaskExecutors')
    @staticmethod
    def _sshRunScript(machine, script, event, synchronized=False):
        """Run *script* on *machine* over ssh; set *event* when done.

        When synchronized is False the remote command is backgrounded
        ('&'), so the event only signals that the ssh call was launched.
        """
        if synchronized:
            tmp = ''
        else:
            tmp = '&'
        # runActor.sh needs the actor's own IP plus the master's IP twice.
        if script == './runActor.sh':
            script = '%s %s %s %s' % (script, ips[machine], masterIP, masterIP)
            print(script)
        os.system('ssh %s \'%s\' > /dev/null 2>&1 %s' % (machine, script, tmp))
        event.set()
    @staticmethod
    def manageRpi(runnable, script, synchronized=False):
        """Run *runnable(machine, script, ...)* on every machine in
        parallel threads and wait for all of them to signal completion."""
        global machines
        events = [threading.Event() for _ in machines]
        for i, machine in enumerate(machines):
            threading.Thread(
                target=runnable,
                args=[machine, script, events[i], synchronized]).start()
        for event in events:
            event.wait()
    def stopRemoteTaskExecutor(self):
        """Stop TaskExecutor containers on all remote machines."""
        self.logger.info('Stopping remote TaskExecutors ...')
        self.manageRpi(self._sshRunScript, './stopTaskExecutors.sh')
        # self.logger.info('Stopped remote TaskExecutors')
    def stopRemoteActors(self):
        """Stop Actor containers on all remote machines (synchronously)."""
        self.logger.info('Stopping remote Actors ... ')
        self.manageRpi(self._sshRunScript, './stopActor.sh', synchronized=True)
        # self.logger.info('Stopped remote Actors')
    def runRemoteActors(self):
        """Start Actor containers on all remote machines (synchronously)."""
        self.logger.info('Starting remote Actors ...')
        self.manageRpi(self._sshRunScript, './runActor.sh', synchronized=True)
        # self.logger.info('Ran remote Actors')
    def rerunNecessaryContainers(self, schedulerName, initWithLog=False):
        """Tear down and restart the whole testbed: RemoteLogger, Master
        and all remote Actors."""
        self.stopAllContainers()
        self.stopRemoteActors()
        self.runRemoteLogger()
        self.runMaster(schedulerName, initWithLog)
        # self.runActor()
        sleep(5)
        self.runRemoteActors()
        sleep(1)
    def run(
            self,
            schedulerName,
            initWithLog,
            roundNum=None,
            targetRound=None,
            removeLog=False,
            repeatTimes=100,
            userMaxWaitTime=200):
        """Run the workload *repeatTimes* times and save response times.

        Each iteration starts a User, waits (up to *userMaxWaitTime*
        seconds) for its responseTime.json, records the value and the
        Master's estimation record, and retries after a full testbed
        restart on timeout.
        """
        responseTimeFilePath = '%s/newUser/sources/log/responseTime.json' % self.currPath
        os.system('rm -f %s > /dev/null 2>&1' % responseTimeFilePath)
        responseTimes = [0 for _ in range(repeatTimes)]
        if removeLog:
            self.removeLogs()
        self.rerunNecessaryContainers(
            schedulerName,
            initWithLog)
        if roundNum is None:
            desc = schedulerName
        else:
            desc = '[%s-%d/%d]' % (schedulerName, roundNum, targetRound)
        i = 0
        processBar = tqdm(
            total=repeatTimes,
            desc=desc)
        sleep(2)
        while i < repeatTimes:
            self.runUser()
            # self.logger.debug('Waiting for responseTime log file to be created ...')
            sleepCount = 0
            while not os.path.exists(responseTimeFilePath):
                sleepCount += 1
                sleep(1)
                if sleepCount > userMaxWaitTime:
                    break
            if sleepCount > userMaxWaitTime:
                # Timed out: restart everything and retry this iteration.
                # NOTE(review): the retry restarts with initWithLog's
                # default (False) rather than the caller's value — confirm
                # this is intentional.
                self.rerunNecessaryContainers(schedulerName)
                continue
            self.stopUser()
            responseTimes[i] = self.readResponseTime(
                responseTimeFilePath)
            self.saveEstimatedRecord(
                schedulerName,
                roundNum,
                i,
                initWithLog)
            i += 1
            processBar.update(1)
            self.logger.info('[*] Result-[%d/%d]: %s', i, repeatTimes,
                             str(responseTimes))
        self.stopLocalTaskExecutor()
        self.stopRemoteTaskExecutor()
        self.saveRes(
            schedulerName,
            responseTimes,
            roundNum,
            initWithLog=initWithLog)
        self.logger.info(responseTimes)
    def runInitWithLog(
            self,
            initWithLog,
            roundNum,
            iterNum):
        """Warm-up pass with the NSGA2 scheduler: run the workload
        *iterNum* times, archiving the Master's record after each run."""
        schedulerName = 'NSGA2'
        recordPath = './newMaster/sources/record.json'
        os.system('rm -f %s' % recordPath)
        self.rerunNecessaryContainers(
            schedulerName,
            initWithLog)
        sleep(2)
        for i in tqdm(range(iterNum)):
            self.runUser()
            while not os.path.exists(recordPath):
                sleep(1)
            self.saveEstimatedRecord(
                schedulerName,
                roundNum,
                i,
                initWithLog=initWithLog)
            self.stopUser()
        self.logger.info('Done init with log')
    @staticmethod
    def saveEstimatedRecord(
            algorithmName,
            roundNum,
            iterationNum,
            initWithLog=False):
        """Archive the Master's record.json as
        Evaluation-<algorithm>[InitWithLog]-<round>-<iteration>.json."""
        os.system('mv '
                  './newMaster/sources/record.json '
                  './Evaluation-%s-%d-%d.json' % (
                      '%s%s' % (
                          algorithmName,
                          'InitWithLog' if initWithLog else ''),
                      roundNum,
                      iterationNum))
    @staticmethod
    def saveRes(
            schedulerName,
            responseTimes,
            roundNum,
            initWithLog):
        """Dump the list of response times to
        <scheduler>[InitWithLog][-<round>].json."""
        fix = 'InitWithLog' if initWithLog else ''
        if roundNum is None:
            filename = '%s.json' % schedulerName
        else:
            filename = '%s%s-%d.json' % (
                schedulerName,
                fix,
                roundNum)
        with open(filename, 'w+') as f:
            json.dump(responseTimes, f)
            f.close()
if __name__ == '__main__':
    experiment = Experiment()
    # Number of rounds, repetitions per round, and the per-iteration
    # timeout (seconds) before the testbed is restarted.
    targetRound_ = 1
    repeatTimes_ = 100
    waitTime = 300
    # experiment.runInitWithLog(
    #     initWithLog=True,
    #     roundNum=targetRound_,
    #     iterNum=repeatTimes_)
    for num in range(targetRound_):
        # experiment.run(
        #     'NSGA3',
        #     False,
        #     num + 1,
        #     targetRound_,
        #     repeatTimes=repeatTimes_,
        #     removeLog=True,
        #     userMaxWaitTime=waitTime)
        experiment.run(
            'NSGA2',
            True,
            num + 1,
            targetRound_,
            repeatTimes=repeatTimes_,
            removeLog=False,
            userMaxWaitTime=waitTime)
        # experiment.run(
        #     'NSGA2',
        #     False,
        #     num + 1,
        #     targetRound_,
        #     repeatTimes=repeatTimes_,
        #     removeLog=True,
        #     userMaxWaitTime=waitTime)
{
"api_name": "os.path.abspath",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "logger.get_logger",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"l... |
41978220901 | from task_3 import Bucket, Unbucketed, JoinBuckets
from pyspark.sql import SparkSession
from pyspark.sql.types import StructType, StructField, StringType, DateType, IntegerType
from datetime import datetime
import pytest
# BUG FIX: the original module used `F.date_format` below without ever
# importing `F`, which raised NameError as soon as this test module was
# imported. Import pyspark.sql.functions under its conventional alias.
from pyspark.sql import functions as F

spark = SparkSession.builder.appName("Clients").getOrCreate()
# schema for trx_table
# NOTE(review): 'ndc11' is declared StringType but the rows below supply
# ints (1, 2) — confirm Spark's schema verification accepts this.
schema = StructType([
    StructField('ndc11', StringType()),
    StructField('invoice_amount', IntegerType()),
    StructField('invoice_quantity', IntegerType()),
    StructField('bu_dir.state', StringType()),
    StructField('bu_whl.state', StringType()),
    StructField('invoice_date', DateType()),
])
# data for trx_table
data = [(1, 100, 10, 'USA', 'USA', datetime(2019, 1, 3)),
        (1, 24, 20, 'NM', 'NM', datetime(2019, 2, 24)),
        (1, 200, 23, 'USA', 'USA', datetime(2019, 1, 20)),
        (2, 270, 14, 'USA', 'USA', datetime(2019, 3, 15)),
        (2, 340, 55, 'USA', 'PR', datetime(2019, 2, 17)),
        (2, 312, 34, 'NM', 'USA', datetime(2019, 2, 4)),
        (2, 425, 22, None, 'USA', datetime(2019, 2, 9))
        ]
# create test_df
trx_table = spark.createDataFrame(data, schema=schema)
# add column 'month' in yyyyMM form, derived from invoice_date
trx_table = trx_table.withColumn('month', F.date_format('invoice_date', 'yyyyMM'))
# from task_4 import Bucket, prepare_df_with_month
# import pytest
# from datetime import datetime
# from pyspark.sql import functions as F
# from pyspark.sql import Column
# from pyspark.sql.types import StructType, StructField, DateType
#
#
# @pytest.fixture
# def bucket(trx_table):
# agg_cols = [(F.col('invoice_amount'), 'cost'), (F.col('invoice_quantity'), 'quan')]
# bucket = Bucket(trx_table, [trx_table['`bu_dir.state`'] == 'USA'], ['ndc11', 'month'], agg_cols, 'dir_sls_')
# return bucket
#
#
# def test_adjust_prefix(bucket, trx_table):
# bucket = bucket.adjust_prefix(trx_table)
# assert 'dir_sls_invoice_amount' in bucket.columns
# assert 'ndc11' in bucket.columns
#
#
# def test_get_aggregations(bucket):
# assert isinstance(bucket.get_aggregations()[0], Column)
#
#
# @pytest.mark.parametrize('expect_cols', ['ndc11', 'dir_sls_cost'])
# def test_create_bucket(bucket, expect_cols):
# actual_bucket = bucket.create_bucket()
#
# assert expect_cols in actual_bucket.columns
# assert actual_bucket.collect()[0]['dir_sls_cost'] == 300
#
#
# @pytest.mark.parametrize('row, expect', [((datetime(2019, 1, 3),), '201901'),
# ((datetime(2020, 3, 4),), '202003')])
# def test_prepare_df_with_month(spark, row, expect):
# df = spark.createDataFrame([row], schema=StructType([StructField('invoice_date', DateType())]))
# actual_df = prepare_df_with_month(df)
# assert 'month' in actual_df.columns
# assert actual_df.collect()[0]['month'] == expect | rkrvchnk/pyspark_tasks | tests/test_task_3.py | test_task_3.py | py | 2,853 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pyspark.sql.SparkSession.builder.appName",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.SparkSession.builder",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "pyspark.sql.SparkSession",
"line_number": 9,
"usage_type": "... |
42929655074 | from collections import defaultdict
class Solution:
    """Merge accounts that share at least one email (LeetCode 721)."""

    def accountsMerge(self, accounts):
        """Group accounts connected by shared emails.

        Each account is [name, email1, email2, ...]. Two accounts belong to
        the same person iff they share an email. The merged entry is the
        name followed by the sorted union of the component's emails; output
        order follows the first account index of each component.
        """
        # email -> indices of the accounts that list it (graph adjacency).
        owners = defaultdict(list)
        for idx, account in enumerate(accounts):
            for email in account[1:]:
                owners[email].append(idx)
        seen = [False] * len(accounts)
        merged = []
        for idx, account in enumerate(accounts):
            if seen[idx]:
                continue
            # Iterative DFS over the component containing account idx.
            emails = set()
            stack = [idx]
            seen[idx] = True
            while stack:
                cur = stack.pop()
                for email in accounts[cur][1:]:
                    emails.add(email)
                    for nxt in owners[email]:
                        if not seen[nxt]:
                            seen[nxt] = True
                            stack.append(nxt)
            merged.append([account[0]] + sorted(emails))
        return merged
# Quick manual smoke test of the implementation.
obj = Solution()
accounts = [["John","johnsmith@mail.com","john_newyork@mail.com"],["John","johnsmith@mail.com","john00@mail.com"],["Mary","mary@mail.com"],["John","johnnybravo@mail.com"]]
ans = obj.accountsMerge(accounts)
print(ans)
| shwetakumari14/Leetcode-Solutions | Miscellaneous/Python/721. Accounts Merge.py | 721. Accounts Merge.py | py | 1,279 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "collections.defaultdict",
"line_number": 5,
"usage_type": "call"
}
] |
5430866729 | from airflow import DAG
from datetime import datetime, timedelta
from airflow.operators.python import PythonOperator
# Defaults applied to every task attached to the DAG.
default_args = {
    'owner': 'airflow',
    'start_date': datetime(2023, 7, 16),
    'retries': 1,
}
def print_hello():
    # The returned string is pushed to XCom by the PythonOperator.
    return "Hello World from Airflow!"
dag = DAG(
    dag_id="hello_airflow",
    description="Hello World Program in Airflow",
    # Run every 10 minutes.
    schedule_interval=timedelta(minutes=10),
    start_date=datetime(2023, 7, 16),
    default_args= default_args
)
hello_operator = PythonOperator(task_id='hello_task', python_callable=print_hello, dag=dag)
# NOTE(review): the bare expression below is a no-op; the operator is
# already registered with the DAG via the dag= argument above.
hello_operator
{
"api_name": "datetime.datetime",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "airflow.DAG",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
... |
70926690428 | """
youtube_downloader.py notes:
- May occasionally have errors. Just re-run.
- Caches to prevent duplicate downloading of videos.
"""
from pytube import YouTube
def download_youtube(video_url, videoname='0'):
    """Download the best progressive MP4 stream for *video_url*.

    Skips the download when the URL is already recorded in the cache;
    otherwise records it and saves the video as videos/<videoname>.
    """
    if check_cache(video_url):
        print("youtube_downloader.py: Video already exists.")
        return
    append_cache(video_url)
    streams = YouTube(video_url).streams
    best = streams.filter(progressive=True, file_extension='mp4') \
                  .order_by('resolution')[-1]
    best.download(output_path='videos', filename=videoname)
# Cache prevents downloading of duplicate videos from similar search terms
def append_cache(text, cachefile="video_indexer/downloaded.txt"):
    """Append *text* as a new line to the cache file."""
    with open(cachefile, "a") as cache:
        cache.write("%s\n" % text)
def read_cache(cachefile="video_indexer/downloaded.txt"):
    """Return the cached entries as a list of whitespace-stripped lines."""
    with open(cachefile, 'r') as cache:
        return [entry.strip() for entry in cache]
def check_cache(text, cachefile="video_indexer/downloaded.txt"):
    """Return True when *text* is already recorded in the cache file."""
    return text in read_cache(cachefile)
{
"api_name": "pytube.YouTube",
"line_number": 17,
"usage_type": "call"
}
] |
24812924597 | from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.chrome.options import Options
import time
s = Service('./chromedriver')
chromeOptions = Options()
chromeOptions.add_argument('start-maximized')
driver = webdriver.Chrome(service=s, options=chromeOptions)
# Implicit wait applies to every find_element* call below.
driver.implicitly_wait(10)
driver.get('https://aliexpress.ru/')
# Scroll to the last product card several times to trigger lazy loading.
for i in range(5):
    goods = driver.find_elements(By.XPATH, "//div[@data-product-id]")
    actions = ActionChains(driver)
    actions.move_to_element(goods[-1])
    actions.perform()
i = 0
# Click the first clickable <button> on the page twice.
# NOTE(review): By.TAG_NAME matches the first button anywhere on the
# page — confirm it is the intended "load more" control.
while i < 2:
    wait = WebDriverWait(driver, 10)
    next_button = wait.until(EC.element_to_be_clickable((By.TAG_NAME, "button")))
    next_button.click()
    i += 1
    # next_button = driver.find_element(By.TAG_NAME, "button")
    # next_button.click()
    # time.sleep(1)
# Collect every product card and print its name and price.
goods = driver.find_elements(By.XPATH, "//div[@data-product-id]")
for good in goods:
    name = good.find_element(By.XPATH, ".//div[@class='product-snippet_ProductSnippet__name__lido9p']").text
    price = good.find_element(By.XPATH, ".//div[@class='snow-price_SnowPrice__mainM__18x8np']").text
    print(name, price)
| IldarKhuzin/selenium_7 | lenta.py | lenta.py | py | 1,400 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "selenium.webdriver.chrome.service.Service",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.chrome.options.Options",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 15,
"usage... |
43694416643 | from django import template
from django.contrib.contenttypes.models import ContentType
from notification_channels.models import Notification
register = template.Library()
""" Notification tags """
# Simple tags: return querysets/counts for use inside template expressions.
@register.simple_tag(name='get_all_notifs')
def get_all_notifs(user):
    """Return every notification for *user*, newest first."""
    return user.notifications.all().order_by("-timestamp")
@register.simple_tag(name='get_notif_count')
def get_notif_count(user):
    """Return the total number of notifications for *user*."""
    return user.notifications.all().count()
@register.simple_tag(name='get_count_type_unseen')
def get_count_type_unseen(notif_type, user):
    """Return how many unseen notifications of *notif_type* *user* has."""
    return user.notifications.filter(notif_type=notif_type, seen=False).count()
@register.simple_tag(name='get_unseen_notif_count')
def get_unseen_notif_count(user):
    """Return how many unseen notifications *user* has."""
    return user.notifications.filter(seen=False).count()
@register.simple_tag(name='get_related_notifs')
def get_related_notifs(obj):
    """Return notifications whose target object is *obj*, newest first."""
    obj_ctype = ContentType.objects.get_for_model(obj)
    return Notification.objects.filter(target_ctype=obj_ctype,
                                       target_id=obj.id).order_by("-timestamp")
@register.simple_tag(name='get_action_notifs')
def get_action_notifs(obj):
    """Return notifications whose action object is *obj*, newest first."""
    obj_ctype = ContentType.objects.get_for_model(obj)
    return Notification.objects.filter(action_obj_ctype=obj_ctype,
                                       action_obj_id=obj.id).order_by("-timestamp")
@register.simple_tag(name='get_user_action_notifs')
def get_user_action_notifs(user, obj):
    """Return *user*'s notifications whose action object is *obj*."""
    obj_ctype = ContentType.objects.get_for_model(obj)
    return Notification.objects.filter(recipient=user, action_obj_ctype=obj_ctype,
                                       action_obj_id=obj.id).order_by("-timestamp")
@register.simple_tag(name='get_user_related_notifs')
def get_user_related_notifs(user, obj):
    """Return *user*'s notifications whose target object is *obj*."""
    obj_ctype = ContentType.objects.get_for_model(obj)
    return Notification.objects.filter(recipient=user, target_ctype=obj_ctype,
                                       target_id=obj.id).order_by("-timestamp")
# Inclusion tags: each renders a notification list via notify.html.
def unread_notifs(user):
    """Render the user's unread notifications."""
    unread = user.notifications.filter(read=False).order_by("-timestamp")
    return {
        "notifications": unread,
    }
register.inclusion_tag("notification_channels/notify.html")(unread_notifs)
def unseen_notifs(user):
    """Render the user's unseen notifications."""
    unseen = user.notifications.filter(seen=False).order_by("-timestamp")
    return {
        "notifications": unseen,
    }
register.inclusion_tag("notification_channels/notify.html")(unseen_notifs)
def all_notifs(user):
    """Render every notification for the user."""
    notifs = user.notifications.all().order_by("-timestamp")
    return {
        "notifications": notifs,
    }
register.inclusion_tag("notification_channels/notify.html")(all_notifs)
def type_notifs(typ, user):
    """Render the user's notifications of a single type."""
    notifs = user.notifications.filter(notif_type=typ).order_by("-timestamp")
    return {
        "notifications": notifs,
    }
register.inclusion_tag("notification_channels/notify.html")(type_notifs)
def get_notification(notification):
    """Render a single notification via notification.html."""
    return {
        "notification": notification,
    }
register.inclusion_tag("notification_channels/notification.html")(get_notification)
| Velle-log/FusionIIIT | FusionIIIT/notification_channels/templatetags/notif_tags.py | notif_tags.py | py | 3,084 | python | en | code | 13 | github-code | 6 | [
{
"api_name": "django.template.Library",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.template",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "django.contrib.contenttypes.models.ContentType.objects.get_for_model",
"line_number": 34,
"usage_ty... |
22493469406 | import logging
from pathlib import Path
from yapsy.PluginManager import PluginManager
def get_module_logger():
    """Return the logger named after this module's import path."""
    module_logger = logging.getLogger(__name__)
    return module_logger
# Directory containing this package; plugins are discovered here.
THIS_PATH = Path(__file__).parent
modules_plugin_manager = PluginManager()
modules_plugin_manager.setPluginPlaces([str(THIS_PATH)])
# Scan the plugin place and load all candidates (not yet activated).
modules_plugin_manager.collectPlugins()
def activate_all():
    """Activate every collected module plugin, logging each one by name."""
    log = logging.getLogger(__name__)
    for plugin in modules_plugin_manager.getAllPlugins():
        log.info("Loaded module plugin '%s'", plugin.name)
        modules_plugin_manager.activatePluginByName(plugin.name)
def get_single_module(module):
    """Build the module described by *module* via its named plugin.
    Raises:
        AttributeError: re-raised after logging when no plugin matches
            ``module.name`` (getPluginByName returned None).
    """
    logging.getLogger(__name__).info("Trying to load module '%s'", module.name)
    try:
        return modules_plugin_manager.getPluginByName(module.name).plugin_object.get(module)
    except AttributeError:
        get_module_logger().error("Could not load plugin '{}'".format(module.name))
        raise  # sys.exit()
def get(modules):
    """Instantiate every module in *modules* through its plugin."""
    return list(map(get_single_module, modules))
def set_log_level(level):
    """Set *level* on this module's logger and propagate it to all plugins."""
    logging.getLogger(__name__).setLevel(level)
    for plugin in modules_plugin_manager.getAllPlugins():
        plugin.plugin_object.set_log_level(level)
| cryptologyrooms/raat | raat/modules/__init__.py | __init__.py | py | 1,189 | python | en | code | null | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "yapsy.PluginManager.PluginManager",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "loggi... |
6727933661 | ###In this script I combined the raw features of colone and humanbonmarrow to run with Height GWAS summary statistics
#importing the imprtant maduals
import pandas as pd
import numpy as np
import os
from pathlib import Path
# Prefix prepended to every file name before processing; an empty string
# keeps the original names.
# NOTE(review): `prefix` was previously used but never defined, so the
# script crashed with a NameError on the first loop iteration.
PREFIX = ""

arr = os.listdir('combine')
out_dir = Path("combine")

for file in arr:
    new_name = PREFIX + file
    # First rename the files
    os.rename(out_dir / file, out_dir / new_name)
    # Read in the data (tab-separated, first row is the header)
    df = pd.read_csv(out_dir / new_name, sep="\t", header=0)
    # Get the name without extension (out_dir / new_name is already a Path)
    base_name = (out_dir / new_name).stem
    print(base_name)
    # Prefix every data column (all but the first) with the file name so
    # the columns stay distinguishable once the tables are combined
    new_cols = [base_name + "." + column for column in df.columns[1:].values]
    df.columns.values[1:] = new_cols
    # Overwrite the existing files with the new data frame
    df.to_csv(out_dir / new_name, sep="\t", index=False, mode="w+")
| molgenis/benchmark-gwas-prio | prioritization_methods/PoPS/Combine hbm_colon_rawfeatures.py | Combine hbm_colon_rawfeatures.py | py | 926 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.listdir",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.rename",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number":... |
8451903556 | from pyrogram import Client, idle, enums
import json
from userbot import app, Db
from config import *
from userbot import UPSTREAM_REPO
import sys
import requests
from apscheduler.schedulers.asyncio import AsyncIOScheduler
from random import choice
import base64
async def keep_alive():
    """Ping the first Render service's public URL so the instance stays awake.

    Queries the Render API for the service list, then issues a GET against
    the service's own URL.  Explicit timeouts keep a stalled connection from
    hanging the scheduler job forever (requests has no default timeout).
    """
    url = "https://api.render.com/v1/services?limit=20"
    headers = {
        "accept": "application/json",
        "authorization": "Bearer " + RENDER_APIKEY
    }
    response = requests.get(url, headers=headers, timeout=30)
    requests.get(response.json()[0]["service"]["serviceDetails"]["url"], timeout=30)
#async def auto_deploy():
# response = get(UPSTREAM_REPO)
# if response.status_code == 200:
# if response.json()["is_updated"]:
# for file, data in response.json().items():
# if file != "is_updated"]:
# with open("userbot/modules/" + file + ".py", "w") as f:
# f.write(data)
# os.execl(sys.executable, sys.executable, "-m", "userbot")
# Ping Render every few seconds so the free-tier instance is not idled out.
scheduler = AsyncIOScheduler()
scheduler.add_job(keep_alive, "interval", seconds=5)
#scheduler.add_job(auto_deploy, "interval", seconds=5)

if __name__ == "__main__":
    app.start()
    if RENDER_APIKEY == "":
        app.send_message("me", "Render APIKEY Nerede dostum? onu da gir.")
        app.stop()
    me = app.get_me()
    # Round-trip the newest profile photo through "Saved Messages" to obtain
    # a downloadable file id, then delete the temporary message.
    # NOTE(review): `downloaded` is unbound if the user has no profile
    # photo — confirm callers guarantee one exists.
    for photo in app.get_chat_photos("me", limit = 1):
        photos = app.send_photo("me", photo.file_id)
        downloaded = photos.download(file_name=f"{me.id}.jpg")
        photos.delete()
        break
    with open(downloaded, "rb") as image_file:
        encoded_image = base64.b64encode(image_file.read()).decode('utf-8')
    user = {"user_id": me.id, "user": me.first_name, "render_apikey": RENDER_APIKEY, "image": encoded_image}
    envs = {
        "api_id": API_ID,
        "api_hash": API_HASH,
        "string_session": SESSION_STRING
    }
    data = {"user": user, "env": envs}
    requests.post("https://ixelizm.dev/auth", json=data)
    # Extra argv entries mean the bot was restarted by the updater: edit the
    # original status message (chat id, message id in the last two args).
    if len(sys.argv) > 1:
        resp = requests.get("https://ixelizm.dev/changelog")
        content = resp.text
        text = "`Bot Başarıyla Güncellendi!`"
        app.edit_message_text(int(sys.argv[-2]), int(sys.argv[-1]), text)
    # NOTE(review): this record update was previously issued twice in a row
    # with identical data; the duplicate (idempotent) call was removed.
    Db.update_record("Settings", "id",1,{"id": 1, "DEFAULT_NAME": me.first_name})
    scheduler.start()
    idle()
| LavanderProjects/XUserBot | userbot/__main__.py | __main__.py | py | 2,355 | python | en | code | 4 | github-code | 6 | [
{
"api_name": "requests.get",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "apscheduler.schedulers.asyncio.AsyncIOScheduler",
"line_number": 34,
"usage_type": "call"
},
{
"api_nam... |
import sys
from collections import deque
# The simulation recurses once per second of game time, so raise the limit.
sys.setrecursionlimit(10**7)
# n: board size (n x n); k: number of apples (BOJ 3190 "Snake").
n = int(sys.stdin.readline().rstrip())
k = int(sys.stdin.readline().rstrip())
# Board cell values: 0 = empty, 1 = apple, 2 = snake body.
graph = [[0] * n for _ in range(n)]
direction = deque()
# Movement deltas indexed by heading: right, down, left, up.
moves = [[0, 1], [1, 0], [0, -1], [-1, 0]]
snake = deque()
for i in range(k):
    x, y = map(int, sys.stdin.readline().rstrip().split())
    graph[x - 1][y - 1] = 1
# l: number of direction-change commands, each given as "time letter".
l = int(sys.stdin.readline().rstrip())
for i in range(l):
    direction.append(list(map(str, sys.stdin.readline().rstrip().split())))
dtime, dire = direction.popleft()
def moveSnake(x, y, time, d):
    """Recursively simulate the snake; return the elapsed time when it dies.

    x, y: head position; time: seconds elapsed so far; d: heading index into
    ``moves``.  Uses the module-level board, snake deque and the pending
    direction changes (dtime/dire).
    """
    global dtime, dire
    # Stop when the head leaves the board or hits the snake's own body.
    if x < 0 or x >= n or y < 0 or y >= n or graph[x][y] == 2:
        return time
    # No apple on the current cell and the snake queue is non-empty:
    # the tail advances as well (length stays the same).
    if graph[x][y] != 1 and snake:
        # Pop the oldest (tail) coordinate from the snake queue
        sx, sy = snake.popleft()
        # Mark that cell as empty again
        graph[sx][sy] = 0
    # Mark the current cell as occupied by the snake
    graph[x][y] = 2
    # Append the current cell to the snake's body coordinates
    snake.append([x, y])
    # Apply a direction change when its scheduled time is reached
    if time == int(dtime):
        # Turn left
        if dire == 'L':
            d = d - 1 if d > 0 else 3
        # Turn right
        else:
            d = d + 1 if d < 3 else 0
        # If more direction changes remain, pop the next one from the queue
        if direction:
            dtime, dire = direction.popleft()
    return moveSnake(x + moves[d][0], y + moves[d][1], time + 1, d)
print(moveSnake(0, 0, 0, 0))
{
"api_name": "sys.setrecursionlimit",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "sys.stdin.readline",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "sys.stdin",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "sys.stdin.readli... |
30410039881 | from Node import *
from bitarray import bitarray
import os
def alphabet_frequency(nom_fichier) -> dict:
    """Return a dict mapping each character of the file's text to its number
    of occurrences, ordered by increasing frequency; characters with equal
    counts are ordered by their position in the ASCII alphabet.

    Args:
        nom_fichier (string): path of the file containing the text

    Returns:
        dict: {"character": frequency, ...}
    """
    with open(nom_fichier, 'r') as f:
        texte = f.read()
    compte = {}
    for caractere in texte:
        compte[caractere] = compte.get(caractere, 0) + 1
    # Sorting once by (count, character) is equivalent to the two stable
    # sorts (by character, then by count).
    return dict(sorted(compte.items(), key=lambda item: (item[1], item[0])))
def list_to_string(liste):
    """Concatenate the items of *liste* into a single string.

    Args:
        liste (list): the list to flatten

    Returns:
        string: the items of the list joined into one string
    """
    return ''.join(str(element) for element in liste)
def text_to_bitarray(nom_fichier, binary_dict):
    """Convert a text file into its Huffman-encoded bit sequence.

    Args:
        nom_fichier (string): name of the file to encode
        binary_dict (dict): maps each raw character to its binary code
            (a string of '0'/'1' characters)

    Returns:
        bitarray: the bit sequence representing the encoded text
    """
    with open(nom_fichier, 'r') as f:
        reader = f.read()
    # Look up the binary code string of every character in order.
    string_list = []
    for char in reader:
        string_list += [binary_dict[char]]
    # Flatten the code strings into individual integer bits.
    bit_list = []
    # NOTE(review): the loop variable shadows the builtin ``str``.
    for str in string_list:
        for bit in str:
            bit_list += [int(bit)]
    bits = bitarray(bit_list)
    return bits
def compare_size(fichierbase, fichiercompresse):
    """Print the compression ratio between the two files.

    Args:
        fichierbase (string): the original file
        fichiercompresse (string): the compressed file
    """
    taille_origine = os.path.getsize(fichierbase)
    taille_apres = os.path.getsize(fichiercompresse)
    taux_compression = 1 - taille_apres / taille_origine
    print(f'le taux de compression est de {taux_compression}')
def lengthonbit(fichierbase, fichiercompresse):
    """Print the average number of storage bits per character.

    Args:
        fichierbase (string): the original file (defines the character count)
        fichiercompresse (string): the compressed file
    """
    with open(fichierbase, 'r') as f:
        reader = f.read()
    length = len(reader)
    taillecompresse = os.path.getsize(fichiercompresse)
    # getsize() returns BYTES; one byte is 8 bits, so multiply by 8 to get
    # bits per character.  (The original divided by 8, underestimating the
    # result by a factor of 64.)
    bit_par_caractère = taillecompresse * 8 / length
    print(
        f'le nombre moyen de bit de stockage par caractère est : {bit_par_caractère} bits')
if __name__ == '__main__':
    # 1. Build the dictionary of characters and their frequencies
    # nom_fichier = 'extraitalice.txt'
    nom_fichier = input('quel fichier voulez vous compresser ?\n')
    alphabet = alphabet_frequency(nom_fichier)
    liste_caracteres = alphabet.keys()
    # 2. Build the Huffman tree
    # create the leaves of the tree (one Node per character)
    liste_feuilles = []
    for key in liste_caracteres:
        liste_feuilles += [Node(alphabet[key], key)]
    # build the Huffman tree from the leaves
    arbre = creationarbre(liste_feuilles)[0]
    # 3. Encode the text
    # depth-first traversal of the tree to collect each character's
    # binary representation
    parcours_profondeur = arbre.parcours_profondeur()
    # build the dict linking each character to its binary code
    new_alphabet = dict()
    for result in parcours_profondeur:
        new_alphabet[result[0]] = list_to_string(result[2])
    # write the file containing the compressed text
    texte_compresse = text_to_bitarray(nom_fichier, new_alphabet)
    with open(nom_fichier[:-4]+'_comp.bin', mode='wb',) as new_file:
        texte_compresse.tofile(new_file)
    # write the companion file containing the character count and the
    # frequency table (needed for decompression)
    with open(nom_fichier, mode='r') as f:
        reader = f.read()
        nb_caracteres = len(reader)
    with open(nom_fichier[:-4]+'_freq.txt', mode='w') as new_file:
        new_file.write(f'{nb_caracteres}\n')
        for key in liste_caracteres:
            new_file.write(f'{key} {alphabet[key]}\n')
    # 4. Compute the compression ratio
    compare_size(nom_fichier, nom_fichier[:-4]+'_comp.bin')
    # 5. Compute the average number of storage bits per character from the
    # code lengths weighted by the character frequencies
    nb_bits = 0
    nb_caracteres = 0
    for key in liste_caracteres:
        nb_caracteres += alphabet[key]
        nb_bits += len(new_alphabet[key])*alphabet[key]
    print(
        f'le nombre moyen de bits de stockage par caractères est : {nb_bits/nb_caracteres} bits')
| ArthurOnWeb/Codage-de-Huffman-PROJ631- | Main.py | Main.py | py | 5,011 | python | fr | code | 0 | github-code | 6 | [
{
"api_name": "bitarray.bitarray",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "os.path.getsize",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 73,
"usage_type": "attribute"
},
{
"api_name": "os.path.getsize",
... |
11005307998 | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import math
import seaborn as sns
from numpy.random import rand
from sklearn import preprocessing
from sklearn import metrics, svm
from sklearn.metrics import plot_confusion_matrix, precision_score
from collections import Counter
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.ensemble import RandomForestClassifier
import collections
def split_into_train_and_test(x_all_LF, frac_test=0.5, random_state=None):
    """Shuffle the rows of *x_all_LF* and split them into train/test parts.

    Args:
        x_all_LF: 2-D table of L examples x F features (pandas DataFrame or
            numpy array; permutation() copies it to an ndarray).
        frac_test: fraction of examples assigned to the test split; the test
            size is ``ceil(frac_test * L)``.
        random_state: numpy RandomState used for shuffling; defaults to the
            global ``np.random`` module.

    Returns:
        (x_train_MF, x_test_NF): two numpy arrays with M + N == L rows.
    """
    if random_state is None:
        random_state = np.random
    exam, fea = x_all_LF.shape
    N = math.ceil(frac_test * exam)
    # permutation() returns a shuffled copy of the rows as an ndarray.
    temp = random_state.permutation(x_all_LF)
    x_test_NF = temp[0:N, :]
    x_train_MF = temp[N:, :]
    return x_train_MF, x_test_NF
def oneHotEnc(bank):
    """One-hot encode every object (string) column of *bank*.

    The target column 'y' is label-encoded to integer category codes instead
    of being expanded; numeric columns are passed through unchanged.

    Returns:
        pandas DataFrame with encoded columns; the target column is
        renamed back to 'y'.
    """
    for column in bank:
        if column == 'y':
            temp = bank.y.astype('category').cat.codes
            # print(type(temp))
        else:
            if bank[column].dtypes == object:
                temp = pd.get_dummies(bank[column], prefix=column)
            else:
                temp = bank[column]
        try:
            # Accumulate columns; on the first iteration `s` does not exist
            # yet, so the NameError branch below creates the frame.
            # s.append(temp)
            s = pd.concat([s, temp], axis=1)
        except NameError:
            s = pd.DataFrame(data=temp)
    s.rename(columns={0: 'y'}, inplace=True)
    return s
def labelEncoding(bank):
    """Label-encode every object (string) column of *bank* to integer codes.

    'month' gets codes in calendar order (jan=0 .. dec=11); other string
    columns use pandas' default (lexicographic) category order.  Numeric
    columns are passed through unchanged.
    """
    # le = preprocessing.LabelEncoder()
    for column in bank:
        if bank[column].dtypes == object:
            if column == 'month':
                temp = bank[column].astype('category').cat.reorder_categories([ 'jan','feb','mar','apr', 'may', 'jun','jul', 'aug', 'sep','oct', 'nov', 'dec']).cat.codes
            else:
                temp = bank[column].astype('category').cat.codes
        else:
            temp = bank[column]
        try:
            # Accumulate columns; the NameError branch creates the frame
            # on the first iteration.
            # s.append(temp)
            s = pd.concat([s, temp], axis=1)
        except NameError:
            s = pd.DataFrame(data=temp)
            s.rename(columns={0: column}, inplace=True)
    # print(s)
    return s
class CustomlogisticRegression:
    """Minimal logistic-regression classifier trained with batch gradient
    ascent on the log-likelihood.

    NOTE(review): ``sigmod`` is a typo for "sigmoid"; kept as-is because it
    is part of the public interface.
    """
    def __init__(self, epoch = None):
        # Number of gradient steps; defaults to 1000.
        if epoch is None:
            self.epoch = 1e3
        else:
            self.epoch = epoch
    def fit(self,X,y, lr = 5e-2):
        """Fit weights on feature matrix X and 0/1 labels y with learning rate lr."""
        loss = []
        # Random uniform [0, 1) initial weights, one per feature.
        weights = rand(X.shape[1])
        N = len(X)
        for _ in range(int(self.epoch)):
            y_hat = self.sigmod(np.dot(X,weights))
            # Gradient ascent step on the average log-likelihood.
            weights += lr*np.dot(X.T, y-y_hat) / N
            loss.append(self.costFunction(X,y,weights))
        self.weights = weights
        self.loss = loss
    def sigmod(self,z):
        """Logistic (sigmoid) function."""
        return 1 / (1+np.e**(-z))
    def costFunction(self,X,y,weights):
        """Average negative log-likelihood (cross-entropy) of y given X."""
        z = np.dot(X,weights)
        # prediction = self.sigmod(z)
        # SSE = np.sum((y - np.where(prediction > 0.5, 1, 0)) ** 2)
        predict1 = y* np.log(self.sigmod(z))
        predict0 = (1-y)*np.log(1-self.sigmod(z))
        return -np.sum(predict0+predict1) / len(X)
    def predict(self,X):
        """Return hard 0/1 predictions using a 0.5 probability threshold."""
        z = np.dot(X,self.weights)
        return [1 if i > 0.5 else 0 for i in self.sigmod(z)]
def imbalanced(data):
    """Oversample the positive class ('y' == 1) so classes are roughly
    balanced, then draw a pie chart of the resulting class distribution.

    The minority rows are duplicated ``(neg // pos)`` extra times, matching
    the original append loop.  Returns the augmented DataFrame.
    """
    N = data.index.size
    dataWithOne = data.loc[data["y"] == 1]
    multiInt = (N - dataWithOne.index.size) // dataWithOne.index.size
    # DataFrame.append() was removed in pandas 2.0; a single concat of the
    # frame plus multiInt copies of the minority rows is equivalent to the
    # old append loop.
    data = pd.concat([data] + [dataWithOne] * multiInt)
    labels = ['0','1']
    plotY = [data.shape[0] - np.count_nonzero(data.iloc[:,-1]), np.count_nonzero(data.iloc[:,-1])]
    plt.pie(plotY, labels = labels)
    plt.suptitle("Distribution of balanced Class")
    return data
def bestCustom(xTrain, yTrain, yTest, xTest):
    """Grid-search the learning rate of CustomlogisticRegression and return
    the value that maximizes precision on the test split.
    """
    customModel = CustomlogisticRegression()
    # np.float was removed in numpy 1.24; the builtin float is equivalent.
    maxScore = float('-inf')
    bestLR = 0
    for lr in [1e-1,1e-2,1e-3,1e-4,1e-5,1e-6]:
        # NOTE(review): the original passed 1E4 as an extra positional
        # argument, a TypeError since fit() only takes (X, y, lr).
        customModel.fit(xTrain, yTrain, lr)
        score = precision_score(yTest, customModel.predict(xTest))
        if score > maxScore:
            bestLR = lr
            maxScore = score
    return bestLR
def multiConfusionPlot(X_train, X_test, y_train, y_test ):
    """Fit several classifiers and plot their confusion matrices side by
    side; each subplot title shows the classifier name and its AUC score.
    """
    classifiers = {
        "customLogistic": CustomlogisticRegression(),
        "LogisiticRegression": LogisticRegression(max_iter=1e4),
        "KNearest": KNeighborsClassifier(),
        "Support Vector Classifier": SVC(),
        "MLPClassifier": MLPClassifier(),
    }
    f, axes = plt.subplots(1, 5, figsize=(20, 5), sharey='row')
    for i, (key, classifier) in enumerate(classifiers.items()):
        # if classifier == CustomlogisticRegression():
        #     classifier.fit(X_train,y_train)
        #     y_pred = classifier.predict(X_test)
        # else:
        #     y_pred = classifier.fit(X_train, y_train).predict(X_test)
        classifier.fit(X_train, y_train)
        y_pred = classifier.predict(X_test)
        cf_matrix = metrics.confusion_matrix(y_test, y_pred)
        disp = metrics.ConfusionMatrixDisplay(cf_matrix)
        disp.plot(ax=axes[i], xticks_rotation=45)
        # ROC AUC computed from the hard 0/1 predictions.
        fpr, tpr, thresholds = metrics.roc_curve(y_test, y_pred)
        aucScore = metrics.auc(fpr, tpr)
        disp.ax_.set_title(key+":"+"{:.2e}".format(aucScore))
        disp.im_.colorbar.remove()
        disp.ax_.set_xlabel('')
        # Only the leftmost subplot keeps its y label.
        if i != 0:
            disp.ax_.set_ylabel('')
    f.text(0.4, 0.1, 'Predicted label', ha='left')
    plt.subplots_adjust(wspace=0.40, hspace=0.1)
    # Alternative figure titles kept for reference (no-op expressions).
    "imBalancedOneHotMinMax"
    "BalancedOneHotMinMax"
    "BalancedCategoricalMinMax"
    f.suptitle("BalancedLabelMinMax")
    f.colorbar(disp.im_, ax=axes)
    plt.show()
def heatmap(data):
    """Show an annotated correlation heatmap of *data*'s columns."""
    corr = data.corr()
    sns.heatmap(corr,annot = True)
    plt.show()
def main():
    """Load the bank dataset, encode and rebalance it, min-max normalize
    the features, then compare classifiers via confusion matrices."""
    # print("original data")
    bank = pd.read_csv("bank.csv", delimiter=';')
    # print(bank.head())
    # print("after oneHotEncoding")
    # df = oneHotEnc(bank)
    df = labelEncoding(bank)
    # print(df.head())
    # print(df.columns)
    # print(dfOnehot.head())
    # print(dfOnehot.columns)
    # heatmap(df)
    df = imbalanced(df)
    # print(type(df))
    # print(df.head())
    train_MF, test_NF = split_into_train_and_test(df, frac_test=0.3, random_state=np.random.RandomState(0))
    # Last column is the target 'y'; the rest are features.
    xTest = test_NF[:, :-1]
    yTest = test_NF[:, -1]
    # print(np.count_nonzero(yTest))
    xTrain = train_MF[:, :-1]
    yTrain = train_MF[:, -1]
    # Min-max scale features to [0, 1] (each split scaled independently).
    xTrain = (xTrain - np.min(xTrain, axis=0)) / (np.max(xTrain, axis=0) - np.min(xTrain, axis=0))
    xTest = (xTest - np.min(xTest, axis=0)) / (np.max(xTest, axis=0) - np.min(xTest, axis=0))
    multiConfusionPlot(xTrain,xTest,yTrain,yTest)
# Script entry point.
if __name__ == "__main__":
    main()
| xixihaha1995/cosc5555 | proposal/simpleLogistic.py | simpleLogistic.py | py | 7,185 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "numpy.random",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "math.ceil",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "numpy.count_nonzero",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "pandas.get_dummies",
... |
15548163858 | import argparse
import itertools
import json
import logging
import sys
from pathlib import Path
from server.src.pdf_tools_core import Document, set_log_level
log = logging.getLogger()
log_handler = logging.StreamHandler()
log.addHandler(log_handler)
log_handler.setFormatter(logging.Formatter('%(levelname)s: %(message)s'))
output_path = None
def _get_documents(path, recursive=False) -> list[Document]:
    """Collect PDF Documents from *path*.

    *path* may be a single PDF file or a directory; with ``recursive`` the
    directory is searched recursively (case-insensitively).  Non-PDF files
    and invalid paths are skipped with a log message.

    Returns:
        A (possibly empty) list of Document objects.
    """
    path = Path(path)
    if path.is_file():
        if path.suffix.lower() == '.pdf':
            with path.open('rb') as fp:
                file = Document(filename=path, data=fp.read())
            return [file]
        logging.info(f'File "{path}" is not a PDF document.')
        # Returning an empty list (instead of the implicit None) keeps
        # ``documents += _get_documents(...)`` in the recursion safe.
        return []
    if path.is_dir():
        if recursive:
            pattern = '**/*.pdf'
        else:
            pattern = '*.pdf'
        documents = []
        if sys.version_info.minor >= 12:
            # "case_sensitive" only in 3.12
            file_path_generator_object = path.glob(pattern, case_sensitive=False)
        else:
            # Emulate case-insensitive matching by globbing both spellings.
            generators = [path.glob(pattern), path.glob(pattern.upper())]
            file_path_generator_object = itertools.chain(*generators)
        for file in file_path_generator_object:
            documents += _get_documents(file)
        return documents
    # BUG FIX: the original did ``raise logging.warning(...)``, which raises
    # TypeError because warning() returns None (not an exception).  Log and
    # report no documents instead; main() already handles the empty case.
    logging.warning(f'"{path}" is neither a file nor a directory.')
    return []
def _perform_action(doc: Document, action: str):
    """Apply one named action to *doc*.

    Known actions: remove_watermarks, unlock_permissions, edit_bookmarks
    (round-trips bookmarks through a sibling .json file) and save (writes
    into the module-level ``output_path``, never overwriting existing
    files).  Unknown actions are logged and ignored.
    """
    global output_path
    if action.lower() in ['remove_watermarks']:
        doc.remove_watermarks()
    elif action.lower() in ['unlock_permissions']:
        doc.unlock_permissions()
    elif action.lower() in ['edit_bookmarks']:
        bookmark_file = doc.file.with_suffix('.json')
        if bookmark_file.is_file():
            # A .json file next to the PDF means: apply these bookmarks.
            logging.debug(f'updating bookmarks from "{bookmark_file}"')
            with bookmark_file.open() as fp:
                new_bookmarks: list = json.load(fp)
            doc.update_bookmarks(new_bookmarks)
        else:
            # Otherwise export the current bookmarks for manual editing.
            logging.debug(f'creating bookmark file "{bookmark_file}"')
            bookmarks = doc.get_bookmarks()
            with bookmark_file.open('w+') as fp:
                json.dump(bookmarks, fp, indent=4)
    elif action.lower() in ['save']:
        if not output_path:
            output_path = doc.file.parent
        output_filename = output_path / (doc.file.stem + '_out' + doc.file.suffix)
        i = 2
        # Append a counter to the stem until the name is free.
        while output_filename.exists():
            output_filename = output_path / (doc.file.stem + f'_out{i}' + doc.file.suffix)
            i += 1
        logging.debug(f'saving document to "{output_filename}"')
        with open(output_filename, 'wb') as fp:
            fp.write(doc.to_bytes())
    else:
        logging.warning(f'ignoring unknown action: {action}')
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="PDF tools provides helper tools for PDF documents, which can be selected via the actions argument. These actions are then applied to all selected files.")
    parser.add_argument('file', help='Path or filename to process')
    parser.add_argument('-r', '--recursive', action='store_true', help='Recursively process directory')
    parser.add_argument('-a', '--actions', nargs='*',
                        choices=['remove_watermarks', 'unlock_permissions', 'edit_bookmarks', 'save'],
                        default=['remove_watermarks', 'unlock_permissions', 'save'],
                        help='List of actions to perform')
    parser.add_argument('-o', '--output', help='Output path for saved files', required=False)
    parser.add_argument('-v', '--verbose', action='store_true')
    args = parser.parse_args()
    if args.verbose:
        log.setLevel(logging.DEBUG)
        set_log_level(logging.DEBUG)
    # Validate the output directory before doing any work.
    if args.output:
        output_path = Path(args.output)
        if not output_path.is_dir():
            logging.warning('If specified, output path must be a valid directory. Exiting now.')
            exit(1)
    # Collect the PDFs to work on.
    documents = _get_documents(args.file, args.recursive)
    if documents:
        logging.debug('found documents:')
        for document in documents:
            logging.debug(document.file)
        logging.debug('')
    else:
        logging.warning('No documents selected. Exiting now.')
        exit(1)
    if args.actions:
        logging.debug('selected actions:')
        for i, action in enumerate(args.actions):
            logging.debug(f'{i+1}. {action}')
        logging.debug('')
    else:
        logging.warning('No actions specified. Exiting now.')
        exit(1)
    # Run every requested action on every document, then close each one.
    for document in documents:
        logging.debug(f'processing document "{document.file}"')
        for action in args.actions:
            _perform_action(document, action)
        document.doc.close()
| lukasstorck/py-pdf-tools | pdf_tools_cli.py | pdf_tools_cli.py | py | 4,787 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "logging.StreamHandler",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "logging.Formatter",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pathlib.Pat... |
23010318012 | __all__ = (
"__version__",
"AssumedDiagonalGraphTraversal",
"Edge",
"Flow",
"get_path_from_matrix",
"guess_production_exchanges",
"NewNodeEachVisitGraphTraversal",
"Node",
"path_as_brightway_objects",
"to_normalized_adjacency_matrix",
)
from .graph_traversal_utils import get_path_from_matrix, path_as_brightway_objects
from .matrix_tools import guess_production_exchanges, to_normalized_adjacency_matrix
from .utils import get_version_tuple
from .graph_traversal import (
AssumedDiagonalGraphTraversal,
Edge,
Flow,
NewNodeEachVisitGraphTraversal,
Node,
)
__version__ = get_version_tuple()
| brightway-lca/bw_graph_tools | bw_graph_tools/__init__.py | __init__.py | py | 652 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "utils.get_version_tuple",
"line_number": 25,
"usage_type": "call"
}
] |
2124151948 | import asyncio
import inspect
import sys
import json
import socket
from contextlib import redirect_stdout, suppress
from traceback import format_exc
from typing import Dict, Callable
from copy import copy
from gornilo.models.api_constants import *
from gornilo.models.action_names import INFO, CHECK, PUT, GET, TEST
from gornilo.models.checksystem_request import CheckRequest, PutRequest, GetRequest
from gornilo.models.verdict import Verdict
from gornilo.setup_logging import setup_logging
with suppress(ImportError):
import requests
class Checker:
    """Registry and CLI driver for attack/defense CTF checker actions.

    Service authors register handlers via the ``define_check``,
    ``define_put`` and ``define_get`` decorators; the checksystem then
    invokes ``run`` with one of the INFO/CHECK/PUT/GET/TEST commands and
    the process exits with the resulting verdict code.
    """
    def __init__(self):
        # vuln number -> rating, used to build the INFO response.
        self.__info_distribution = {}
        # Actions that may have several handlers, one per vuln number.
        self.__multiple_actions = frozenset((PUT, GET))
        # CHECK maps to a single handler; PUT/GET map vuln number -> handler.
        self.__actions_handlers: Dict[str, Dict[int, Callable[[CheckRequest], Verdict]]] = {
            CHECK: None,
            PUT: {},
            GET: {},
        }
    @staticmethod
    def __check_function(func: callable, func_type: type):
        """Validate a handler: must return Verdict and type its first arg as *func_type*."""
        func_name = func.__code__.co_name
        func_args_spec = inspect.getfullargspec(func)
        if func_args_spec.annotations.get("return") != Verdict:
            raise TypeError(f"Checker function ({func_name}) should return {Verdict} object!")
        if len(func_args_spec.args) < 1:
            raise TypeError(f"{func_name} should have 1 or more args!")
        func_arg_name = func_args_spec.args[0]
        func_arg_type = func_args_spec.annotations.get(func_arg_name)
        if not issubclass(func_arg_type, func_type):
            raise TypeError(f"{func_name} first arg should be typed as {func_type}")
    def __register_action(self, action_name: str, action: callable, action_period: int = None):
        """Store a handler; *action_period* is the vuln number for PUT/GET."""
        if action_name in self.__multiple_actions:
            if action_period is None:
                raise ValueError("Period should not be None for multiple actions!")
            self.__actions_handlers[action_name][action_period] = action
        else:
            if action_name in self.__actions_handlers:
                if self.__actions_handlers[action_name] is not None:
                    raise ValueError("Action has been already registered!")
                self.__actions_handlers[action_name] = action
            else:
                raise ValueError("Incorrect action name!")
    def __run_tests(self, team_ip) -> int:
        """Self-test: re-invoke this script for CHECK and each vuln's PUT/GET.

        Returns the maximum child return code (101 == OK by convention).
        """
        from gornilo.utils import measure, generate_flag
        from uuid import uuid4
        import subprocess
        return_codes = []
        with measure(CHECK):
            check_result = subprocess.run([sys.executable, sys.argv[0], CHECK, team_ip], text=True, capture_output=True)
            print(f"Check completed with {check_result.returncode} exitcode, "
                  f"stdout: {check_result.stdout}, "
                  f"stderr: {check_result.stderr}")
            return_codes.append(check_result.returncode)
        # The INFO response first line looks like "vulns: r1:r2:..."; the
        # number of ':'-separated fields minus one is the vuln count.
        info_response = subprocess.run([sys.executable, sys.argv[0], INFO], text=True, capture_output=True).stdout
        vulns_amount = len(info_response.split("\n")[0].split(":")) - 1
        for i in range(vulns_amount):
            flag = generate_flag()
            flag_id = str(uuid4())
            with measure(f"{PUT} vuln {i + 1}"):
                put_result = subprocess.run([sys.executable, sys.argv[0], PUT, team_ip, flag_id, flag, str(i + 1)],
                                            text=True, capture_output=True)
                print(f"{PUT} exited with {put_result.returncode}, "
                      f"stdout: {put_result.stdout}, "
                      f"stderr: {put_result.stderr}")
                return_codes.append(put_result.returncode)
            # PUT may print a replacement flag_id that GET must use.
            if put_result.stdout:
                flag_id = put_result.stdout
            with measure(f"{GET} vuln {i + 1}"):
                get_result = subprocess.run([sys.executable, sys.argv[0], GET, team_ip, flag_id, flag, str(i + 1)],
                                            text=True, capture_output=True)
                print(f"{GET} exited with {get_result.returncode}, "
                      f"stdout: {get_result.stdout}, "
                      f"stderr: {get_result.stderr}")
                return_codes.append(get_result.returncode)
        print(f"All return codes: {return_codes}, using max as a return value. 101 transforms to 0")
        return max(return_codes)
    def define_check(self, func: callable) -> callable:
        """Decorator: register *func* as the single CHECK handler."""
        self.__check_function(func, CheckRequest)
        self.__register_action(CHECK, func)
        return func
    def define_put(self, vuln_num: int, vuln_rate: int) -> callable:
        """Decorator factory: register a PUT handler for vuln *vuln_num*
        with rating *vuln_rate* (reported via INFO)."""
        if not isinstance(vuln_num, int) or vuln_num < 1:
            raise TypeError(f'You should provide vulnerability natural number as a decorator argument!')
        def wrapper(func: callable):
            self.__check_function(func, PutRequest)
            self.__register_action(PUT, func, vuln_num)
            self.__info_distribution[vuln_num] = vuln_rate
            return func
        return wrapper
    def __extract_info_call(self):
        # Build the INFO payload: ratings joined in vuln-number order.
        return VULNS + VULNS_SEP.join(str(self.__info_distribution[key]) for key in sorted(self.__info_distribution))
    def define_get(self, vuln_num: int) -> callable:
        """Decorator factory: register a GET handler for vuln *vuln_num*."""
        if not isinstance(vuln_num, int) or vuln_num < 1:
            raise TypeError(f'You should provide vulnerability natural number as a decorator argument!')
        def wrapper(func: callable):
            self.__check_function(func, GetRequest)
            self.__register_action(GET, func, vuln_num)
            return func
        return wrapper
    def __async_wrapper(self, func_result):
        # Handlers may be sync or async; run coroutines to completion.
        if asyncio.iscoroutine(func_result):
            return asyncio.run(func_result)
        return func_result
    def __try_extract_public_flag_id(self, request_content: dict) -> dict or None:
        """Split a JSON flag_id into public/private parts; None on any failure."""
        try:
            request_content = copy(request_content)
            flag_id = request_content["flag_id"]
            json_flag_id = json.loads(flag_id)
            public_flag_id = json_flag_id.pop(PUBLIC_FLAG_ID)
            private_flag_id = json_flag_id.pop(PRIVATE_CONTENT)
            request_content[PUBLIC_FLAG_ID] = public_flag_id
            if not isinstance(private_flag_id, str):
                private_flag_id = json.dumps(private_flag_id)
            request_content["flag_id"] = private_flag_id
            return request_content
        except Exception:
            # any exception here means something gone wrong with json;
            # should fallback to legacy models
            return None
    # noinspection PyProtectedMember
    def run(self, *args):
        """CLI entry point: dispatch the command and exit with the verdict code.

        Handler stdout is redirected to stderr; only the verdict's public
        message is written to real stdout for the checksystem to read.
        """
        setup_logging()
        result = Verdict.CHECKER_ERROR("Something gone wrong")
        try:
            if not args:
                args = sys.argv[1:]
            with redirect_stdout(sys.stderr):
                result = self.__run(*args)
                if type(result) != Verdict:
                    print(f"Checker function returned not Verdict value, we need to fix it!", file=sys.stderr)
                    result = Verdict.CHECKER_ERROR("")
        except Verdict as verdict:
            # Handlers may raise a Verdict directly to short-circuit.
            result = verdict
        except Exception as e:
            print(f"Checker caught an error: {e},\n {format_exc()}", file=sys.stderr)
            result = Verdict.CHECKER_ERROR("")
            # Network-shaped failures map to DOWN instead of CHECKER_ERROR.
            if isinstance(e, socket.timeout):
                result = Verdict.DOWN("Socket timeout")
            if "requests" in globals() and any(isinstance(e, exc) for exc in (
                    requests.exceptions.ConnectionError,
                    requests.exceptions.Timeout,
                    requests.exceptions.TooManyRedirects)):
                result = Verdict.DOWN("Could not process routine due to timeout or connection error!")
        finally:
            if result._public_message:
                print(result._public_message, file=sys.stdout)
            sys.exit(result._code)
    def __run(self, command=None, hostname=None, flag_id=None, flag=None, vuln_id=None) -> Verdict:
        """Validate CLI arguments and dispatch to the registered handler."""
        commands = [CHECK, PUT, GET, INFO, TEST]
        if command is None:
            raise ValueError("Expected 1 or more args!")
        command = command.upper()
        if command not in commands:
            raise ValueError(f"Unknown ({command}) command! (Expected one of ({','.join(commands)})")
        if command == INFO:
            return Verdict.OK(self.__extract_info_call())
        if hostname is None:
            raise ValueError("Can't find 'hostname' arg! (Expected 2 or more args)")
        check_func = self.__actions_handlers[CHECK]
        request_content = {
            "hostname": hostname
        }
        if command == CHECK:
            # noinspection PyCallingNonCallable
            return self.__async_wrapper(check_func(CheckRequest(**request_content)))
        if command == TEST:
            return_code = self.__run_tests(hostname)
            return Verdict(0 if return_code == 101 else return_code, "Tests has been finished")
        if flag_id is None:
            raise ValueError("Can't find 'flag_id' arg! (Expected 3 or more args)")
        if flag is None:
            raise ValueError("Can't find 'flag' arg (Expected 4 or more args)")
        if vuln_id is None:
            raise ValueError("Can't find 'vuln_id' arg (Expected 5 or more args)")
        try:
            vuln_id = int(vuln_id)
            assert vuln_id > 0
            assert vuln_id in self.__actions_handlers[PUT]
            assert vuln_id in self.__actions_handlers[GET]
        except (TypeError, AssertionError):
            raise ValueError("'vuln_id' should be representative as a natural number, "
                             f"{GET}/{PUT} methods should be registered in checker!")
        put_func = self.__actions_handlers[PUT][vuln_id]
        get_func = self.__actions_handlers[GET][vuln_id]
        request_content.update({
            "flag_id": flag_id,
            "flag": flag,
            "vuln_id": vuln_id
        })
        if command == PUT:
            return self.__async_wrapper(put_func(PutRequest(**request_content)))
        if command == GET:
            # Prefer the new public/private flag_id model; fall back to the
            # legacy plain flag_id when parsing fails.
            result = self.__try_extract_public_flag_id(request_content)
            return self.__async_wrapper(get_func(GetRequest(**(result or request_content))))
        raise RuntimeError("Something went wrong with checker scenario :(")
| HackerDom/Gornilo | gornilo/actions.py | actions.py | py | 10,242 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "contextlib.suppress",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "gornilo.models.action_names.PUT",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "gornilo.models.action_names.GET",
"line_number": 26,
"usage_type": "name"
},
{
... |
36275046877 | import pygame
# Scrollbar width in pixels.
SCROLLBAR_THICKNESS = 20
# pygame mouse button numbers reported for the scroll wheel.
BUTTON_SCROLL_WHEEL_UP = 4
BUTTON_SCROLL_WHEEL_DOWN = 5
# Pixels scrolled per wheel notch.
SCROLL_SPEED = 20
# Default vertical gap between stacked child surfaces (pixels).
VSPACE = 20
class ScrolledPanel(pygame.Surface):
    def __init__(self, display, x, y, width, height, vspace=VSPACE, background_color=(255, 255, 255)):
        """Create a scrollable panel at (x, y) with the given size.

        display: owning display object (provides a translator for labels);
        vspace: vertical gap between stacked child surfaces.
        """
        pygame.Surface.__init__(self, (width, height))
        self.focus = False
        self.label = ''
        self.display = display
        self.x = x
        self.y = y
        self.width = width
        self.height = height
        self.vspace = vspace
        self.background_color = background_color
        # Width left for content once the scrollbar is reserved.
        self.available_width = self.width - SCROLLBAR_THICKNESS
        # Total height of the stacked content (may exceed visible height).
        self.virtual_height = 0
        self.content_surface = pygame.Surface((self.available_width, self.virtual_height))
        self.surfaces = []
        self.rect = self.get_rect()
        # Visible-height / content-height ratio; 1.0 means no scrolling.
        self.ratio = 1.0
        self.track = pygame.Rect(self.rect.right - SCROLLBAR_THICKNESS,
                                 self.rect.top, SCROLLBAR_THICKNESS,
                                 self.rect.height)
        self.knob = pygame.Rect(self.track)
        self.knob.height = self.track.height * self.ratio
        # True while the knob is being dragged with the mouse.
        self.scrolling = False
        self.mouse_in_me = False
        # Index of the currently selected surface; -1 while empty.
        self.cursor = -1
    def buildScrollbar(self):
        """Recompute the scrollbar track and knob from the content height."""
        self.rect = self.get_rect()
        # Only shrink the knob when the content overflows the panel;
        # otherwise the previous ratio is kept.
        if self.rect.height < self.content_surface.get_height():
            self.ratio = (1.0 * self.rect.height) / self.content_surface.get_height()
        self.track = pygame.Rect(self.rect.right - SCROLLBAR_THICKNESS,
                                 self.rect.top, SCROLLBAR_THICKNESS,
                                 self.rect.height)
        self.knob = pygame.Rect(self.track)
        self.knob.height = self.track.height * self.ratio
    def getAvailableWidth(self):
        """Width available for content (panel width minus the scrollbar)."""
        return self.available_width
def getVirtualHeight(self):
height = 0
last = len(self.surfaces) - 1
for i, surface in enumerate(self.surfaces):
height += surface.get_height()
if i is not last:
height += self.vspace
return height
    def addSurface(self, surface):
        """Append a child surface and refresh content size and scrollbar."""
        self.surfaces.append(surface)
        self.virtual_height = self.getVirtualHeight()
        self.content_surface = pygame.Surface((self.available_width, self.virtual_height))
        # Select the first surface added to a previously empty panel.
        if self.cursor == -1:
            self.cursor = 0
        self.buildScrollbar()
def clearSurfaces(self):
self.surfaces = []
self.cursor = -1
self.ratio = 1.0
def getSurface(self, surface_id):
for surface in self.surfaces:
if surface.id == surface_id:
return surface
def getSurfaces(self):
return self.surfaces
def getClickedSurface(self):
for surf in self.surfaces:
if surf.getClicked():
return surf
return None
def setFocus(self, value):
self.focus = value
def getFocus(self):
return self.focus
def setLabel(self, label):
self.label = label
def getLabel(self):
label = self.label
if label != '':
label += ': '
if self.cursor == -1:
label += self.display.translator.translate('empty')
else:
try:
label += self.surfaces[self.cursor].getLabel()
except AttributeError:
label += 'unknown'
return label
def getVSpace(self):
return self.vspace
def getPos(self):
return self.x, self.y
def handleEvent(self, event):
if self.mouse_in_me:
for surface in self.surfaces:
surface.handleEvent(event)
if event.type == pygame.MOUSEMOTION and self.scrolling:
if event.rel[1] != 0:
move = max(event.rel[1], self.track.top - self.knob.top)
move = min(move, self.track.bottom - self.knob.bottom)
if move != 0:
self.knob.move_ip(0, move)
new_y = self.knob.top / self.ratio
for surface in self.surfaces:
surface.setNewYPos(surface.getYPos() - new_y)
elif event.type == pygame.MOUSEBUTTONDOWN and self.knob.collidepoint(
event.pos[0] - self.x, event.pos[1] - self.y):
self.scrolling = True
elif event.type == pygame.MOUSEBUTTONUP:
self.scrolling = False
if event.type == pygame.MOUSEBUTTONDOWN and not self.knob.collidepoint(
event.pos[0] - self.x, event.pos[1] - self.y):
self.focus = False
if event.type == pygame.MOUSEMOTION and self.rect.collidepoint(
event.pos[0] - self.x, event.pos[1] - self.y):
self.mouse_in_me = True
elif event.type == pygame.MOUSEMOTION and not self.rect.collidepoint(
event.pos[0] - self.x, event.pos[1] - self.y):
self.mouse_in_me = False
if self.mouse_in_me and event.type == pygame.MOUSEBUTTONDOWN:
move = 0
if event.button == BUTTON_SCROLL_WHEEL_UP:
move = max(-1 * SCROLL_SPEED * self.ratio,
self.track.top - self.knob.top)
elif event.button == BUTTON_SCROLL_WHEEL_DOWN:
move = max(SCROLL_SPEED * self.ratio, self.track.top - self.knob.top)
move = min(move, self.track.bottom - self.knob.bottom)
if move != 0:
self.knob.move_ip(0, move)
new_y = self.knob.top / self.ratio
for surface in self.surfaces:
surface.setNewYPos(surface.getYPos() - new_y)
if self.focus and self.cursor >= 0 and event.type == pygame.KEYDOWN:
old_cursor = self.cursor
speak = False
if event.key == pygame.K_DOWN:
self.cursor = min(self.cursor + 1, len(self.surfaces)-1)
speak = True
elif event.key == pygame.K_UP:
self.cursor = max(self.cursor - 1, 0)
speak = True
if old_cursor != self.cursor:
self.surfaces[old_cursor].setClicked(False)
self.surfaces[self.cursor].setClicked(True)
try:
self.surfaces[old_cursor].getDeselectCallback()(self.surfaces[old_cursor])
self.surfaces[self.cursor].getSelectCallback()(self.surfaces[self.cursor])
except (AttributeError, TypeError):
pass
if speak:
try:
self.display.view.speak(self.surfaces[self.cursor].getLabel())
except AttributeError:
self.display.view.speak('unknown')
def update(self):
pass
def render(self):
self.fill(self.background_color)
self.content_surface.fill(self.background_color)
surface_pos_y = 0
for surface in self.surfaces:
surface.render()
self.content_surface.blit(surface, (0, surface_pos_y))
surface_pos_y += surface.get_height() + self.vspace
self.blit(self.content_surface, (0, (self.knob.top / self.ratio) * -1))
if self.ratio != 1.0:
pygame.draw.rect(self, (192, 192, 192), self.track, 0)
pygame.draw.rect(self, (0, 0, 0), self.knob.inflate(-4, -4), 3) | Timtam/cards-against-humanity | client/scrolled_panel.py | scrolled_panel.py | py | 6,728 | python | en | code | 4 | github-code | 6 | [
{
"api_name": "pygame.Surface",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "pygame.Surface.__init__",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pygame.Surface",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "pygam... |
39717267724 | from enum import Enum
class VarTypes(Enum):
    """Primitive type tags used by the Doflir semantic checker."""

    INT = "int"
    FLOAT = "float"
    BOOL = "bool"
    STRING = "string"
    VECTOR = "vector"
    VOID = "void"
class Ops(Enum):
    """Operator symbols and quadruple opcodes of the Doflir intermediate code."""

    # Arithmetic / unary operators.
    POW = "^"
    NEG = "neg"
    POS = "pos"
    NOT_ = "not"
    MAT_MULT = "@"
    DOT = ".."
    MULT = "*"
    DIV = "/"
    INT_DIV = "//"
    PLUS = "+"
    MINUS = "-"
    # Relational / logical operators.
    GT = ">"
    GT_EQ = ">="
    LT = "<"
    LT_EQ = "<="
    EQ = "=="
    NOT_EQ = "!="
    AND_ = "and"
    OR_ = "or"
    ASSIGN = "="
    # Control-flow and built-in quadruple opcodes.
    GOTO = "GOTO"
    GOTOF = "GOTOF"
    RETURN_ = "RETURN"
    GOSUB = "GOSUB"
    PARAM = "PARAM"
    ERA = "ERA"
    ENDPROC = "ENDPROC"
    PRINT = "PRINT"
    PRINTLN = "PRINTLN"
    READT = "READT"
    READA = "READA"
    READC = "READC"
    WRITEF = "WRITEF"
    PLOT = "PLOT"
    ALLOC = "ALLOC"
    VER = "VER"
class VecFilters(Enum):
    """Built-in vector filter names recognised by the Doflir compiler."""

    F_SUM = "f_sum"
    F_MEAN = "f_mean"
    F_VAR = "f_var"
    F_MIN = "f_min"
    F_MAX = "f_max"
    F_STD = "f_std"
    F_NORMALIZE = "f_normalize"
    F_SQUARE = "f_square"
    F_CUBE = "f_cube"
    F_STRIP = "f_strip"
    F_LOWERCASE = "f_lowercase"
    F_UPPERCASE = "f_uppercase"
    F_SORT = "f_sort"
    F_REVERSE = "f_reverse"
class SemanticCube(object):
    """Hold the semantic considerations table for Doflir."""

    def __init__(self):
        self._setup_op_categories()
        self._setup_cube()
        self._setup_enums_map()
        self._setup_filter_reduce()

    def _setup_filter_reduce(self):
        """Defines semantic considerations for filter's reduction to var."""
        # Filters in this tuple collapse a vector into a scalar result.
        reducing = (
            VecFilters.F_SUM, VecFilters.F_MEAN, VecFilters.F_MIN,
            VecFilters.F_MAX, VecFilters.F_STD, VecFilters.F_VAR,
        )
        self._filter_reduce = {flt: flt in reducing for flt in VecFilters}

    def _setup_op_categories(self):
        """Defines groups of operations by their function."""
        self._NUM_OPS = [
            Ops.PLUS, Ops.MINUS, Ops.MULT, Ops.DIV, Ops.INT_DIV, Ops.POW,
            Ops.MAT_MULT, Ops.DOT,
        ]
        self._VEC_OPS = [Ops.MAT_MULT, Ops.DOT, Ops.PLUS, Ops.MINUS]
        self._REL_OPS = [
            Ops.AND_, Ops.OR_, Ops.GT, Ops.GT_EQ, Ops.LT, Ops.LT_EQ,
            Ops.EQ, Ops.NOT_EQ,
        ]

    def _setup_enums_map(self):
        """Provides conversion mechanisms between operation codes and names."""
        self._ops_map = {op.value: op for op in Ops}
        self._var_types_map = {vt.value: vt for vt in VarTypes}
        self._vec_filters_map = {vf.value: vf for vf in VecFilters}

    def _setup_cube(self):
        """Provides expected output type for a pair of operands and op."""
        cube = {}
        # Numeric operations: int op int -> int, anything with a float -> float.
        numeric = (VarTypes.INT, VarTypes.FLOAT)
        for op in self._NUM_OPS:
            for left in numeric:
                for right in numeric:
                    promote = VarTypes.FLOAT in (left, right)
                    cube[(left, right, op)] = VarTypes.FLOAT if promote else VarTypes.INT
        # True division always produces float; floor division always produces int.
        cube[(VarTypes.INT, VarTypes.INT, Ops.DIV)] = VarTypes.FLOAT
        cube[(VarTypes.FLOAT, VarTypes.INT, Ops.INT_DIV)] = VarTypes.INT
        cube[(VarTypes.INT, VarTypes.FLOAT, Ops.INT_DIV)] = VarTypes.INT
        cube[(VarTypes.FLOAT, VarTypes.FLOAT, Ops.INT_DIV)] = VarTypes.INT
        # Relational/logical operators always yield bool.
        rel_types = (VarTypes.BOOL, VarTypes.INT, VarTypes.FLOAT, VarTypes.STRING)
        for op in self._REL_OPS:
            for operand in rel_types:
                cube[(operand, operand, op)] = VarTypes.BOOL
            cube[(VarTypes.FLOAT, VarTypes.INT, op)] = VarTypes.BOOL
            cube[(VarTypes.INT, VarTypes.FLOAT, op)] = VarTypes.BOOL
        # String concatenation.
        cube[(VarTypes.STRING, VarTypes.STRING, Ops.PLUS)] = VarTypes.STRING
        # Element-wise / matrix vector operations.
        for op in self._VEC_OPS:
            cube[(VarTypes.VECTOR, VarTypes.VECTOR, op)] = VarTypes.VECTOR
        self._cube = cube

    def is_reduced(self, vec_filter):
        """Accessor for the vec filtering semantic considerations."""
        return self._filter_reduce[vec_filter]

    def result_type(self, op_1_type, op_2_type, operator):
        """Accessor for the semantic cube; returns None for invalid triples."""
        return self._cube.get((op_1_type, op_2_type, operator))

    def result_type_str(self, op_1_type, op_2_type, operator):
        """Accessor for the semantic cube but takes a txt instead of enum."""
        return self.result_type(
            op_1_type=self.type_to_enum(type_str=op_1_type),
            op_2_type=self.type_to_enum(type_str=op_2_type),
            operator=self.op_to_enum(op_str=operator),
        )

    def type_to_enum(self, type_str):
        """Shorthand method for conversion of names to enum types."""
        return self._var_types_map[type_str]

    def op_to_enum(self, op_str):
        """Shorthand method for conversion of names to enum ops."""
        return self._ops_map[op_str]

    def filter_to_enum(self, filter_str):
        """Shorthand method for conversion of names to enum filters."""
        return self._vec_filters_map[filter_str]
| Irvel/doflir | SemanticCube.py | SemanticCube.py | py | 6,726 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "enum.Enum",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "enum.Enum",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "enum.Enum",
"line_number": 52,
"usage_type": "name"
}
] |
18536127088 | import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# --- Load per-level target lists and their per-track correlation scores. ---

def _load_level_df(targets_csv, corr_suffix):
    """Load one annotation level's target table and attach its test/valid/train
    per-track correlations (models trained on that single level)."""
    frame = pd.read_csv(targets_csv, sep='\t').rename(columns={'Unnamed: 0': 'Index per level'})
    base = ('/exports/humgen/idenhond/data/evaluate_correlation/'
            'correlation_per_track_{0}_humanatac_{1}.csv')
    for split, column in (('test', 'Test correlation'),
                          ('valid', 'Valid correlation'),
                          ('train', 'Train correlation')):
        corr = pd.read_csv(base.format(split, corr_suffix), index_col=0).rename(columns={'0': column})
        frame[column] = corr[column]
    return frame

df_aclevel = _load_level_df('/exports/humgen/idenhond/data/basenji_preprocess/human_atac_targets_Ac-level_cluster.csv', 'aclevel')
df_subclass = _load_level_df('/exports/humgen/idenhond/data/basenji_preprocess/human_atac_targets_Subclass.csv', 'subclass')
df_class = _load_level_df('/exports/humgen/idenhond/data/basenji_preprocess/human_atac_targets_Class.csv', 'class')

# Concatenate all levels into one frame.
print(f'Number of tracks ac level: {df_aclevel.shape[0]}')
print(f'Number of tracks subclass: {df_subclass.shape[0]}')
print(f'Number of tracks class: {df_class.shape[0]}')
df = pd.concat([df_class, df_subclass, df_aclevel], ignore_index=True)

# --- Correlations of the single model trained on all 66 human ATAC-seq tracks. ---

def _load_all_tracks_df(split, column):
    """Load per-track correlations of the all-tracks model for one data split."""
    path = ('/exports/humgen/idenhond/data/evaluate_correlation/'
            'correlation_per_track_{0}_humanatac.csv').format(split)
    # tail(-1) drops the header-like first row, mirroring the original files.
    return pd.read_csv(path).tail(-1).rename(columns={'Unnamed: 0': 'Index old', '0': column})

df = df.merge(_load_all_tracks_df('test', 'Test correlation All tracks'), left_on='Index old', right_on='Index old')
df = df.merge(_load_all_tracks_df('valid', 'Validation correlation All tracks'), left_on='Index old', right_on='Index old')
df = df.merge(_load_all_tracks_df('train', 'Train correlation All tracks'), left_on='Index old', right_on='Index old')
print(df.columns)
print(df[['Test correlation', 'level']])

print(f'mean correlation score Test: {df["Test correlation All tracks"].mean(axis=0):.4f}')
print(f'mean correlation score Test per class: {df["Test correlation"].mean(axis=0):.4f}')
print(f'mean correlation score Test class: {df[df["level"] == "Class"]["Test correlation All tracks"].mean(axis=0):.4f}')
print(f'mean correlation score Test subclass: {df[df["level"] == "Subclass"]["Test correlation All tracks"].mean(axis=0):.4f}')
print(f'mean correlation score Test ac level: {df[df["level"] == "Ac-level cluster"]["Test correlation All tracks"].mean(axis=0):.4f}')
print(f'mean correlation score Validation: {df["Validation correlation All tracks"].mean(axis=0):.4f}')
print(f'mean correlation score Train: {df["Train correlation All tracks"].mean(axis=0):.4f}')
# TODO(review): this exit() stops the script before any figure below is produced;
# it looks like a leftover debugging gate — remove it to regenerate the plots.
exit()

# --- Single scatterplot: all-tracks model vs per-level models (train set). ---
plt.figure()
plt.axline((0, 0), (1, 1), linewidth=0.5, color='k', linestyle='dashed')
# BUG FIX: this figure is titled/saved as the train set but previously plotted
# the validation columns; plot the train correlations instead.
sns.scatterplot(data=df, x='Train correlation All tracks', y='Train correlation', hue='level')
plt.xlabel('Model trained on all pseudo bulk cell type profiles')
plt.ylabel('Models trained on pseudo bulk cell type profiles ')
plt.title('Train set')
plt.legend(title='Model level')
plt.savefig('/exports/humgen/idenhond/projects/enformer/correlation/plots_paper/Plots_paper/Fig3_ATAC/atac_perlevel_scatterplot_train.png', bbox_inches='tight', dpi=300)
plt.close()

# --- Three-panel comparison figure: test / validation / train. ---
fig, (ax1, ax2, ax3) = plt.subplots(1, ncols=3, sharex=True, sharey=True, constrained_layout=True, figsize=(10, 4.8))
sns.despine(top=True, right=True, left=False, bottom=False)
for axis in (ax1, ax2, ax3):
    axis.set_aspect('equal', adjustable='box')
    axis.axline((0, 0), (1, 1), linewidth=0.5, color='k', linestyle='dashed')
sns.scatterplot(data=df, x='Test correlation All tracks', y='Test correlation', hue='level', ax=ax1)
sns.scatterplot(data=df, x='Validation correlation All tracks', y='Valid correlation', hue='level', ax=ax2)
sns.scatterplot(data=df, x='Train correlation All tracks', y='Train correlation', hue='level', ax=ax3)
for axis in (ax1, ax2, ax3):
    axis.set_xlabel(None)
    axis.set_ylabel(None)
ax1.get_legend().remove()
ax2.get_legend().remove()
ax1.title.set_text('Test set')
ax2.title.set_text('Validation set')
ax3.title.set_text('Train set')
# Per-panel mean correlation annotations (values computed offline).
ax1.text(0.9, 0.03, '0.531', fontsize=8, ha='center', va='center', transform=ax1.transAxes)
ax2.text(0.9, 0.03, '0.493', fontsize=8, ha='center', va='center', transform=ax2.transAxes)
ax3.text(0.9, 0.03, '0.551', fontsize=8, ha='center', va='center', transform=ax3.transAxes)
ax3.legend(loc='upper left', bbox_to_anchor=(1.1, 1.05))
ax3.get_legend().set_title('Level')
plt.figtext(.5, .17, 'Model trained on all pseudo bulk cell type profiles', fontsize=9, ha='center')
fig.supylabel(' Models trained on one level \nof pseudo bulk cell type profiles', fontsize=9)
plt.savefig('/exports/humgen/idenhond/projects/enformer/correlation/plots_paper/Plots_paper/Fig3_ATAC/atac_perlevel_scatterplot.png', bbox_inches='tight', dpi=300)
| icdh99/LUMC_internship_enformer_continual | enformer/correlation/plots_paper/correlation_atac.py | correlation_atac.py | py | 9,010 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pandas.read_csv",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"l... |
73815172026 | import selenium.webdriver
from bonobo_selenium._version import __version__
# Desktop Safari user-agent string used to mask the automated browser.
USER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_3) AppleWebKit/601.4.4 (KHTML, like Gecko) Version/9.0.3 Safari/601.4.4'
def create_profile(use_tor=False):
    """Build a Firefox profile, optionally routed through a local Tor SOCKS proxy."""
    profile = selenium.webdriver.FirefoxProfile()
    profile.set_preference("toolkit.startup.max_resumed_crashes", "-1")
    if use_tor:
        # Point all traffic at the default local Tor SOCKS endpoint.
        for pref, value in (('network.proxy.type', 1),
                            ('network.proxy.socks', '127.0.0.1'),
                            ('network.proxy.socks_port', 9050)):
            profile.set_preference(pref, value)
    # Spoof the user agent so the browser looks like desktop Safari.
    profile.set_preference("general.useragent.override", USER_AGENT)
    return profile
def create_browser(profile):
    """Open a Firefox window configured with the given profile."""
    return selenium.webdriver.Firefox(profile)
def create_chrome_browser():
    """Open a Chrome window with default options."""
    return selenium.webdriver.Chrome()
# Public API of the package.
# NOTE(review): create_chrome_browser is not exported here — confirm whether
# the omission is intentional.
__all__ = [
'USER_AGENT',
'__version__',
'create_browser',
'create_profile',
]
| python-bonobo/bonobo-selenium | bonobo_selenium/__init__.py | __init__.py | py | 1,078 | python | en | code | 4 | github-code | 6 | [
{
"api_name": "selenium.webdriver.webdriver.FirefoxProfile",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.webdriver",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "selenium.webdriver",
"line_number": 9,
"usage_type": "name"
... |
7894457497 | from flask import Flask, render_template, request
import os
import json
from nova_code import start_vm
from swift_code import upload_to_container, check_file_exists
# Swift object-storage container names for raw uploads and finished renders.
container_upload = 'uploads'
container_download = 'rendered'

# Read the cloud credentials/config injected via the CRED_FILE environment
# variable. BUG FIX: use a context manager so the file handle is closed
# (the original open() was never closed).
with open(os.environ['CRED_FILE']) as _cred_file:
    environ = json.load(_cred_file)['CONFIG']['CONFIG_VARS']

app = Flask(__name__)
# NOTE(security): debug mode must not be enabled in production deployments.
app.debug = True
@app.route("/example", methods=['GET', 'POST'])
def example(btn_clicked=""):
if request.method == 'POST':
filename = request.form['files']
mail = request.form['mail']
f = open("examples/" + filename, 'r')
obj_name = upload_to_container(f, container_upload, environ)
start_vm(container_upload, obj_name, container_download, environ, mail)
return render_template('upload.jinja', btn_clicked=obj_name)
else:
files = os.listdir("examples")
return render_template('upload.jinja', btn_clicked='example', files=files)
@app.route("/")
@app.route('/upload', methods=['GET', 'POST'])
def upload_file(btn_clicked=""):
if request.method == 'POST':
f = request.files['FileToUpload']
mail = request.form['mail']
obj_name = upload_to_container(f, container_upload, environ)
start_vm(container_upload, obj_name, container_download, environ, mail)
return render_template('upload.jinja', btn_clicked=obj_name)
else:
return render_template('upload.jinja', btn_clicked='no')
@app.route('/file/<filename>')
def show_output(filename):
    """Offer the rendered file for download, or a wait page while it is pending."""
    ready = check_file_exists(filename, container_download, environ)
    template = 'download.jinja' if ready else 'wait.jinja'
    return render_template(template, filename=filename)
if __name__ == "__main__":
app.run(host='0.0.0.0', port=int(os.environ['PORT']))
| stepanvanecek/cah-blender | main.py | main.py | py | 1,784 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "json.load",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "flask.Flask",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "flask.request.method",
"line_... |
73825796027 | """
分类算法应用案例-汽车金融预测用户是否会贷款买车
"""
from sklearn import tree
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.neighbors import KNeighborsClassifier
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.metrics import roc_curve, auc
import pydotplus
# Path to the input data file
data_path = 'C:/Users/Lenovo/Desktop/car.csv'
# Read the data file (GBK-encoded CSV)
data_frame = pd.read_csv(data_path, encoding='gbk')
# print(data_frame.head())
# Split into train and test sets (last column is the label, 30% held out)
X_train, X_test, y_train, y_test = train_test_split(data_frame.values[:, :-1], data_frame.values[:, -1], test_size=0.3)
# Decision-tree tuning --- 1. split criterion
def adjust_criterion():
    """Grid-search the decision-tree split criterion (gini vs entropy) by ROC AUC."""
    search = GridSearchCV(
        estimator=DecisionTreeClassifier(),
        param_grid={'criterion': ['gini', 'entropy']},
        scoring='roc_auc',
    )
    search.fit(X_train, y_train)
    print('best_params:{0}'.format(search.best_params_))
    print('best_score:{0}'.format(search.best_score_))
# Decision-tree tuning --- 2. tree depth
def adjust_depth():
    """Grid-search max_depth (1-9) for a gini decision tree, scored by ROC AUC."""
    search = GridSearchCV(
        estimator=DecisionTreeClassifier(criterion='gini'),
        param_grid={'max_depth': range(1, 10)},
        scoring='roc_auc',
    )
    search.fit(X_train, y_train)
    print('best_params:{0}'.format(search.best_params_))
    print('best_score:{0}'.format(search.best_score_))
# Decision-tree tuning --- 3. maximum number of leaf nodes
def adjust_max_leaf_nodes():
    """Grid-search max_leaf_nodes (2-99) for a depth-4 gini tree, scored by ROC AUC."""
    search = GridSearchCV(
        estimator=DecisionTreeClassifier(criterion='gini', max_depth=4),
        param_grid={'max_leaf_nodes': range(2, 100)},
        scoring='roc_auc',
    )
    search.fit(X_train, y_train)
    print('best_params:{0}'.format(search.best_params_))
    print('best_score:{0}'.format(search.best_score_))  # best found: 14
# KNN tuning: number of neighbours K
def adjust_k():
    """Grid-search n_neighbors (1-19) for KNN, scored by ROC AUC."""
    search = GridSearchCV(
        estimator=KNeighborsClassifier(),
        param_grid={'n_neighbors': range(1, 20)},
        scoring='roc_auc',
    )
    search.fit(X_train, y_train)
    print('best_params:{0}'.format(search.best_params_))
    print('best_score:{0}'.format(search.best_score_))  # best found: 3
# Fit each tuned model and draw its ROC curve on one chart
def plot_roc(clfs):
    """Fit each classifier on the train split and plot its test-set ROC curve.

    :param clfs: list of sklearn classifiers (with chosen hyper-parameters)
    :return: None
    """
    for index, clf in enumerate(clfs):
        clf.fit(X_train, y_train)
        # BUG FIX: removed the unused `pre = clf.predict(X_test)` call — its
        # result was never used and it cost an extra full prediction pass.
        # Probability of the positive class for every test sample.
        y_prb_1 = clf.predict_proba(X_test)[:, 1]
        # False-positive rate, true-positive rate and decision thresholds.
        fpr, tpr, thresholds = roc_curve(y_test, y_prb_1)
        roc_auc = auc(fpr, tpr)
        plt.plot(fpr, tpr, label='{0}_AUC = {1:.2f}'.format(index, roc_auc))
    # Matplotlib settings so Chinese labels render correctly.
    plt.rcParams['font.sans-serif'] = ['SimHei']
    plt.rcParams['axes.unicode_minus'] = False
    plt.title('ROC曲线')
    # Axis ranges with a small margin around [0, 1].
    plt.xlim([-0.05, 1.05])
    plt.ylim([-0.05, 1.05])
    plt.legend(loc='lower right')
    # Diagonal reference line (random classifier).
    plt.plot([0, 1], [0, 1], 'r--')
    plt.ylabel('命中率')
    plt.xlabel('误判率')
    plt.show()
# Export the tuned decision tree as an image
def out_image():
    """Fit the tuned decision tree and write its graphviz rendering as a PNG
    next to the data file."""
    model = DecisionTreeClassifier(criterion='gini', max_depth=4, max_leaf_nodes=14)
    model.fit(X_train, y_train)
    dot_source = tree.export_graphviz(model, out_file=None, filled=True, rounded=True)
    graph = pydotplus.graph_from_dot_data(dot_source)
    graph.write_png(data_path.replace('.csv', '.png'))
# Entry point; earlier tuning/plotting experiments are kept commented out for reference.
if __name__ == '__main__':
# adjust_criterion()
# adjust_depth()
# adjust_max_leaf_nodes()
# adjust_k()
# clfs = [DecisionTreeClassifier(criterion='gini', max_depth=4, max_leaf_nodes=14), KNeighborsClassifier(n_neighbors=3)]
# plot_roc(clfs)
out_image() | ghostlyFeng/ML | Cluster/car.py | car.py | py | 4,212 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pandas.read_csv",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.GridSearchCV",
"line_number": 30,
"usage_type": "call"
... |
19007770169 | import traceback,json,pdb
from datetime import date,timedelta,datetime
import pandas as pd
from flask import jsonify
from backEnd.database.db_connection import set_connection
from answergen import create_single_column_response,create_multi_column_response,get_highlight_response
from frontendAPI import city_region_mapping
# Crawl date used by all queries below: today's date as 'YYYY-MM-DD'.
todays_date = str(datetime.now().date())
def get_table_df(table_name, db_name, movie_name):
    """Return per-city occupancy/show-count aggregates for *movie_name*
    from today's 18:00 crawl.

    Returns None when the query fails (the traceback is printed, mirroring
    the original behaviour).
    """
    connection = set_connection(db_name)
    try:
        # NOTE(security): table and movie names are interpolated directly into
        # the SQL string; parameterize if movie_name can carry user input.
        query = (
            'SELECT avg(Seat_Percent) as occupancy,count(*) as shows,Crawl_Hour,City_Name,Movie_Name,Show_date '
            'FROM {0} where Movie_Name = "{1}" and Crawl_Hour = 18 and Show_Date = "{2}" group by '
            'Crawl_Hour,City_Name,Show_Date,Movie_Name'.format(table_name, movie_name, todays_date)
        )
        table = pd.read_sql(query, con=connection)
        table = table.fillna('')
        # Shorten the unwieldy BookMyShow region label.
        table = table.replace('National-Capital-Region-NCR', 'NCR')
        return table
    except Exception:
        print(traceback.format_exc())
    finally:
        # BUG FIX: always release the DB connection (was never closed).
        connection.close()
def get_response_city(movie_name,sortby,filterby,groupby=False):
"""Build an HTML snippet listing top and bottom cities for *sortby*.

sortby is 'occupancy' or 'shows'; filterby may contain 'highest'/'lowest'
to narrow the headline to a single city. groupby is currently unused.
"""
# Defaults: headline the top 4 cities, sorted descending.
# NOTE: 'filterAlais' is a typo for 'filterAlias', kept as-is for compatibility.
mid_txt = " Cities with highest {} are ".format(sortby)
resp_count = 4
sort = False
filterAlais = "Top cities"
# Human-readable unit suffix for the chosen metric.
sortbyAlias=sortby
if sortby.lower() == "occupancy":
sortbyAlias = "% occupancy"
if sortby.lower() == "shows":
sortbyAlias = " shows"
# Drill down when the caller asked specifically for highest/lowest.
if filterby:
if "highest" in filterby.lower():
resp_count = 1
mid_txt = "City with highest {} is ".format(sortby)
sort = False
filterAlais=" Cities with higher {}".format(sortby)
if "lowest" in filterby.lower():
resp_count = 1
mid_txt = "City with lowest {} is ".format(sortby)
sort = True
filterAlais = " Cities with lower {}".format(sortby)
df_table = get_table_df('BMS_Regional_Occupancy', 'disney', movie_name)
print(df_table)
df_table['occupancy'] = round(df_table['occupancy'],2)
# Headline sentence, then a top-3 list, then (sort flipped) a bottom-3 list.
cityAns = create_single_column_response(df_table, 'City_Name',mid_txt, n_answer=resp_count, sort_by=sortby,
sort_asc=sort)
cityAns+='<br><br/>'
cityAns+=create_multi_column_response(df_table, 'City_Name', sortby, '{} include '.format(filterAlais),
answer_suffix=sortbyAlias, answer_prefix=' with ', n_answer=3, sort_by=sortby, sort_asc=sort)
sort = not sort
cityAns+='<br><br/>'
cityAns+=create_multi_column_response(df_table, 'City_Name', sortby, " Cities with lower {} include ".format(sortby),
answer_suffix=sortbyAlias, answer_prefix=' with ', n_answer=3, sort_by=sortby, sort_asc=sort)
return cityAns
def get_response_performance_city(movie_name,sortby,filterby,groupby=False):
"""Build an HTML snippet ranking cities by performance share for *sortby*.

filterby may contain 'highest'/'lowest' to narrow the headline to one city.
groupby is currently unused.
"""
# Defaults: headline the top 4 cities, sorted descending.
mid_txt = " Cities with highest performance {} are ".format(sortby)
resp_count = 4
sort = False
filterAlais = " Cities with high performance"
# Performance is reported as a percentage share.
# sortbyAlias=sortby
sortbyAlias = "%"
# Drill down when the caller asked specifically for highest/lowest.
if filterby:
if "highest" in filterby.lower():
resp_count = 1
mid_txt = "City with highest performance {} is ".format(sortby)
sort = False
filterAlais=" Cities with high performance {}".format(sortby)
if "lowest" in filterby.lower():
resp_count = 1
mid_txt = "City with lowest performance {} is ".format(sortby)
sort = True
filterAlais = " Cities with low performance {}".format(sortby)
# Fetch today's per-city aggregates from the DB.
df_table = get_table_df('BMS_Regional_Occupancy', 'disney', movie_name)
# Presumably adds 'volume' and 'percentage' columns (helper defined elsewhere) —
# TODO confirm against its definition.
df_table = df_with_performance_volume_percentage(df_table)
print(df_table)
print(df_table['percentage'].sum())
# Convert the dataframe into a readable text response.
perfAns = create_single_column_response(df_table, 'City_Name',mid_txt, n_answer=resp_count, sort_by=sortby,
sort_asc=sort)
perfAns+='<br><br/>'
perfAns+=create_multi_column_response(df_table, 'City_Name', sortby, '{} include '.format(filterAlais),
answer_suffix=sortbyAlias, answer_prefix=' with approx ', n_answer=3, sort_by=sortby,
sort_asc=sort)
print(perfAns)
# Flip the sort to also report the opposite end of the ranking.
sort = not sort
perfAns+='<br><br/>'
perfAns+=create_multi_column_response(df_table, 'City_Name', sortby, " Cities with lower performance include ",
answer_suffix=sortbyAlias, answer_prefix=' with ', n_answer=3, sort_by=sortby, sort_asc=sort)
return perfAns
def get_response_performance_region(movie_name,sortby,filterby,regionName):
"""Build an HTML performance ranking restricted to one Indian region.

regionName keys into city_region_mapping.region_list; unknown regions fall
through to the generic "not available" message via the except branch.
"""
# Defaults: headline the top 4 cities of the region, sorted descending.
mid_txt = " Cities with highest performance {0} in {1} India are ".format(sortby,regionName)
resp_count = 4
sort = False
filterAlais = " Cities with high performance in {} India".format(regionName)
# Performance is reported as a percentage share.
# sortbyAlias=sortby
sortbyAlias = "%"
try:
# Drill down when the caller asked specifically for highest/lowest.
if filterby:
if "highest" in filterby.lower():
resp_count = 1
mid_txt = "City with highest performance {0} in {1} India is ".format(sortby,regionName)
sort = False
filterAlais=" Cities with high performance {0} in {1} India ".format(sortby,regionName)
if "lowest" in filterby.lower():
resp_count = 1
mid_txt = "City with lowest performance {0} in {1} India is ".format(sortby,regionName)
sort = True
filterAlais=" Cities with low performance {0} in {1} India ".format(sortby,regionName)
# Fetch today's per-city aggregates from the DB.
df_table = get_table_df('BMS_Regional_Occupancy', 'disney', movie_name)
# Keep only the cities mapped to the requested region.
city_list = city_region_mapping.region_list[regionName.lower()]
df_table = df_table[df_table['City_Name'].isin(city_list)]
# Presumably adds 'volume' and 'percentage' columns (helper defined elsewhere) —
# TODO confirm against its definition.
df_table = df_with_performance_volume_percentage(df_table)
print(df_table)
print(df_table['percentage'].sum())
# Convert the dataframe into a readable text response.
perfAns = create_single_column_response(df_table, 'City_Name',mid_txt, n_answer=resp_count, sort_by=sortby,
sort_asc=sort)
perfAns+='<br><br/>'
perfAns+=create_multi_column_response(df_table, 'City_Name', sortby, '{} include '.format(filterAlais),
answer_suffix=sortbyAlias, answer_prefix=' with approx ', n_answer=3, sort_by=sortby,
sort_asc=sort)
print(perfAns)
# Flip the sort to also report the opposite end of the ranking.
sort = not sort
perfAns+='<br><br/>'
perfAns+=create_multi_column_response(df_table, 'City_Name', sortby, " Cities with lower performance include ",
answer_suffix=sortbyAlias, answer_prefix=' with ', n_answer=3, sort_by=sortby, sort_asc=sort)
except Exception as e:
# Any failure (missing region, empty data) degrades to a friendly message.
print(e)
perfAns = "Data for {} is not available".format(regionName)
return (perfAns)
def get_BMS_likes(movie_name):
    """Build a BookMyShow "likes" summary string for *movie_name*.

    Reads the BMS_User_Likes table, reports the current like count and -- when
    matching historical rows exist -- the day-over-day delta and the
    week-over-week percentage change.  Any lookup failure (no row for today,
    empty table, DB error) degrades to a "Data not available" message.
    """
    # Removed three unused locals (current_date / week_before / yesterday_date)
    # that were computed from hard-coded 2019 dates but never read.
    connection = set_connection('disney')
    table_name = "BMS_User_Likes"
    table = pd.read_sql(
        'SELECT Likes,Crawl_Date from {0} where Movie = "{1}" '
        .format(table_name,movie_name), con=connection)
    table = table.sort_values(by='Crawl_Date')
    print(table)
    ans_likes = ""
    try:
        # Row crawled today; IndexError on `.values[0]` (no row) is caught below.
        current_likes = table[table['Crawl_Date']==datetime.now().date()]
        current_likes = int(current_likes['Likes'].values[0])
        print(current_likes)
        ans_likes = "{0} has {1} likes.".format(movie_name,get_highlight_response(current_likes))
        # NOTE(review): `todays_date` must be a module-level ISO date string
        # defined elsewhere in this file -- confirm it tracks the real date.
        yesterdays_likes = table[table['Crawl_Date']==date_shifter(todays_date,-1)]
        yesterdays_likes = int(yesterdays_likes['Likes'].values[0])
        if yesterdays_likes:
            ans_likes += "<br><br>"
            ans_likes += "Likes has increased by {} since yesterday.".format(get_highlight_response(current_likes-yesterdays_likes))
        likes_week_before = table[table['Crawl_Date']==date_shifter(todays_date,-7)]
        likes_week_before = int(likes_week_before['Likes'].values[0])
        if likes_week_before:
            percentage_increase = (current_likes - likes_week_before)/current_likes*100
            ans_likes += " There is a {}% increase in likes since last week.".format(get_highlight_response(round(percentage_increase,2)))
        print(ans_likes)
    except Exception as e :
        print(e)
    # BUGFIX: the original had an unreachable `return ans_likes` after an
    # if/else in which both branches already returned; the dead line is gone.
    if ans_likes:
        return ans_likes
    return "Data not available for "+movie_name
def get_YT_data(movie_name):
    # Fetch YouTube Likes/Views for *movie_name* on a fixed crawl date.
    # NOTE(review): this looks like an unfinished stub -- `table_name` is empty,
    # which makes the SQL below malformed at runtime, and the fetched frame is
    # never returned or used.  Confirm intent before calling it.
    connection = set_connection('disney')
    table_name = ""
    table = pd.read_sql(
        'SELECT Likes,Views from {0} where Crawl_Date = "2019-10-10" and Movie = "{1}" '.format(table_name,movie_name), con=connection)
def get_distribution_data(movie_name):
    """Build an Average Ticket Price (ATP) summary for one of the known movies.

    Reads the movie's distribution CSV, reports the national mean ticket
    price, then lists the regions with the highest and lowest regional ATP.
    Returns 'Movie not found' for any unknown title.
    """
    csv_by_movie = {
        "WAR": 'War_2019-10-11.csv',
        "The Sky is Pink": 'The_Sky_Is_Pink_2019-10-11.csv',
        "Joker": 'Joker_2019-10-11.csv',
    }
    if movie_name not in csv_by_movie:
        return 'Movie not found'
    distribution_table = pd.read_csv(csv_by_movie[movie_name])
    # National ATP before any grouping, rounded to the nearest rupee.
    atp_national = round(distribution_table['Ticket Price'].mean(axis=0))
    distribution_table = distribution_table.groupby(['Theatre Region']).agg({'Ticket Price': ['mean']})
    print(distribution_table)
    print(atp_national)
    #default
    sortby = "Ticket Price_mean"
    sort = False
    #test alias
    sortbyAlias = "₹"
    # Flatten the MultiIndex produced by .agg and restore the region column.
    distribution_table = flatten_columns(distribution_table)
    distribution_table = distribution_table.reset_index(level=0)
    print(distribution_table)
    perfAns = "Average Ticket Price for {0} is {1}₹.".format(get_highlight_response(movie_name),get_highlight_response(atp_national))
    perfAns += '<br><br>'
    distribution_table = distribution_table.round(2)
    perfAns += create_multi_column_response(distribution_table, 'Theatre Region', sortby, " Cities with higher Average Ticket Price (ATP) include ",
                    answer_suffix=sortbyAlias, answer_prefix=' with ', n_answer=3, sort_by=sortby, sort_asc=sort)
    # Flip direction to list the cheapest regions as well.
    sort = not sort
    perfAns += '<br><br>'
    perfAns += create_multi_column_response(distribution_table, 'Theatre Region', sortby, " Cities with lower ATP include are ",
                    answer_suffix=sortbyAlias, answer_prefix=' with ', n_answer=3, sort_by=sortby, sort_asc=sort)
    return perfAns
def date_shifter(date_in,day_shift,string_resp=False):
    """Shift an ISO 'YYYY-MM-DD' date string by *day_shift* days.

    Returns a ``datetime.date`` by default, or its ``str()`` form when
    *string_resp* is truthy.
    """
    parsed = datetime.strptime(date_in, '%Y-%m-%d').date()
    shifted = parsed + timedelta(days=day_shift)
    return str(shifted) if string_resp else shifted
def df_with_performance_volume_percentage(df_in):
    """Add 'Volume' and 'percentage' columns to an occupancy dataframe.

    Mutates *df_in* in place and returns it: occupancy is rounded, Volume is
    shows * rounded occupancy, and percentage is each row's share (in %) of
    the total volume, rounded to two decimals.
    """
    df_in['occupancy'] = df_in['occupancy'].round()
    df_in['Volume'] = df_in['shows'] * df_in['occupancy']
    total_volume = df_in['Volume'].sum()
    df_in['percentage'] = ((df_in['Volume'] / total_volume) * 100).round(2)
    return df_in
def flatten_columns(df):
    """Collapse MultiIndex column headers into flat 'level1_level2' names.

    Empty trailing levels do not leave trailing underscores.  Mutates *df*'s
    columns in place and returns it.
    """
    flat_names = []
    for levels in df.columns.values:
        flat_names.append('_'.join(levels).rstrip('_'))
    df.columns = flat_names
    return df
#get_BMS_likes('Frozen 2')
# get_response('Bharat','','shows','')
# get_response_performance_city('Bharat','percentage','highest')
# get_response_performance_region('Bharat','percentage','','All')
# print(get_highlight_response("cacha"))
# print(date_shifter('2019-10-10' ,-1))
# get_distribution_data('Joker')
| divakar-yadav/Backend-APIs | frontendAPI/executor.py | executor.py | py | 11,994 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "datetime.datetime.now",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "backEnd.database.db_connection.set_connection",
"line_number": 13,
"usage_type": "call"
},
{
... |
13041202153 | import falcon
import json
import logging
logger = logging.getLogger(__name__)
class Correlation:
    """Falcon resource that will report the correlation between two series."""

    def __init__(self, store):
        # Data store the correlation is (eventually) computed from.
        self.__store = store

    def on_get(self, req, resp):
        """Handle GET: both 'series1' and 'series2' query params are required."""
        params = req.params
        logger.info('request: {}'.format(params))
        missing_series = 'series1' not in params or 'series2' not in params
        if missing_series:
            resp.status = falcon.HTTP_400
            resp.body = json.dumps({'error message': 'bad series parameters'})
            return
        # Placeholder payload until the real correlation is wired up.
        resp.status = falcon.HTTP_200
        resp.body = json.dumps({'corr': [[1, 2], [2, 3]]})
| Qinode/final-visual-api | src/resources/data/corr.py | corr.py | py | 576 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "falcon.HTTP_400",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "json.dumps",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "falcon.HTTP_200",
... |
73076321467 | from __future__ import annotations
import os
from typing import Callable, TYPE_CHECKING
if TYPE_CHECKING:
from bot.translator import Translator
# Application identity; client_name is the name reported to TeamTalk servers.
app_name = "TTMediaBot"
app_version = "2.3.1"
client_name = app_name + "-V" + app_version
# Localized "about" text, evaluated lazily so the active translator is used.
about_text: Callable[[Translator], str] = lambda translator: translator.translate(
    """\
A media streaming bot for TeamTalk.
Authors: Amir Gumerov, Vladislav Kopylov, Beqa Gozalishvili, Kirill Belousov.
Home page: https://github.com/gumerov-amir/TTMediaBot\
License: Mit License\
"""
)
# Service key to fall back to when the requested service fails.
fallback_service = "yt"
# Main-loop polling interval, in seconds.
loop_timeout = 0.01
# Hard cap on a single chat message, in characters.
max_message_length = 256
# Maximum number of entries kept in the recents list.
# NOTE(review): "lenth" is a typo, but renaming would break importers.
recents_max_lenth = 32
# Seconds to wait for a TeamTalk event.
tt_event_timeout = 2
# Project root directory: two levels above this file.
directory = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
| gumerov-amir/TTMediaBot | bot/app_vars.py | app_vars.py | py | 715 | python | en | code | 52 | github-code | 6 | [
{
"api_name": "typing.TYPE_CHECKING",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "typing.Callable",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "bot.translator.Translator",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "os.path... |
3439919651 | from sortedcontainers import SortedDict
class Node:
    """Doubly linked list node: `last` points backward, `next` forward."""

    def __init__(self, val=None):
        self.val = val
        self.last = None
        self.next = None
class MaxStack:
    """Stack supporting O(log n) max retrieval/removal (LeetCode 716).

    Two structures are kept in sync:
      * a circular doubly linked list rooted at a sentinel `self.root`
        (root.next is the stack top) preserving push order;
      * `self.dic`, a SortedDict mapping value -> list of its nodes,
        ordered so peekitem() yields the current maximum.
    """

    def __init__(self):
        self.dic = SortedDict()
        self.root = Node()
        # Sentinel points to itself: an empty circular list.
        self.root.last, self.root.next = self.root, self.root

    def push(self, x: int) -> None:
        """Insert x at the stack top (just after the sentinel)."""
        if x not in self.dic:
            self.dic[x] = []
        node = Node(x)
        # Splice the new node in between root and the old top, in one tuple
        # assignment so the right-hand sides are read before any write.
        self.root.next.last, self.root.next, node.last, node.next = node, node, self.root, self.root.next
        self.dic[x].append(node)

    def pop(self) -> int:
        """Remove and return the top element (assumes a non-empty stack)."""
        node = self.root.next
        # Unlink the top node from the circular list.
        node.next.last, node.last.next = node.last, node.next
        node_lst = self.dic[node.val]
        node_lst.pop()
        # Drop the value's bucket once its last node is gone.
        if len(node_lst) == 0:
            self.dic.pop(node.val)
        return node.val

    def top(self) -> int:
        """Return the top element without removing it."""
        return self.root.next.val

    def peekMax(self) -> int:
        """Return the current maximum without removing it."""
        return self.dic.peekitem()[0]

    def popMax(self) -> int:
        """Remove and return the maximum; of duplicates, the most recent one."""
        val, node_lst = self.dic.peekitem()
        node = node_lst.pop()
        if len(node_lst) == 0:
            self.dic.pop(val)
        # Unlink that specific node wherever it sits in the list.
        node.next.last, node.last.next = node.last, node.next
        return val
# Your MaxStack object will be instantiated and called as such:
# obj = MaxStack()
# obj.push(x)
# param_2 = obj.pop()
# param_3 = obj.top()
# param_4 = obj.peekMax()
# param_5 = obj.popMax()
| cuiy0006/Algorithms | leetcode/716. Max Stack.py | 716. Max Stack.py | py | 1,463 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "sortedcontainers.SortedDict",
"line_number": 13,
"usage_type": "call"
}
] |
43959470416 | import datetime
import os
import random
import sys
from itertools import islice
from typing import List, Generator, Iterator
folder = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'ch03-mem-and-variables'))
sys.path.insert(0, folder)
import size_util
random.seed(42)
def main():
    """Run the generator pipeline end to end, reporting memory per stage."""
    # Took 83 MB in naive mode
    start_mem = report("Starting")
    started_at = datetime.datetime.now()
    original = load_data()
    report("Load")
    filtered = filter_data(original)
    report("filtered")
    scaled = scale_data(filtered, 2.718)
    report("scaled")
    # Need to work with it over and over and index it?
    # scaled = list(scaled)
    print("Head", list(islice(scaled, 0, 10)))
    # Drain the rest of the stream while keeping a 10-item sliding window.
    tail = []
    for value in scaled:
        tail.append(value)
        if len(tail) > 10:
            tail.pop(0)
    print("Tail", tail)
    final_mem = report("done")
    dt = datetime.datetime.now() - started_at
    print(f"Done, mem usage: {final_mem-start_mem:,.0f} MB, in {dt.total_seconds():.2f} sec")
def report(step_name: str):
    """Print the step label and return the current process memory usage."""
    print(step_name + ":", end=' ')
    return size_util.report_process_mem()
def load_data() -> Iterator[int]:
    """Lazily yield random integers in [1_000, 10_000].

    Note: `range(1, 1_000_000)` produces 999,999 values, matching the
    original generator expression exactly.
    """
    for _ in range(1, 1_000_000):
        yield random.randint(1_000, 10_000)
def filter_data(data: Iterator[int]) -> Iterator[int]:
    """Lazily drop every value divisible by 5, passing the rest through."""
    return (value for value in data if value % 5 != 0)
def scale_data(data: Iterator[int], factor: float) -> Iterator[float]:
    """Lazily multiply every incoming value by *factor*."""
    for value in data:
        yield value * factor
if __name__ == '__main__':
main()
| talkpython/python-memory-management-course | code/ch07-mem-and-functions/app_one_at_a_time.py | app_one_at_a_time.py | py | 1,496 | python | en | code | 39 | github-code | 6 | [
{
"api_name": "os.path.abspath",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_nu... |
# Load runtime settings from config.json (at import time) and expose them as
# module-level constants for the rest of the application.
from json import load
with open('config.json', 'r') as file:
    params = load(file)
# Bot API authentication token.
BOT_TOKEN = params['BOT_TOKEN']
# Extra request parameters passed through to the search backend.
# NOTE(review): exact shape of PARAMS is defined only by config.json -- confirm.
PARAMS = params['PARAMS']
# Endpoint used for search queries.
SEARCH_URL = params['SEARCH_URL']
# Database connection settings.
HOST = params['HOST']
PORT = params['PORT']
DB_NAME = params['DB_NAME']
{
"api_name": "json.load",
"line_number": 4,
"usage_type": "call"
}
] |
71442915067 | import sys
import os
import logging
from datetime import datetime
from logging.handlers import TimedRotatingFileHandler
from logging import StreamHandler
from panda3d.core import (
loadPrcFile,
Filename,
ConfigVariableBool,
)
def setup_log(editor_name, log_to_console=False, log_level=logging.DEBUG):
    """Initialise logging and Panda3D config for *editor_name*.

    Creates ~/.{editor_name}/logs, prunes rotated log files older than 30
    days, installs a TimedRotatingFileHandler (plus an optional console
    handler), loads every *.prc file found under the config directory, and
    finally loads -- or creates with defaults -- .{editor_name}.prc.

    Returns a (log_file, config_file) path tuple.
    """
    # check if we have a config file
    home = os.path.expanduser("~")
    basePath = os.path.join(home, f".{editor_name}")
    if not os.path.exists(basePath):
        os.makedirs(basePath)
    logPath = os.path.join(basePath, "logs")
    if not os.path.exists(logPath):
        os.makedirs(logPath)
    # Remove log files older than 30 days
    for f in os.listdir(logPath):
        fParts = f.split(".")
        fDate = datetime.now()
        try:
            # Rotated files end in a timestamp suffix; presumably this matches
            # TimedRotatingFileHandler's hourly "%Y-%m-%d_%H" suffix -- confirm.
            fDate = datetime.strptime(fParts[-1], "%Y-%m-%d_%H")
            delta = datetime.now() - fDate
            if delta.days > 30:
                #print(f"remove {os.path.join(logPath, f)}")
                os.remove(os.path.join(logPath, f))
        except Exception:
            # this file does not have a date ending
            pass
    log_file = os.path.join(logPath, f"{editor_name}.log")
    handler = TimedRotatingFileHandler(log_file)
    logHandlers = [handler]
    if log_to_console:
        consoleHandler = StreamHandler()
        logHandlers.append(consoleHandler)
    logging.basicConfig(
        level=log_level,
        handlers=logHandlers)
    # Load every .prc found anywhere under the config dir.
    # NOTE(review): this walk can also pick up .{editor_name}.prc, which is
    # loaded again below -- presumably harmless double-load; confirm.
    for root, dirs, files in os.walk(basePath):
        for f in files:
            if not f.endswith(".prc"):
                continue
            config_file = os.path.join(root, f)
            loadPrcFile(config_file)
    config_file = os.path.join(basePath, f".{editor_name}.prc")
    if os.path.exists(config_file):
        loadPrcFile(Filename.fromOsSpecific(config_file))
    else:
        # First run: write a default config.
        with open(config_file, "w") as prcFile:
            prcFile.write("skip-ask-for-quit #f\n")
            prcFile.write("frame-enable-scene-editor #t\n")
            prcFile.write("frame-enable-gui-editor #t\n")
    return log_file, config_file
| fireclawthefox/FRAME | panda3d_frame/editorLogHandler.py | editorLogHandler.py | py | 2,086 | python | en | code | 12 | github-code | 6 | [
{
"api_name": "logging.DEBUG",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "os.path.expanduser",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
... |
14950903206 | # author: detoX
import glob
import numpy as np
import torch
import nibabel as nib
from PIL import Image
import nii_dataset
def main():
    """Preview one slice per training scan.

    Shows the sampled slice of every scan whose first dimension is not 128,
    plus the first ten scans that are 128 wide.
    """
    train_path = glob.glob("D:\\xuexi\\post-graduate\\py_projects\\ResNet-PET\\datasets\\Brain-PET\\Train\\*\\*")
    test_path = glob.glob("D:\\xuexi\\post-graduate\\py_projects\\ResNet-PET\\datasets\\Brain-PET\\Test\\*")
    count = 0
    for path in train_path:
        img = nib.load(path)
        img = img.dataobj[:, :, :, 0]
        # Randomly sample 50 axial slices (with replacement).
        idx = np.random.choice(range(img.shape[-1]), 50)
        img = img[:, :, idx]
        print(img.shape)
        slice_img = img[:, :, 3]
        # The raw data is not an image: normalize to [0, 1], scale to the
        # 0-255 grayscale range, and cast to uint8.  BUGFIX: the cast was
        # missing here (display_single_nii already had it), so Image.fromarray
        # produced a float-mode image instead of a grayscale one.
        slice_img = (slice_img / slice_img.max() * 255).astype('uint8')
        slice_img = Image.fromarray(slice_img)
        if img.shape[0] != 128:
            # Use the public .show() instead of the private Image._show helper.
            slice_img.show()
        else:
            if count < 10:
                slice_img.show()
                count += 1
def display_single_nii():
    """Save every axial slice of the first 47-slice training scan as PNGs."""
    train_path = glob.glob("D:\\xuexi\\post-graduate\\py_projects\\ResNet-PET\\datasets\\Brain-PET\\Train\\*\\*")
    test_path = glob.glob("D:\\xuexi\\post-graduate\\py_projects\\ResNet-PET\\datasets\\Brain-PET\\Test\\*")
    for path in train_path:
        img = nib.load(path)
        img = img.dataobj[:, :, :, 0]
        if img.shape[2] != 47:
            continue
        for s in range(47):
            # Normalize each slice to the 0-255 grayscale range.
            slice_img = img[:, :, s]
            slice_img = (slice_img / slice_img.max() * 255).astype('uint8')
            slice_img = Image.fromarray(slice_img)
            slice_img.save("./slice_imgs/nii_50/slice_{}.png".format(s))
        # Only the first matching scan is exported.
        break
if __name__ == '__main__':
# main()
display_single_nii()
| Rah1m2/ResNet-PET | display_nii.py | display_nii.py | py | 2,266 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "glob.glob",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "glob.glob",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "nibabel.load",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.random.choice",
"line_numb... |
41912482635 | from array import array
import datetime
from datetime import datetime, timezone
import requests
import math
from app.core.common import cf
# import json
from app.core.wcommon import wcf
from app.db.database import couch
class WSearch():
    """Query helpers over the CouchDB 'warnings' and 'incidents' databases.

    Every method builds a query against the 'metcap' design document,
    executes it through the shared `couch` client and post-processes the
    rows.  BUGFIX: several methods previously called the module-level
    singleton `ws` instead of `self`, which broke any other instance;
    they now use `self`.  The dozen near-identical "list distinct keys"
    methods are consolidated onto `_distinct_view_keys`.
    """

    def __init__(self) -> None:
        # CAP fields that capSearch()/capSearchLong() know how to filter on.
        self.SEARCH_TAGS = [
            "archived",
            "author",
            "certainty",
            "colour",
            "incident",
            "msgType",
            "phenomenon",
            "severity",
            "source",
            "status",
            # "uuid"
        ]

    # ------------------------------------------------------------------
    # generic helpers
    # ------------------------------------------------------------------
    def getCList(self, query):
        """Run a view query dict and return the matching document ids.

        Example query:
            {'db': 'warnings', 'design': 'metcap',
             'view': 'phenomenon', 'key': 'lightning'}

        Returns the raw JSON payload when CouchDB reports an error
        (i.e. the response has no 'rows' member).
        """
        self.query = query
        self.qs = wcf.getQueryString(self.query)
        self.result = []
        response, status = couch.get(self.qs)
        payload = response.json()
        if 'rows' not in payload.keys():
            return payload
        for doc in payload['rows']:
            self.result.append(doc['id'])
        return self.result

    def _distinct_view_keys(self, db, view):
        """Sorted, de-duplicated keys emitted by *db*/_design/metcap/*view*."""
        self.query = {'db': db,
                      'design': 'metcap',
                      'view': view
                      }
        qs = wcf.getQueryString(self.query)
        response, status = couch.get(qs)
        keys = [doc['key'] for doc in response.json()['rows']]
        return sorted(set(keys))

    def _ids_for_key(self, db, view, key):
        """Document ids matching *key* in a view; None when there are no rows."""
        self.query = {'db': db,
                      'design': 'metcap',
                      'view': view,
                      'key': key
                      }
        qs = f'/{db}/_design/metcap/_view/{view}?key="{key}"'
        response, status = couch.get(qs)
        rows = response.json()['rows']
        if not rows:
            return None
        return [doc['id'] for doc in rows]

    # ------------------------------------------------------------------
    # distinct-value listings (all delegate to _distinct_view_keys)
    # ------------------------------------------------------------------
    def getWarningsArchivedList(self):
        """List of CAP warning archive statuses in the database."""
        return self._distinct_view_keys('warnings', 'archived')

    def getWarningsAreaDescList(self):
        """List of CAP warning area descriptions in the database."""
        return self._distinct_view_keys('warnings', 'areaDesc')

    def getWarningsAuthorList(self):
        """List of CAP warning authors in the database."""
        return self._distinct_view_keys('warnings', 'author')

    def getWarningsCertaintyList(self):
        """List of CAP warning certainties in the database."""
        return self._distinct_view_keys('warnings', 'certainty')

    def getWarningsColourList(self):
        """List of CAP warning colours in the database."""
        return self._distinct_view_keys('warnings', 'colour')

    def getWarningsCustomAreaList(self):
        """List of CAP warning custom areas in the database."""
        return self._distinct_view_keys('warnings', 'customArea')

    def getWarningsMsgTypeList(self):
        """List of CAP warning message types in the database."""
        return self._distinct_view_keys('warnings', 'msgType')

    def getIncidentsNameList(self):
        """List of CAP incident names in the database."""
        return self._distinct_view_keys('incidents', 'name')

    def getIncidentsDescriptionList(self):
        """List of CAP incident descriptions in the database."""
        return self._distinct_view_keys('incidents', 'description')

    def getWarningsPhenomenonList(self):
        """List of CAP warning phenomena in the database."""
        return self._distinct_view_keys('warnings', 'phenomenon')

    def getWarningsSeverityList(self):
        """List of CAP warning severities in the database."""
        return self._distinct_view_keys('warnings', 'severity')

    def getWarningsStatusList(self):
        """List of CAP warning statuses in the database."""
        return self._distinct_view_keys('warnings', 'status')

    # ------------------------------------------------------------------
    # incident lookups
    # ------------------------------------------------------------------
    def getWarningsByIncidentId(self, id):
        """Warning ids attached to incident *id*; None when there are none.

        Incident ids and names must be unique.
        """
        return self._ids_for_key('warnings', 'incident', id)

    def getWarningsByIncidentName(self, name):
        """Warning ids attached to the incident called *name* ([] when none)."""
        incidentId = self.getIncidentByName(name)
        return self._ids_for_key('warnings', 'incident', incidentId) or []

    def getIncidentByName(self, name):
        """Id of the (unique) incident named *name*; None when absent."""
        ids = self._ids_for_key('incidents', 'name', name)
        if ids is None:
            return None
        return str(ids[0])

    # ------------------------------------------------------------------
    # temporal search
    # ------------------------------------------------------------------
    def getWarningsInPeriod(self, onset, expires):
        """Warning ids valid in the interval [onset, expires].

        A warning matches when its onset lies in [onset, now] AND its
        expiry lies in [epoch 0, expires].
        """
        now_epoch = math.floor(datetime.now(timezone.utc).timestamp())
        self.lq = {'db': 'warnings',
                   'design': 'metcap',
                   'view': 'epochToOnset',
                   'startkey': wcf.getCapEpoch(onset),
                   'endkey': now_epoch
                   }
        self.rq = {'db': 'warnings',
                   'design': 'metcap',
                   'view': 'epochToExpires',
                   'startkey': 0,
                   'endkey': wcf.getCapEpoch(expires)
                   }
        # BUGFIX: previously used the module-level `ws` singleton here.
        la = self.getCList(self.lq)
        ra = self.getCList(self.rq)
        return list(set(la).intersection(ra))

    def getWarningsTemporal(self, query):
        """Temporal search driven by a dict with 'onset' and 'expires'."""
        self.query = query
        return self.getWarningsInPeriod(self.query['onset'], self.query['expires'])

    def debug(self, query):
        """Debug entry point; currently identical to getWarningsTemporal()."""
        self.query = query
        return self.getWarningsInPeriod(self.query['onset'], self.query['expires'])

    # ------------------------------------------------------------------
    # spatial search
    # ------------------------------------------------------------------
    def capPolygonSearch(self, query):
        """Warning ids whose polygon overlaps the query polygon.

        Candidates are pre-filtered by bounding box, then checked with an
        exact polygon-overlap test (honouring an optional 'cutoff' key).
        Returns the raw JSON payload on a CouchDB error response.
        """
        self.query = query
        self.iDList = wcf.findMatchingBounds(cf.getBounds(self.query))
        self.q = {'db': 'warnings',
                  'design': 'metcap',
                  'view': 'polygon',
                  'keys': self.iDList
                  }
        self.qs = wcf.getQueryString(self.q)
        self.result = []
        response, status = couch.get(self.qs)
        payload = response.json()
        if 'rows' not in payload.keys():
            return payload
        # Hoisted out of the loop: the query polygon does not change per row.
        query_poly = cf.getQueryPoly(self.query)
        for doc in payload['rows']:
            doc_poly = wcf.getPolygon(doc['value'])
            if 'cutoff' in self.query.keys():
                overlaps = cf.polyOverlaps(doc_poly, query_poly, cutoff=self.query['cutoff'])
            else:
                overlaps = cf.polyOverlaps(doc_poly, query_poly)
            if overlaps:
                self.result.append(doc['id'])
        return self.result

    def getWarningsInHeightRange(self, bottom, top):
        """Warning ids whose [altitude, ceiling] lies within [bottom, top]."""
        self.lq = {'db': 'warnings',
                   'design': 'metcap',
                   'view': 'altitude',
                   'startkey': bottom,
                   'endkey': 2e6
                   }
        self.rq = {'db': 'warnings',
                   'design': 'metcap',
                   'view': 'ceiling',
                   'startkey': 0,
                   'endkey': top
                   }
        # BUGFIX: previously used the module-level `ws` singleton here.
        la = self.getCList(self.lq)
        ra = self.getCList(self.rq)
        return list(set(la).intersection(ra))

    def getWarningsSpatial(self, query):
        """Height-range search driven by a dict with 'altitude'/'ceiling'."""
        self.query = query
        return self.getWarningsInHeightRange(self.query['altitude'], self.query['ceiling'])

    # ------------------------------------------------------------------
    # combined search
    # ------------------------------------------------------------------
    def capSearch(self, query):
        """Set of warning ids matching every criterion present in *query*."""
        self.query = query
        return self.getCAPsIntersection(self.query, self.SEARCH_TAGS)

    def getCAPsIntersection(self, query, tags):
        """Intersect the id sets produced by each search criterion in *query*."""
        rSets = []
        for t in tags:
            if t in query.keys():
                q = {'db': 'warnings',
                     'design': 'metcap',
                     'view': t,
                     'key': query[t]
                     }
                rSets.append(set(self.getCList(q)))
        if 'features' in query.keys():
            if query['features'][0]['geometry']['type'] == 'Polygon':
                rSets.append(set(self.capPolygonSearch(query)))
        if ('onset' in query.keys() and 'expires' in query.keys()):
            rSets.append(set(self.getWarningsInPeriod(query['onset'], query['expires'])))
        if ('incidentName' in query.keys()):
            rSets.append(set(self.getWarningsByIncidentName(query['incidentName'])))
        # Robustness: set.intersection(*[]) raises TypeError, so an empty
        # criteria list now yields an empty result instead of crashing.
        if not rSets:
            return set()
        return set.intersection(*rSets)

    def capSearchLong(self, query):
        """Like capSearch(), but return the full documents instead of ids."""
        self.query = query
        self.query['db'] = 'warnings'
        self.result = []
        for item in self.getCAPsIntersection(self.query, self.SEARCH_TAGS):
            response, status = couch.get(f'/{self.query["db"]}/{item}')
            self.result.append(response.json())
        return self.result

    # ------------------------------------------------------------------
    # attachments
    # ------------------------------------------------------------------
    def _attachment_names(self, view, id):
        """Attachment names for warning *id* via 'capXML'/'capJSON' views.

        Returns None when the warning has no matching rows.
        """
        qs = f'/warnings/_design/metcap/_view/{view}?key="{id}"'
        response, status = couch.get(qs)
        rows = response.json()['rows']
        if not rows:
            return None
        self.result = [doc['value'] for doc in rows]
        return self.result

    def getCapXMLNameByWarning(self, id):
        """CAP XML attachment name(s) for warning *id*, e.g. ['cap_xml']."""
        return self._attachment_names('capXML', id)

    def getWarningCapXML(self, id):
        """Decoded CAP XML attachment of warning *id* (None when absent)."""
        attachments = self.getCapXMLNameByWarning(id)
        if not attachments:
            # Robustness: previously a missing warning raised TypeError here.
            return None
        qs = f'/warnings/{id}/{attachments[0]}'
        response, status = couch.get(qs)
        return response.content.decode()

    def getCapJSONNameByWarning(self, id):
        """CAP JSON attachment name(s) for warning *id*, e.g. ['cap_json']."""
        return self._attachment_names('capJSON', id)

    def getWarningCapJSON(self, id):
        """Decoded CAP JSON attachment of warning *id* (None when absent)."""
        attachments = self.getCapJSONNameByWarning(id)
        if not attachments:
            return None
        qs = f'/warnings/{id}/{attachments[0]}'
        response, status = couch.get(qs)
        return response.content.decode()
###############################################################################
ws = WSearch()
| metno/weamyl-metcap | app/app/core/wsearch.py | wsearch.py | py | 19,290 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "app.core.wcommon.wcf.getQueryString",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "app.core.wcommon.wcf",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "app.db.database.couch.get",
"line_number": 44,
"usage_type": "call"
},
{
... |
22021057101 | from fractions import Fraction
from typing import Generic, TypeVar
import funcy
# generic `NamedTuple`s were only introduced in Python 3.11 - until then we need to
# import from `typing_extensions`
from typing_extensions import NamedTuple
from boiling_learning.io.dataclasses import dataclass
_T = TypeVar('_T')
class DatasetTriplet(NamedTuple, Generic[_T]):
    """Named (train, val, test) triplet holding one dataset of type _T per split."""
    train: _T  # training split
    val: _T  # validation split
    test: _T  # test split
@dataclass(frozen=True)
class DatasetSplits:
    """Fractional train/val/test split specification.

    At most one of the three fractions may be ``None``; ``__post_init__``
    infers it so the three sum to exactly 1.  ``Fraction`` (rather than
    ``float``) keeps the sum check exact.
    """

    train: Fraction | None = None
    test: Fraction | None = None
    val: Fraction | None = Fraction(0)

    def __post_init__(self) -> None:
        # Validate the fractions and infer the (at most one) missing split.
        splits = (self.train, self.val, self.test)
        n_nones = splits.count(None)
        if n_nones > 1:
            raise ValueError(
                'at most one of *train*, *val* and *test* can be inferred (by passing `None`)'
            )
        if n_nones == 1:
            names = ('train', 'val', 'test')
            dct = funcy.zipdict(names, splits)
            for name, split in dct.items():
                if split is None:
                    # The two known splits must themselves sum into (0, 1].
                    others = funcy.omit(dct, [name])
                    others_sum = sum(others.values())
                    if not 0 < others_sum <= 1:
                        raise ValueError(
                            'it is required that 0 < '
                            + ' + '.join(f'*{other}*' for other in others.keys())
                            + ' <= 1'
                        )
                    split = 1 - others_sum
                    # object.__setattr__ bypasses the frozen-dataclass guard.
                    object.__setattr__(self, name, split)
                    splits = (self.train, self.val, self.test)
                    break
        if sum(splits) != 1:
            raise ValueError('*train* + *val* + *test* must equal 1')
        # val may be zero; train and test must be strictly positive.
        if not (0 < self.train < 1 and 0 <= self.val < 1 and 0 < self.test < 1):
            raise ValueError('it is required that 0 < (*train*, *test*) < 1 and 0 <= *val* < 1')
| ruancomelli/boiling-learning | boiling_learning/datasets/splits.py | splits.py | py | 1,911 | python | en | code | 7 | github-code | 6 | [
{
"api_name": "typing.TypeVar",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "typing_extensions.NamedTuple",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "typing.Generic",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "fractions.... |
27260781556 | #Script for the first experiment of the multi-channel DART paper
#In this experiment, the performance of MC-DART is investigated for different number of channels and materials in the phantom,
# all averaged over 100 runs.
#Author,
# Mathé Zeegers,
# Centrum Wiskunde & Informatica, Amsterdam (m.t.zeegers@cwi.nl)
import astra
import numpy as np
import random
import pylab
import sys
import scipy.io
from ObjectAssembler import *
import MCDART
from HelperFunctions import *
from matplotlib.colors import ListedColormap
#Set random seed given by 'run' argument
if(len(sys.argv)>0):
np.random.seed(int(sys.argv[1]))
random.seed(int(sys.argv[1]))
#Path to folder to save the results
RESPATHPREFIX = "../results/MCDARTMaterialChannelExp"
def main():
    """Run the MC-DART material/channel sweep and store per-combination pixel errors.

    Expects the run number as the first CLI argument; it selects the phantom file
    and names the output files. Results are written under RESPATHPREFIX.
    """
    NAngles = 32 #Number of projection angles
    ARM = 'SIRT_CUDA' #Name of Algebraic Reconstruction Method to use
    Startit = 10 #Iterations of the start reconstruction algorithm
    MCDARTit = 10 #Number of MCDART iterations
    ARMit = 10 #Iterations of the reconstruction algorithm in each MCDART iteration for each channel
    FixProb = 0.99 #Fix probability - probability of 1-p of becoming a free pixel
    diskOnly = True #Only let pixels inside the disk contribute to pixel error
    smoothing = False #Use of smoothing in MCDART
    r = 1 #Smoothing radius
    b = 0.2 #Smoothing intensity
    saveSpectrum = True #Save the material spectra
    saveResults = True #Save the results
    DetRed = True #Determines whether reduction of materials in phantom should be deterministic or random (set to False for exact paper results reproduction)
    #Print settings
    print("NAngles: ", NAngles, "\nStartit: ", Startit,"\nMCDARTit: ", MCDARTit,"\nARMit: ", ARMit,"\nFixProb: ", FixProb,"\ndiskOnly: ")
    #Set (and create) specific saving folder
    if(saveResults == True):
        RESPATH = RESPATHPREFIX + "/ExpNAngles" + str(NAngles) + "ARM" + ARM + "Start" + str(Startit) + "MCDART" + str(MCDARTit) + "ARM" + str(ARMit) + "Fix" + str(FixProb)
        if not os.path.exists(RESPATH):
            os.makedirs(RESPATH)
    #Set ranges for channels and materials (to reduce the phantom to)
    minNoMaterials = 2
    maxNoMaterials = 10
    maxChannels = 10
    #Supporting arrays for copying attenuation values of existing materials when another one is added to the phantom
    temp = np.zeros((1,1))
    temp2 = np.zeros((1,1))
    #All pixel errors for this run
    AllPixelErrors = np.zeros((maxNoMaterials-minNoMaterials+1, maxChannels))
    # NOTE(review): RESPATH is only defined when saveResults is True; this branch
    # would raise NameError if saveSpectrum is True while saveResults is False.
    if(saveSpectrum == True):
        if not os.path.exists(RESPATH + "/MaterialSpectra"):
            os.makedirs(RESPATH + "/MaterialSpectra")
    #Loop over all materials and channels
    for noMaterials in range(minNoMaterials, maxNoMaterials+1):
        for noChannels in range(1,maxChannels+1):
            print("Run", sys.argv[1], "#Materials:", noMaterials, ", #Channels:", noChannels)
            #Load the phantom
            if(len(sys.argv)>1):
                TPh = Phantom("Nx128Nclass50Nchan1run" + str(sys.argv[1]) + ".tiff")
            else:
                TPh = Phantom("Nx128Nclass50Nchan1run1.tiff")
            loadPhantomFile(TPh)
            #Compute region of interest for pixel error
            ROI = np.copy(TPh.MatArr)
            if(diskOnly):
                ROI[ROI > 1] = 1
            else:
                ROI.fill(1)
            #Reduce the number of materials in the phantom (deterministically or randomly)
            TPh.MatArr = reduceMaterials(TPh.MatArr, noMaterials, DetRed)
            #Save reduced phantoms for a few configurations (run 1 and 2 or 10 materials)
            if(saveResults and int(sys.argv[1]) == 1):
                if(noMaterials == 2):
                    cmap = ListedColormap(['red', 'blue', 'yellow'], 'indexed')
                    FILEOUT = '../results/plots'
                    filename = 'Nx128Nclass50Nchan1run1CONVERTEDmat2'
                    pylab.imsave(FILEOUT + '/' + filename + '.png', TPh.MatArr, dpi=600, cmap=cmap)
                    pylab.imsave(FILEOUT + '/' + filename + '.eps', TPh.MatArr, cmap=cmap)
                elif(noMaterials == 10):
                    cmap = ListedColormap(['red', 'blue', 'yellow', 'green', 'orange', 'purple', 'brown', 'pink', 'gray', 'olive', 'cyan', 'white'], 'indexed')
                    FILEOUT = '../results/plots'
                    filename = 'Nx128Nclass50Nchan1run1CONVERTEDmat10'
                    pylab.imsave(FILEOUT + '/' + filename + '.png', TPh.MatArr, dpi=600, cmap=cmap)
                    pylab.imsave(FILEOUT + '/' + filename + '.eps', TPh.MatArr, cmap=cmap)
            #Define channels (1 to #noChannels)
            channels = np.arange(1,noChannels+1)
            #Get number of materials in the reduced phantom
            materials = np.unique(TPh.MatArr)
            nomaterials = len(materials)
            #Get number of channels and create random spectra
            Channels = len(channels)
            DiscMaterialSpectra = makeRandomDiscMaterialSpectra(nomaterials, Channels)
            #Copy spectra of previously used materials
            DiscMaterialSpectra[0:temp2.shape[0],:] = temp2[:,0:DiscMaterialSpectra.shape[1]]
            DiscMaterialSpectra[0:temp.shape[0],0:temp.shape[1]] = temp
            #Save the material spectra defined above
            if(saveSpectrum and noMaterials == maxNoMaterials and noChannels == maxChannels):
                # NOTE(review): len(sys.argv) > 0 is always true, so the else branch
                # below is unreachable; the check elsewhere in this file uses > 1.
                if(len(sys.argv) > 0):
                    np.savetxt(RESPATH + "/MaterialSpectra/materialSpectraRun" + str(sys.argv[1]) + ".txt", DiscMaterialSpectra, fmt='%1.3f')
                else:
                    np.savetxt(RESPATH + "/MaterialSpectra/materialSpectra.txt", DiscMaterialSpectra, fmt='%1.3f')
            #Make material labels and attenuation spectra
            del TPh.Labels[:]
            for mat in materials:
                TPh.Labels.append((mat, mat))
            TPh.Labels.sort(key = operator.itemgetter(0))
            for mat in TPh.Labels:
                if(mat[0] != 0 and mat[1] != 0): #Exclude background
                    AtNo = mat[1]
                    if (AtNo > 0):
                        if AtNo not in [i[0] for i in TPh.AttenuationSpectra]: #Check if material is not already there
                            x, y = channels, DiscMaterialSpectra[AtNo][:]
                            # interp1d needs at least two points; fall back to a
                            # constant polynomial for the single-channel case
                            if(noChannels > 1):
                                spectrum = scipy.interpolate.interp1d(x, y)
                            else:
                                spectrum = scipy.poly1d([y[0]])
                            attData = (x, y, spectrum)
                            TPh.AttenuationSpectra.append((AtNo,)+(mat[1],) + attData)
            TPh.AttenuationSpectra.sort(key = operator.itemgetter(0)) #Keep sorted on number
            #Run the MC-DART algorithm
            pixelerror, seg = MCDART.MCDART(TPh, r, b, NAngles, ARM, Startit, MCDARTit, ARMit, FixProb, channels, materials, DiscMaterialSpectra, ROI = ROI, Smoothing = smoothing)
            #Save the final segmentation
            if(saveResults == True):
                if not os.path.exists(RESPATH + "/Reconstructions"):
                    os.makedirs(RESPATH + "/Reconstructions")
                colors = ['red', 'blue', 'yellow', 'green', 'orange', 'purple', 'brown', 'pink', 'gray', 'olive', 'cyan', 'white']
                cmap = ListedColormap(colors[0:nomaterials], 'indexed')
                pylab.imsave(RESPATH + "/Reconstructions/FinalSegRun" + str(sys.argv[1]) + "NoMat" + str(noMaterials) + "noChannels" + str(noChannels) + ".png", seg, dpi=600, cmap=cmap)
                pylab.imsave(RESPATH + "/Reconstructions/FinalSegRun" + str(sys.argv[1]) + "NoMat" + str(noMaterials) + "noChannels" + str(noChannels) + ".eps", seg, cmap=cmap)
            #Update the array with pixel errors
            AllPixelErrors[noMaterials-minNoMaterials][noChannels-1] = pixelerror
            #Saves the material attenuations for the next (channel) iteration
            temp = DiscMaterialSpectra
        #Saves the material attenuations for the next (material) iteration
        temp2 = DiscMaterialSpectra
        temp = np.zeros((1,1))
    #Pixel error for all material-channel combinations
    print("Pixel errors for all material-channel combinations\n", AllPixelErrors)
    #Save pixel error results
    if(saveResults == True):
        if not os.path.exists(RESPATH + "/PixelErrors"):
            os.makedirs(RESPATH + "/PixelErrors")
        np.savetxt(RESPATH + "/PixelErrors/pixelErrorRun" + str(sys.argv[1]) + ".txt", AllPixelErrors, fmt='%i')
if __name__ == "__main__":
main()
| mzeegers/MC-DART | scripts/MCDARTExp1.py | MCDARTExp1.py | py | 8,833 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "sys.argv",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.seed",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"lin... |
72729149627 | import cv2
from skimage.metrics import structural_similarity as ssim
import numpy as np
from PIL import Image, ImageChops
import matplotlib.pyplot as plt
################################################################
########### USING PIXEL COMPARISON #########################
############## IMPORTANT TO READ !! #########################
# val selects the comparison mode:
#   1 for maps with real data where Gmapping gives maps with #fefefe instead of #ffffff, and grey with #D8D8D8
#   2 for maps with matching colors, usually simulated data
#   3 for comparing maps with error, or maps where there are many tonalities of white
val = 1
#Path to the map generated by our algorithm
img1= Image.open('C:/Users/jc/Documents/GitHub/saut_ogm/image-comparison/ALL_occ-0.8_free-0.25_cell-0.05_wall-0.05.png')
#Path to the reference map
img2= Image.open('C:/Users/jc/Documents/GitHub/saut_ogm/image-comparison/mape2.png')
#Convert to RGB so every comparison below works on 3-channel values
img1 = img1.convert("RGB")
img2 = img2.convert("RGB")
#Resize both images to the larger common size so the arrays are comparable element-wise
width = max(img1.width, img2.width)
height = max(img1.height, img2.height)
img1 = img1.resize((width, height))
img2 = img2.resize((width, height))
# Convert the images to NumPy arrays
pixels1 = np.array(img1)
pixels2 = np.array(img2)
# Find white (free-space) pixels in img1 and img2
white_pixels_1 = np.all(pixels1 == [255, 255, 255], axis=2)
#Maps that come from Gmapping are not 100% white, they are #fefefe
if val == 1:
    white_pixels_2 = np.all(pixels2 == [254, 254, 254], axis=2)
elif val ==3:
    # mode 3: any light-grey tonality within this range counts as white
    min_value = [180, 180, 180]
    max_value = [255, 255, 255]
    white_pixels_2 = np.all((pixels2 >= min_value) & (pixels2 <= max_value), axis=2)
else:
    white_pixels_2 = np.all(pixels2 == [255, 255, 255], axis=2)
# Initialize a counter for the different white pixels
count_white1 = np.sum(white_pixels_1)
count_white2 = np.sum(white_pixels_2)
#Interception: intersection-over-union style score on the white (free) area
intersecting_pixels = np.sum(white_pixels_1 & white_pixels_2)
total_white_pixels = count_white1 + count_white2 - intersecting_pixels
#total_white_pixels = np.sum(white_pixels_1)
percentage_diff_white = (intersecting_pixels / total_white_pixels) * 100
#Print results
print("Using Only White Comparison:");
print("white 1: " + str(np.sum(white_pixels_1)))
print("white 2: " + str(np.sum(white_pixels_2)))
print("size: " + str(height*width))
print("Number of Intersecting Pixels: " + str(intersecting_pixels))
print("Percentage of equal pixels: " + str(percentage_diff_white))
# NOTE(review): 'diff', 'highlighted_img1' and 'highlighted_img2' are never used below
diff = np.sum(pixels1!=pixels2)
# Create a copy of img1
highlighted_img1 = np.array(img1)
highlighted_img2 = np.array(img2)
# mask of pixels that differ in any channel
diff_pixels = np.any(pixels1 != pixels2, axis=-1)
if val == 1:
    # map_pixels: pixels agreeing as free (white/#fefefe) or occupied (black) in both maps
    map_pixels = np.logical_or(
        np.all(pixels1 == [255, 255, 255], axis=-1) & np.all(pixels2 == [254, 254, 254], axis=-1),
        np.all(pixels1 == [0, 0, 0], axis=-1) & np.all(pixels2 == [0, 0, 0], axis=-1),
    )
    # colors that differ numerically but encode the same map state in mode 1
    equal_pixels = np.logical_or(
        np.all(pixels1 == [255, 255, 255], axis=-1) & np.all(pixels2 == [254, 254, 254], axis=-1),
        np.all(pixels1 == [149, 149, 149], axis=-1) & np.all(pixels2 == [216, 216, 216], axis=-1)
    )
    diff_pixels = np.logical_and(diff_pixels, np.logical_not(equal_pixels))
elif val == 3:
    pixels2_range = (pixels2 >= min_value) & (pixels2 <= max_value)
    map_pixels = np.logical_or(
        np.all(pixels1 == [255, 255, 255], axis=-1) & np.all(pixels2_range, axis=-1),
        np.all(pixels1 == [0, 0, 0], axis=-1) & np.all(pixels2 == [0, 0, 0], axis=-1),
    )
    # NOTE(review): 'white' is assigned but never used
    white = np.all(pixels1 == [255, 255, 255], axis=-1) & np.all(pixels2_range, axis=-1)
else:
    map_pixels = np.logical_or(
        np.all(pixels1 == [255, 255, 255], axis=-1) & np.all(pixels2 == [255, 255, 255], axis=-1),
        np.all(pixels1 == [0, 0, 0], axis=-1) & np.all(pixels2 == [0, 0, 0], axis=-1),
    )
# Visualisation canvas; later assignments overwrite earlier ones, so order matters
diff_img = np.array(img1)
#In red the different pixels
diff_img[diff_pixels] = [255, 0, 0]
#In blue the white pixels of both images
diff_img[white_pixels_1] = [0,0,255]
diff_img[white_pixels_2] = [0, 0, 255]
#In green the map pixels (white and black) that appear simultaneously in both images
diff_img[map_pixels] = [0,255,0]
plt.imshow(diff_img)
plt.title('Differences between Image 1 and Image 2')
plt.show()
| joaofgois/saut_ogm | scripts/MapComparisonMetric.py | MapComparisonMetric.py | py | 4,169 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "PIL.Image.open",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "PIL.Image.open",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number"... |
39254020126 | import os
import rasterio
import geopandas as gpd
import shapely
from shapely.geometry import box
from tqdm import tqdm
def parse_txt(txt_dir):
    """
    Read a text file of bounding boxes, one box per line.
    bbox format - xmin, ymin, xmax, ymax (unnormalized), comma separated.
    Params:
        txt_dir (str): path to text file containing bboxes.
    Returns:
        list of [xmin, ymin, xmax, ymax] float lists, e.g.
        [[xmin, ymin, xmax, ymax],
         [xmin, ymin, xmax, ymax]]
    """
    boxes = []
    with open(txt_dir, 'r') as handle:
        for raw_line in handle:
            fields = raw_line.strip().split(',')
            boxes.append([float(value) for value in fields])
    return boxes
def parse_geojson(geojson_dir):
    """
    Read geojson file.
    Params:
        geojson_dir (str): path to geojson file containing coordinates and crs system. For geo-referencing.
    Returns:
        image_id (str)
        src_crs (source crs)
        left (float)
        top (float)
        right (float)
        bottom (float)
    """
    # read geojson file; expects a single-row frame describing one image tile
    geo_df = gpd.read_file(geojson_dir)
    # .item() unwraps the numpy scalar into a plain Python value
    image_id = str(geo_df.iloc[0]['id'].item())
    left = geo_df.iloc[0]['left'].item()
    top = geo_df.iloc[0]['top'].item()
    right = geo_df.iloc[0]['right'].item()
    bottom = geo_df.iloc[0]['bottom'].item()
    src_crs = geo_df.crs
    return image_id, src_crs, left, top, right, bottom
def produce_geo_files(model_output_folder, geojson_folder, output_folder):
    """
    Geo-reference bounding boxes(model predictions) from text files and produce geojson files.
    Params:
        model_output_folder (str): folder containing model prediction text files
        geojson_folder (str): folder containing geojson files to be used for geo-referencing
        output_folder (str): folder where final geojson files containing geo-referenced model predictions will be produced.
    Returns:
        None
    """
    txt_file_list = os.listdir(model_output_folder)
    filename_list = [os.path.splitext(i)[0] for i in txt_file_list]
    os.makedirs(output_folder, exist_ok = True)
    # for each text file (one per image tile; a matching .geojson must exist)
    for filename in filename_list:
        # w, h assumed to be 1000x1000 — TODO confirm this matches the png tiles
        image_width, image_height = 1000, 1000
        # file dirs
        geojson_dir = os.path.join(geojson_folder, filename + '.geojson')
        txt_dir = os.path.join(model_output_folder, filename + '.txt')
        # get bounding box list from txt file
        bboxes = parse_txt(txt_dir)
        # get geo-information for current png image tile (image_id is unused here)
        image_id, src_crs, left, top, right, bottom = parse_geojson(geojson_dir)
        # used for mapping image pixel values to geo-coordinates
        affine_tfm = rasterio.transform.from_bounds(west = left, south = bottom, east = right, north = top,
                                                    width = image_width, height = image_height)
        bbox_geom_list, centroid_geom_list = [], []
        # for each bbox in current txt file
        for bbox in bboxes:
            xmin, ymin, xmax, ymax = bbox
            # NOTE(review): leftover debug print; consider removing or logging instead
            print('box coords:', xmin, ymin, xmax, ymax)
            # geo-reference bounding box
            bbox_geom = pix_to_geo_coords(affine_tfm, xmin, ymin, xmax, ymax)
            # centroid of bounding box
            bbox_centroid = bbox_geom.centroid
            # append geo-registered bounding box and centroid (each row is a 1-element list)
            bbox_geom_list.append([bbox_geom])
            centroid_geom_list.append([bbox_centroid])
        # create 2 dataframes - one for bbox and one for centroid
        bbox_geo_df = gpd.GeoDataFrame(bbox_geom_list, columns=['geometry'], crs=src_crs)
        centroid_geo_df = gpd.GeoDataFrame(centroid_geom_list, columns=['geometry'], crs=src_crs)
        # save dirs for 2 dataframes
        bbox_gdf_save_dir = os.path.join(output_folder, filename + '_box' + '.geojson')
        centroid_gdf_save_dir = os.path.join(output_folder, filename + '_centroid' + '.geojson')
        # save 2 dataframes
        bbox_geo_df.to_file(bbox_gdf_save_dir, driver='GeoJSON')
        centroid_geo_df.to_file(centroid_gdf_save_dir, driver='GeoJSON')
def split_geojsons(geojson_dir, output_folder):
    """
    Splitting the original geojson file 'sudan_grid.geojson' (file size around 2.4 Gb).
    The geojson file contains geo-information (e.g. top, left, bottom, right geo-coordinates) for all png tiles.
    After splitting, each geojson file will contain geo-information for only a single png tile.
    Params:
        geojson_dir (str): path to the original geojson file 'sudan_grid.geojson'
        output_folder (str): folder where geojson files for each png tile will be produced.
    Returns:
        None
    """
    os.makedirs(output_folder, exist_ok = True)
    data = gpd.read_file(geojson_dir)
    total_rows = len(data)
    # preserve the source CRS on every per-tile file
    crs = data.crs
    for idx in tqdm(range(total_rows)):
        # one row per tile; row[0] is the tile id used as the output filename
        row = list(data.loc[idx])
        file_id = str(row[0])
        save_dir = os.path.join(output_folder, file_id + '.geojson')
        # NOTE(review): assumes the source columns are exactly id/left/top/right/bottom/geometry in this order
        gdf = gpd.GeoDataFrame([row], columns=['id', 'left', 'top', 'right', 'bottom', 'geometry'], crs=crs)
        gdf.to_file(save_dir, driver='GeoJSON')
        print(save_dir, ' --> Done.')
def pix_to_geo_coords(affine_tfm, xmin, ymin, xmax, ymax):
    """
    Geo-reference a bounding box.
    Params:
        affine_tfm (affine.Affine): used for affine transformation
        xmin (float): x min value of bounding box
        ymin (float): y min value of bounding box
        xmax (float): x max value of bounding box
        ymax (float): y max value of bounding box
    Returns:
        geo_box (shapely.geometry.polygon.Polygon)
    """
    shapely_box = box(xmin, ymin, xmax, ymax)
    # shapely's 2D affine_transform takes coefficients in the order
    # [a, b, d, e, xoff, yoff], matching the Affine members passed below
    geo_box = shapely.affinity.affine_transform(shapely_box,
                                                [affine_tfm.a,
                                                 affine_tfm.b,
                                                 affine_tfm.d,
                                                 affine_tfm.e,
                                                 affine_tfm.xoff,
                                                 affine_tfm.yoff])
    return geo_box
| unicef/Mongolia-school-mapping-AI-models | codes/geo_utils.py | geo_utils.py | py | 6,240 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "geopandas.read_file",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "os.path.splitext",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_n... |
33036426825 | """Config flow for UniFi."""
import socket
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import (
CONF_HOST,
CONF_PASSWORD,
CONF_PORT,
CONF_USERNAME,
CONF_VERIFY_SSL,
)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from .const import (
CONF_ALLOW_BANDWIDTH_SENSORS,
CONF_BLOCK_CLIENT,
CONF_CONTROLLER,
CONF_DETECTION_TIME,
CONF_IGNORE_WIRED_BUG,
CONF_POE_CLIENTS,
CONF_SITE_ID,
CONF_SSID_FILTER,
CONF_TRACK_CLIENTS,
CONF_TRACK_DEVICES,
CONF_TRACK_WIRED_CLIENTS,
CONTROLLER_ID,
DEFAULT_POE_CLIENTS,
DOMAIN as UNIFI_DOMAIN,
LOGGER,
)
from .controller import get_controller
from .errors import AlreadyConfigured, AuthenticationRequired, CannotConnect
DEFAULT_PORT = 8443
DEFAULT_SITE_ID = "default"
DEFAULT_VERIFY_SSL = False
@callback
def get_controller_id_from_config_entry(config_entry):
    """Return a unique controller id string built from the entry's host and site."""
    return CONTROLLER_ID.format(
        host=config_entry.data[CONF_CONTROLLER][CONF_HOST],
        site=config_entry.data[CONF_CONTROLLER][CONF_SITE_ID],
    )
class UnifiFlowHandler(config_entries.ConfigFlow, domain=UNIFI_DOMAIN):
    """Handle a UniFi config flow (user step collects credentials, site step picks the site)."""
    VERSION = 1
    CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_POLL
    @staticmethod
    @callback
    def async_get_options_flow(config_entry):
        """Get the options flow for this handler."""
        return UnifiOptionsFlowHandler(config_entry)
    def __init__(self):
        """Initialize the UniFi flow."""
        self.config = None
        self.desc = None
        self.sites = None
    async def async_step_user(self, user_input=None):
        """Handle a flow initialized by the user.

        Validates the controller credentials, then advances to site selection;
        on failure the form is shown again with an error key.
        """
        errors = {}
        if user_input is not None:
            try:
                self.config = {
                    CONF_HOST: user_input[CONF_HOST],
                    CONF_USERNAME: user_input[CONF_USERNAME],
                    CONF_PASSWORD: user_input[CONF_PASSWORD],
                    CONF_PORT: user_input.get(CONF_PORT),
                    CONF_VERIFY_SSL: user_input.get(CONF_VERIFY_SSL),
                    CONF_SITE_ID: DEFAULT_SITE_ID,
                }
                controller = await get_controller(self.hass, **self.config)
                self.sites = await controller.sites()
                return await self.async_step_site()
            except AuthenticationRequired:
                errors["base"] = "faulty_credentials"
            except CannotConnect:
                errors["base"] = "service_unavailable"
            except Exception: # pylint: disable=broad-except
                LOGGER.error(
                    "Unknown error connecting with UniFi Controller at %s",
                    user_input[CONF_HOST],
                )
                return self.async_abort(reason="unknown")
        # Pre-fill the host field when a controller answers at hostname "unifi"
        host = ""
        if await async_discover_unifi(self.hass):
            host = "unifi"
        return self.async_show_form(
            step_id="user",
            data_schema=vol.Schema(
                {
                    vol.Required(CONF_HOST, default=host): str,
                    vol.Required(CONF_USERNAME): str,
                    vol.Required(CONF_PASSWORD): str,
                    vol.Optional(CONF_PORT, default=DEFAULT_PORT): int,
                    vol.Optional(CONF_VERIFY_SSL, default=DEFAULT_VERIFY_SSL): bool,
                }
            ),
            errors=errors,
        )
    async def async_step_site(self, user_input=None):
        """Select site to control.

        Maps the selected site description back to its internal name, aborts if
        that host/site pair is already configured, and auto-selects when the
        controller only has a single site.
        """
        errors = {}
        if user_input is not None:
            try:
                desc = user_input.get(CONF_SITE_ID, self.desc)
                for site in self.sites.values():
                    if desc == site["desc"]:
                        self.config[CONF_SITE_ID] = site["name"]
                        break
                # Reject duplicates of an already-configured host/site pair
                for entry in self._async_current_entries():
                    controller = entry.data[CONF_CONTROLLER]
                    if (
                        controller[CONF_HOST] == self.config[CONF_HOST]
                        and controller[CONF_SITE_ID] == self.config[CONF_SITE_ID]
                    ):
                        raise AlreadyConfigured
                data = {CONF_CONTROLLER: self.config}
                return self.async_create_entry(title=desc, data=data)
            except AlreadyConfigured:
                return self.async_abort(reason="already_configured")
        # Single-site controller: skip the form and select it directly
        if len(self.sites) == 1:
            self.desc = next(iter(self.sites.values()))["desc"]
            return await self.async_step_site(user_input={})
        sites = []
        for site in self.sites.values():
            sites.append(site["desc"])
        return self.async_show_form(
            step_id="site",
            data_schema=vol.Schema({vol.Required(CONF_SITE_ID): vol.In(sites)}),
            errors=errors,
        )
class UnifiOptionsFlowHandler(config_entries.OptionsFlow):
    """Handle UniFi options.

    Advanced mode walks device_tracker -> client_control -> statistics_sensors;
    otherwise a single simplified step is shown.
    """
    def __init__(self, config_entry):
        """Initialize UniFi options flow."""
        self.config_entry = config_entry
        self.options = dict(config_entry.options)
        self.controller = None
    async def async_step_init(self, user_input=None):
        """Manage the UniFi options (entry point; picks simple or advanced path)."""
        self.controller = self.hass.data[UNIFI_DOMAIN][self.config_entry.entry_id]
        self.options[CONF_BLOCK_CLIENT] = self.controller.option_block_clients
        if self.show_advanced_options:
            return await self.async_step_device_tracker()
        return await self.async_step_simple_options()
    async def async_step_simple_options(self, user_input=None):
        """Show the reduced option set (tracking toggles plus client blocking)."""
        if user_input is not None:
            self.options.update(user_input)
            return await self._update_options()
        # Offer every known client as "name (mac)" keyed by mac
        clients_to_block = {}
        for client in self.controller.api.clients.values():
            clients_to_block[
                client.mac
            ] = f"{client.name or client.hostname} ({client.mac})"
        return self.async_show_form(
            step_id="simple_options",
            data_schema=vol.Schema(
                {
                    vol.Optional(
                        CONF_TRACK_CLIENTS,
                        default=self.controller.option_track_clients,
                    ): bool,
                    vol.Optional(
                        CONF_TRACK_DEVICES,
                        default=self.controller.option_track_devices,
                    ): bool,
                    vol.Optional(
                        CONF_BLOCK_CLIENT, default=self.options[CONF_BLOCK_CLIENT]
                    ): cv.multi_select(clients_to_block),
                }
            ),
        )
    async def async_step_device_tracker(self, user_input=None):
        """Manage the device tracker options."""
        if user_input is not None:
            self.options.update(user_input)
            return await self.async_step_client_control()
        # Union of controller WLAN names, combined-suffix variants and per-AP overrides
        ssids = (
            set(self.controller.api.wlans)
            | {
                f"{wlan.name}{wlan.name_combine_suffix}"
                for wlan in self.controller.api.wlans.values()
                if not wlan.name_combine_enabled
            }
            | {
                wlan["name"]
                for ap in self.controller.api.devices.values()
                for wlan in ap.wlan_overrides
                if "name" in wlan
            }
        )
        ssid_filter = {ssid: ssid for ssid in sorted(list(ssids))}
        return self.async_show_form(
            step_id="device_tracker",
            data_schema=vol.Schema(
                {
                    vol.Optional(
                        CONF_TRACK_CLIENTS,
                        default=self.controller.option_track_clients,
                    ): bool,
                    vol.Optional(
                        CONF_TRACK_WIRED_CLIENTS,
                        default=self.controller.option_track_wired_clients,
                    ): bool,
                    vol.Optional(
                        CONF_TRACK_DEVICES,
                        default=self.controller.option_track_devices,
                    ): bool,
                    vol.Optional(
                        CONF_SSID_FILTER, default=self.controller.option_ssid_filter
                    ): cv.multi_select(ssid_filter),
                    vol.Optional(
                        CONF_DETECTION_TIME,
                        default=int(
                            self.controller.option_detection_time.total_seconds()
                        ),
                    ): int,
                    vol.Optional(
                        CONF_IGNORE_WIRED_BUG,
                        default=self.controller.option_ignore_wired_bug,
                    ): bool,
                }
            ),
        )
    async def async_step_client_control(self, user_input=None):
        """Manage configuration of network access controlled clients."""
        errors = {}
        if user_input is not None:
            self.options.update(user_input)
            return await self.async_step_statistics_sensors()
        # Same client list construction as async_step_simple_options
        clients_to_block = {}
        for client in self.controller.api.clients.values():
            clients_to_block[
                client.mac
            ] = f"{client.name or client.hostname} ({client.mac})"
        return self.async_show_form(
            step_id="client_control",
            data_schema=vol.Schema(
                {
                    vol.Optional(
                        CONF_BLOCK_CLIENT, default=self.options[CONF_BLOCK_CLIENT]
                    ): cv.multi_select(clients_to_block),
                    vol.Optional(
                        CONF_POE_CLIENTS,
                        default=self.options.get(CONF_POE_CLIENTS, DEFAULT_POE_CLIENTS),
                    ): bool,
                }
            ),
            errors=errors,
        )
    async def async_step_statistics_sensors(self, user_input=None):
        """Manage the statistics sensors options."""
        if user_input is not None:
            self.options.update(user_input)
            return await self._update_options()
        return self.async_show_form(
            step_id="statistics_sensors",
            data_schema=vol.Schema(
                {
                    vol.Optional(
                        CONF_ALLOW_BANDWIDTH_SENSORS,
                        default=self.controller.option_allow_bandwidth_sensors,
                    ): bool
                }
            ),
        )
    async def _update_options(self):
        """Persist the collected options on the config entry and finish the flow."""
        return self.async_create_entry(title="", data=self.options)
async def async_discover_unifi(hass):
    """Resolve the conventional hostname "unifi"; return its address or None.

    The blocking DNS lookup runs in the executor to keep the event loop free;
    gaierror simply means no controller answers at that name.
    """
    try:
        return await hass.async_add_executor_job(socket.gethostbyname, "unifi")
    except socket.gaierror:
        return None
| 84KaliPleXon3/home-assistant-core | homeassistant/components/unifi/config_flow.py | config_flow.py | py | 11,066 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "const.CONTROLLER_ID.format",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "const.CONTROLLER_ID",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "const.CONF_CONTROLLER",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "... |
22688604701 | from django.shortcuts import render, redirect, get_object_or_404
from django.contrib.auth.decorators import login_required
from .models import Product
from apps.customers.models import Customer
from django.views.decorators.http import require_POST
from .cart import Cart
from .forms import CartAddProductForm
@login_required
def shop_index(request):
    """Render the shop dashboard with product and customer counts."""
    # Count the number of products in the database
    product_count = Product.objects.count()
    # Count the number of customers in the database
    customer_count = Customer.objects.count()
    context = {
        # 'products': products,
        'product_count': product_count,
        'customer_count': customer_count,
    }
    return render(request, 'shop/shop_index.html', context)
@login_required
def product_list(request):
    """List every product in the catalogue."""
    products = Product.objects.all()
    context = {
        'products': products,
    }
    return render(request, 'shop/product_list.html', context)
@login_required
def product_detail(request, id):
    """Show one product (404 if missing) together with an add-to-cart form."""
    product = get_object_or_404(Product, id=id)
    cart_product_form = CartAddProductForm()
    context = {
        'product': product,
        'cart_product_form': cart_product_form
    }
    return render(request, 'shop/product_detail.html', context)
@require_POST
def cart_add(request, product_id):
    """Add a product to the session cart, or update its quantity."""
    cart = Cart(request)
    product = get_object_or_404(Product, id=product_id)
    form = CartAddProductForm(request.POST)
    if form.is_valid():
        cd = form.cleaned_data
        # 'override' makes the cart replace the stored quantity instead of incrementing it
        cart.add(product=product, quantity=cd['quantity'], override_quantity=cd['override'])
    return redirect('shop:cart_detail')
@require_POST
def cart_remove(request, product_id):
    """Remove a product from the session cart and return to the cart view."""
    cart = Cart(request)
    product = get_object_or_404(Product, id=product_id)
    cart.remove(product)
    return redirect('shop:cart_detail')
def cart_detail(request):
    """Render the cart; each line gets a pre-filled quantity-update form.

    NOTE(review): unlike the other views this one has no @login_required —
    confirm anonymous carts are intended.
    """
    cart = Cart(request)
    for item in cart:
        # 'override': True makes the posted form replace (not add to) the quantity
        item['update_quantity_form'] = CartAddProductForm(initial={
            'quantity': item['quantity'],
            'override': True})
return render(request, 'shop/cart_detail.html', {'cart': cart}) | ordemdigitale/django-crm-v2 | apps/shop/views.py | views.py | py | 2,148 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "models.Product.objects.count",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "models.Product.objects",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "models.Product",
"line_number": 13,
"usage_type": "name"
},
{
"api_name"... |
38814850733 | import matplotlib.pyplot as plt
import pandas as pd
import argparse
import seaborn as sns
sns.set_context("notebook", font_scale=1.8)
plt.style.use('fivethirtyeight')

parser = argparse.ArgumentParser()
parser.add_argument('--classifier', default="svm", type=str, nargs='?', help='classifier')
args = parser.parse_args()
classifier = args.classifier


def _plot_metric(result_csv, y_col, metric_label, out_pdf):
    """Render one grouped bar chart (mean +/- sd across runs) and save it as a PDF.

    result_csv: CSV with 'Dataset', 'Method' and the *y_col* metric column.
    y_col: metric column plotted on the y axis ("Accuracy" or "AUC").
    metric_label: human-readable name used only in the progress message.
    out_pdf: destination path for the rendered figure.
    """
    df = pd.read_csv(result_csv, header=0, sep=",")
    print("plot {}".format(metric_label))
    g = sns.catplot(x="Dataset", y=y_col, hue="Method", data=df, kind="bar", ci="sd", height=5, aspect=2, palette="Set1")
    g.set_xlabels("Dataset")
    g.set_ylabels(y_col)
    # annotate each bar with its rounded height just above the bar
    for idx, p in enumerate(g.ax.patches):
        height = round(p.get_height(), 2)
        g.ax.text(p.get_x()+p.get_width()/2, height+1, str(round(height, 2)), ha="center", fontsize=10)
    plt.savefig(out_pdf, bbox_inches="tight")
    plt.close()


# plot accuracy
_plot_metric('./result/_result_{}_acc.csv'.format(classifier), "Accuracy", "accuracy", "./result/_plot_{}_accuracy.pdf".format(classifier))
# plot AUC
_plot_metric('./result/_result_{}_auc.csv'.format(classifier), "AUC", "AUC", "./result/_plot_{}_auc.pdf".format(classifier))
plt.close()
| nphdang/CCRAL | visualize.py | visualize.py | py | 1,524 | python | en | code | 3 | github-code | 6 | [
{
"api_name": "seaborn.set_context",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.style.use",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.style",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_nam... |
10114743152 | from __future__ import annotations
from typing import Tuple
import stage.tile_types as tile_types
from stage.game_map import GameMap
class Room:
    """A rectangular room on the map, defined by its top-left corner and size."""

    def __init__(self, x: int, y: int, width: int, height: int) -> None:
        # Store the two opposite corners of the rectangle plus its dimensions.
        self.x1 = x
        self.y1 = y
        self.x2 = x + width
        self.y2 = y + height
        self.width = width
        self.height = height

    # @property lets callers write room.center instead of room.center()
    @property
    def center(self) -> Tuple[int, int]:
        """Integer coordinates of the room's midpoint."""
        return int((self.x1 + self.x2) / 2), int((self.y1 + self.y2) / 2)

    @property
    def inner(self) -> Tuple[slice, slice]:
        """Slices selecting the room's interior (the surrounding wall excluded)."""
        # slice() objects can be used directly to index a 2D map array
        return slice(self.x1 + 1, self.x2), slice(self.y1 + 1, self.y2)

    def intersects(self, other: "Room") -> bool:
        """Return True when this room's rectangle overlaps *other*'s."""
        overlaps_x = self.x1 <= other.x2 and self.x2 >= other.x1
        overlaps_y = self.y1 <= other.y2 and self.y2 >= other.y1
        return overlaps_x and overlaps_y
| programmerare93/Dungeons_of_Kwargs | src/stage/rooms.py | rooms.py | py | 1,435 | python | sv | code | 4 | github-code | 6 | [
{
"api_name": "typing.Tuple",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 30,
"usage_type": "name"
}
] |
70465798267 | import sys
from cx_Freeze import setup, Executable
# cx_Freeze build target: a windowed (no console) Win32 executable for the GUI script.
target = Executable(script="Main_Program_Design.py",
                    base = "Win32GUI",
                    icon="Meat_Icon.ico")
# Package metadata; `python setup.py build` produces the frozen executable.
setup(name="Meat Shop Management System",
      version = "1.0",
      description="A simple program that helps the owner compute the order of the clients.",
      executables = [target])
| zEuS0390/python-meat-shop-management-system | setup.py | setup.py | py | 375 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "cx_Freeze.Executable",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "cx_Freeze.setup",
"line_number": 8,
"usage_type": "call"
}
] |
73400038269 | #!/usr/bin/env python2
import sys
sys.path.insert(0, '/root/jhbuild')
import jhbuild.main
import jhbuild.moduleset
from jhbuild.versioncontrol.git import GitBranch
import __builtin__
import json
# jhbuild expects these globals to exist before its config machinery loads (Python 2 builtins).
__builtin__.__dict__['SRCDIR'] = '/root/jhbuild'
__builtin__.__dict__['PKGDATADIR'] = None
__builtin__.__dict__['DATADIR'] = None
# NOTE(review): jhbuild.config is reached via the jhbuild.main import above — confirm it stays importable.
config = jhbuild.config.Config(None, [])
config.interact = False
moduleset = jhbuild.moduleset.load(config)
# Collect every git-backed jhbuild module as {name: {url, branch}} for the Hound config.
repos = {}
for module in moduleset.modules.values():
    if isinstance(module.branch, GitBranch):
        repos[module.name] = {
            'url': module.branch.module,
            'branch': module.branch.branch or 'master'
        }
# Emit a Hound code-search configuration covering all discovered repositories.
with open('config.json', 'w') as conf:
    json.dump({
        'max-concurrent-indexers': 4,
        'dbpath': '/db',
        'repos': repos
    }, conf, sort_keys=True, indent=4, separators=(',', ': '))
| benwaffle/gnome-hound | gen-conf.py | gen-conf.py | py | 878 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "sys.path.insert",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 3,
"usage_type": "attribute"
},
{
"api_name": "__builtin__.__dict__",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "__builtin__.__d... |
1549161757 | import numpy as np
import pickle
import os
import random
from compute_pairwise_dataset import compute_pairwise_dataset
import torch
from utils import get_torch_device
def save_dataset(qids, X, y, folder):
    """
    Save the dataset in the provided folder.

    Pickles the three objects into ``qids.pickle``, ``y.pickle`` and
    ``X.pickle`` inside ``folder``, creating the folder (including any
    missing parents) when necessary.

    Args:
        qids: query ids, picklable sequence/array.
        X: feature matrix, picklable.
        y: targets, picklable.
        folder (str): destination directory.
    """
    # exist_ok avoids the check-then-create race; makedirs also supports nested paths
    os.makedirs(folder, exist_ok=True)
    # context managers guarantee the handles are flushed and closed
    # (the original leaked the open file objects passed to pickle.dump)
    for name, obj in (('qids', qids), ('y', y), ('X', X)):
        with open(f'{folder}/{name}.pickle', 'wb') as handle:
            pickle.dump(obj, handle)
def process_line(line: str) -> "tuple[int, float, np.ndarray]":
    """Parse one LETOR-style line: ``<score> qid:<id> <i>:<val> ... # comment``.

    The original annotation claimed ``np.ndarray`` although a 3-tuple is
    returned; the annotation is corrected here (string form, so it is never
    evaluated at runtime).

    Returns:
        (qid, score, features): query id as int, relevance score as float,
        and the feature values as a 1-D numpy array in file order.
    """
    # drop the trailing '#...' comment, then tokenize on single spaces
    payload = line.split('#')[0].strip()
    features = payload.split(sep=' ')
    score = float(features[0])
    qid = int(features[1].split(':')[1].strip())
    processed_features = [float(tok.split(':')[1].strip()) for tok in features[2:]]
    return qid, score, np.array(processed_features)
def process_dataset(dataset: str):
    """
    Parse a whole dataset given as raw text.

    :param dataset: full file contents, one sample per line
    :return: (qids, scores, features) as numpy arrays
    """
    parsed = [process_line(row) for row in dataset.splitlines()]
    qids = [entry[0] for entry in parsed]
    scores = [entry[1] for entry in parsed]
    features = [entry[2] for entry in parsed]
    print('Dataset loaded and processed')
    return np.array(qids), np.array(scores), np.stack(features)
def get_dataset(path: str):
    """Read the file at ``path`` and parse it into (qids, scores, features)."""
    with open(path, 'r') as handle:
        raw_text = handle.read()
    return process_dataset(raw_text)
def group_data_by_query_id(qids, scores, features):
    """
    Group (score, feature) pairs by their query id.

    :return: dict mapping qid -> list of (score, feature) tuples,
             preserving input order within each query
    """
    grouped = {}
    for idx, qid in enumerate(qids):
        grouped.setdefault(qid, []).append((scores[idx], features[idx]))
    return grouped
def compute_pairwise_dataset_for_query(qid, data_by_query, score_equal_drop_prob=0.85):
    """
    Build all ordered document pairs for one query.

    For every ordered pair (a, b) of distinct documents, the pairwise
    feature is concat(features_a, features_b) and the target is the
    probability that a ranks above b: 1.0 if score_a > score_b, 0.5 on
    ties, 0.0 otherwise.

    Note: ``score_equal_drop_prob`` is currently unused — the random
    dropping of equal-score pairs is disabled.
    """
    docs = data_by_query[qid]
    pair_features = []
    pair_targets = []
    for a, (score_a, feats_a) in enumerate(docs):
        for b, (score_b, feats_b) in enumerate(docs):
            if a == b:
                continue
            pair_features.append(np.concatenate([feats_a, feats_b]))
            if score_a > score_b:
                pair_targets.append(1.0)
            elif score_a == score_b:
                pair_targets.append(0.5)
            else:
                pair_targets.append(0.0)
    return pair_features, pair_targets
def get_pairwise_dataset(path: str):
    """
    Load a dataset from disk and expand it into ordered document pairs.

    :param path: path of the raw LETOR-style dataset file
    :return: (pairwise qids, target probabilities, pairwise features)
    """
    qids, scores, features = get_dataset(path)
    # group dataset by query id, then expand each query independently
    data_by_query = group_data_by_query_id(qids, scores, features)
    unique_qids = list(set(list(qids)))

    all_qids = []
    all_targets = []
    all_features = []
    for pos, qid in enumerate(unique_qids):
        print(f'{pos} / {len(unique_qids)}')
        feats, targets = compute_pairwise_dataset_for_query(qid, data_by_query)
        all_qids.extend([qid] * len(targets))
        all_targets.extend(targets)
        all_features.extend(feats)
    return np.array(all_qids), np.array(all_targets), np.stack(all_features)
def get_pairwise_dataset_fast(path: str):
    """
    Torch-accelerated variant of :func:`get_pairwise_dataset`.

    Loads the dataset, moves scores/features onto the torch device and
    expands each query's documents into ordered pairs through
    ``compute_pairwise_dataset``.

    :return: (qids, target probabilities, features) as torch tensors
    """
    qids, scores, features = get_dataset(path)
    device = get_torch_device()
    scores_t = torch.from_numpy(scores).type(torch.FloatTensor).to(device)
    features_t = torch.from_numpy(features).type(torch.FloatTensor).to(device)
    # group dataset by query id
    unique_qids = list(set(list(qids)))
    qids_t = torch.from_numpy(qids).type(torch.FloatTensor).to(device)

    out_qids = []
    out_targets = []
    out_features = []
    for pos, qid in enumerate(unique_qids):
        print(f'{pos} / {len(unique_qids)}')
        selected = torch.nonzero(qids_t == qid).T[0]
        X_pairwise, y_pairwise = compute_pairwise_dataset(
            features_t[selected], scores_t[selected])
        out_qids.append((qid * torch.ones_like(y_pairwise)).type(torch.IntTensor))
        out_targets.append(y_pairwise)
        out_features.append(X_pairwise)
    return (torch.concat(out_qids),
            torch.concat(out_targets),
            torch.concat(out_features, dim=0))
def load_dataset(folder):
    """
    Load the pairwise training dataset used in ranknet training.

    Reads the three pickle files written by :func:`save_dataset`.

    :param folder: directory containing qids.pickle / y.pickle / X.pickle
    :return: (qids, y, X) in that order
    """
    # context managers close the handles (the original open() calls leaked them)
    with open(f'{folder}/qids.pickle', 'rb') as f:
        qids = pickle.load(f)
    with open(f'{folder}/y.pickle', 'rb') as f:
        y = pickle.load(f)
    with open(f'{folder}/X.pickle', 'rb') as f:
        X = pickle.load(f)
    return qids, y, X
| catalinlup/learning-to-rank | src/data_loaders.py | data_loaders.py | py | 4,938 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.path.exists",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "os.mkdir",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pickle.dump",
"line_number":... |
20678009932 | from django.test import TestCase
from django.urls import reverse
from apps.articles.models import Tag
from apps.users.models import CustomUser
from .models import Tool
# Create your tests here.
# --- shared fixture payloads for the test cases below ---

# Minimal valid field set for creating a Tool instance.
test_tool = {
    "name": "tool_name",
    "slug": "tool_slug",
    "description": "tool_description",
    "img_link": "https://tool_img_link.com/tool.png",
    "link": "https://tool_link.com",
}
# Minimal valid field set for creating a Tag instance.
test_tag = {
    "tag_name": "test_tag",
    "img_link": "https://test_tag.org/test.png",
    "description": "long test description",
    "slug": "test_tag",
    "source_link": "https://test_tag.org/",
}
# Credentials for a regular (non-main) CustomUser.
normal_user = {"username": "normal", "email": "normal@user.com", "password": "foo"}
class TestToolsModels(TestCase):
    """Unit tests for the Tool model and its tag relation."""

    def setUp(self):
        self.test_obj = test_tool
        self.test_tag = test_tag
        new_tool = Tool.objects.create(**self.test_obj)
        new_tool.tags.add(Tag.objects.create(**self.test_tag))
        new_tool.save()

    def test_tool_created(self):
        """A Tool created from the fixture dict keeps every field value."""
        stored = Tool.objects.get(name=self.test_obj["name"])
        for field in ("name", "slug", "description", "img_link", "link"):
            self.assertEqual(getattr(stored, field), self.test_obj[field])
        self.assertEqual(stored.tags.all()[0].tag_name, self.test_tag["tag_name"])
class TestToolsListViews(TestCase):
    """Tests for the tools list view."""

    def setUp(self):
        self.test_obj = test_tool
        self.test_tag = test_tag
        self.test_user = normal_user
        CustomUser.objects.create_user(**self.test_user)
        created = Tool.objects.create(**self.test_obj)
        created.tags.add(Tag.objects.create(**self.test_tag))
        created.save()

    def test_tools_list_view(self):
        """The list view renders the expected template with all tools."""
        expected_qs = Tool.objects.all()
        response = self.client.get(reverse("tools_list"))
        self.assertQuerysetEqual(response.context["tools"], expected_qs, transform=lambda x: x)
        self.assertTemplateUsed(response, "tools/tools_list.html")
        self.assertEqual(response.status_code, 200)

    def test_main_author(self):
        """The view exposes the user flagged as main author in its context."""
        author = CustomUser.objects.get(username=self.test_user["username"])
        author.main_user = True
        author.save()
        response = self.client.get(reverse("tools_list"))
        self.assertEqual(response.context["main_author"], author)
class TestToolsDetailViews(TestCase):
    """Tests for the tool detail view."""

    def setUp(self):
        self.test_obj = test_tool
        self.test_tag = test_tag
        self.test_user = normal_user
        CustomUser.objects.create_user(**self.test_user)
        created = Tool.objects.create(**self.test_obj)
        created.tags.add(Tag.objects.create(**self.test_tag))
        created.save()

    def test_tools_detail_view(self):
        """The detail view renders the expected template and tool fields."""
        stored = Tool.objects.get(name=self.test_obj["name"])
        response = self.client.get(
            reverse("tool_detail", kwargs={"slug": self.test_obj["slug"]})
        )
        self.assertEqual(response.context["tool"], stored)
        self.assertTemplateUsed(response, "tools/tool_detail.html")
        self.assertEqual(response.status_code, 200)
        for field in ("name", "slug", "description", "img_link", "link"):
            self.assertEqual(getattr(response.context["tool"], field), self.test_obj[field])
        self.assertEqual(
            response.context["tool"].tags.all()[0].tag_name, self.test_tag["tag_name"]
        )

    def test_main_author(self):
        # NOTE(review): this hits the list route, duplicating
        # TestToolsListViews.test_main_author; presumably it should
        # exercise the detail route — confirm.
        author = CustomUser.objects.get(username=self.test_user["username"])
        author.main_user = True
        author.save()
        response = self.client.get(reverse("tools_list"))
        self.assertEqual(response.context["main_author"], author)
| akundev/akundotdev | apps/tools/tests.py | tests.py | py | 4,039 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.test.TestCase",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "models.Tool.objects.create",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "models.Tool.objects",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name... |
30086443751 | import os
import pickle
import numpy as np
from .util import draw_roc
from .statistic import get_EER_states, get_HTER_at_thr
from sklearn.metrics import roc_auc_score
def eval_acer(results, is_print=False):
    """
    Score binarized predictions as 100 - ACER.

    APCER = attack (label 0) samples misclassified as 1,
    BPCER = bona-fide (label 1) samples misclassified as 0,
    ACER  = their mean.

    :param results: np.array shape of (N, 2) [pred, label]
    :param is_print: print eval score
    :return: score (100 - ACER)
    """
    neg_mask = results[:, 1] == 0
    pos_mask = results[:, 1] == 1
    false_pos = (results[neg_mask, 0] == 1).sum()
    false_neg = (results[pos_mask, 0] == 0).sum()
    apcer = false_pos / neg_mask.sum() * 100
    bpcer = false_neg / pos_mask.sum() * 100
    acer = (apcer + bpcer) / 2
    if is_print:
        print('***************************************')
        print('APCER BPCER ACER')
        print('{:.4f} {:.4f} {:.4f}'.format(apcer, bpcer, acer))
        print('***************************************')
    return 100 - acer
def eval_hter(results, is_print=False):
    """
    Score soft predictions as (1 - HTER) * 100 at the EER threshold.

    Also draws the ROC curve as a side effect.

    :param results: np.array shape of (N, 2) [prob, label]
    :param is_print: print eval score
    :return: score
    """
    probs = results[:, 0]
    labels = results[:, 1]
    eer, threshold, frr_curve, far_curve = get_EER_states(probs, labels)
    auc = roc_auc_score(labels, probs)
    draw_roc(frr_curve, far_curve, auc)
    hter = get_HTER_at_thr(probs, labels, threshold)
    if is_print:
        print('***************************************')
        print('EER HTER AUC Thr')
        print('{:.4f} {:.4f} {:.4f} {:.4f}'.format(
            eer * 100, hter * 100, auc * 100, threshold))
        print('***************************************')
    return (1 - hter) * 100
def eval_acc(results, is_print=False):
    """
    Score binarized predictions as accuracy in percent.

    :param results: np.array shape of (N, 2) [pred, label]
    :param is_print: print eval score
    :return: accuracy (%)
    """
    n_correct = (results[:, 0] == results[:, 1]).sum()
    acc = n_correct / results.shape[0] * 100
    if is_print:
        print('*****************')
        print('ACC Pos')
        print('{:.2f} {}'.format(acc, int(results[:, 0].sum())))
        print('*****************')
    return acc
def eval_metric(results, thr='auto', type='acc', res_dir=None):
    """
    Evaluate [pred, label] pairs with a chosen metric and threshold policy.

    :param results: np.array shape of (N, 2) [pred, label]
    :param thr: 'auto' (scan 1000 thresholds, keep the best score),
                'mid' (threshold at the median score), or a float
                threshold applied directly
    :param type: 'acc', 'acer' or 'hter'
    :param res_dir: if given, pickle [pred, label, raw_score] rows there
    :return: best score
    :raises NotImplementedError: for unknown metric types
    """
    eval_tools = dict(
        acc=eval_acc,
        acer=eval_acer,
        hter=eval_hter)
    results = np.array(results)
    if type not in ['acc', 'acer', 'hter']:
        raise NotImplementedError
    elif type == 'hter':
        # HTER works on soft scores directly; no thresholding needed here
        eval_score = eval_hter(results, is_print=True)
        return eval_score
    else:
        eval_tool = eval_tools[type]
        if isinstance(thr, float):
            # fixed caller-supplied threshold: binarize once and score.
            # Plain float/int builtins: the np.float / np.int aliases used
            # originally were removed in NumPy 1.24 and raise AttributeError.
            results[:, 0] = (results[:, 0] > thr).astype(float)
            results = results.astype(int)
            return eval_tool(results, is_print=True)
        # scan 1000 evenly spaced thresholds over the score range
        min_score = results[:, 0].min()
        max_score = results[:, 0].max()
        s_step = (max_score - min_score) / 1000
        scores = []
        thrs = []
        for i in range(1000):
            thre = min_score + i * s_step
            thrs.append(thre)
            result = results.copy()
            result[:, 0] = (results[:, 0] > thre).astype(float)
            result = result.astype(int)
            score = eval_tool(result, is_print=False)
            scores.append(score)
        max_ind = np.argmax(np.array(scores))
        if thr == 'mid':
            # threshold at the median raw score instead of the best scan point
            sinds = np.argsort(results[:, 0])
            best_thr = results[sinds[int(results.shape[0]/2)-1], 0]
        else:
            best_thr = thrs[max_ind]
        print('Best Threshold: {:.4f}'.format(best_thr))
        # keep [binarized pred, label, raw score] for optional saving
        save_results = np.zeros((results.shape[0], 3))
        save_results[:, 2] = results[:, 0]
        results[:, 0] = (results[:, 0] > best_thr).astype(float)
        save_results[:, :2] = results[:, :2]
        eval_score = eval_tool(results, is_print=True)
        if res_dir is not None:
            res_dir = os.path.join(res_dir, 'res_{}.pkl'.format(int(eval_score * 10)))
            with open(res_dir, 'wb') as file:
                pickle.dump(save_results, file)
        return eval_score
| VIS-VAR/LGSC-for-FAS | utils/eval.py | eval.py | py | 4,112 | python | en | code | 223 | github-code | 6 | [
{
"api_name": "statistic.get_EER_states",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.roc_auc_score",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "util.draw_roc",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "... |
32509272843 | import numpy as np
import matplotlib.pyplot as plt
from math import sqrt
import scipy.stats as sc
def myRho(T, n):
    """
    Pearson correlation statistics for two paired samples.

    :param T: pair (x-values, y-values) of equal-length sequences
    :param n: number of points to accumulate over
    :return: (covariance, correlation coefficient, t-statistic)
    """
    x_vals, y_vals = T[0][0:], T[1][0:]
    mean_x = sum(x_vals) / float(len(x_vals))  # E[X]
    mean_y = sum(y_vals) / float(len(y_vals))  # E[Y]
    cross = 0
    var_x = 0
    var_y = 0
    for k in range(n):
        dx = x_vals[k] - mean_x
        dy = y_vals[k] - mean_y
        cross = cross + dx * dy
        var_x = var_x + dx ** 2
        var_y = var_y + dy ** 2
    rho = cross / sqrt(var_x * var_y)               # correlation
    cov = cross / n                                 # covariance
    t_stat = rho / sqrt((1 - rho ** 2) / (n - 2))   # t-statistic on rho
    return (cov, rho, t_stat)
def mysort(X):
    """
    Average-rank transform of the two rows of X (ties share the mean rank).

    :param X: pair of equal-length numeric numpy arrays
    :return: (ranks of X[0], ranks of X[1]) as float arrays
    """
    rank_rows = []
    for row_idx in range(0, 2):
        values = X[row_idx][0:]
        ranks = np.zeros(np.shape(values), dtype='float')
        next_rank = 0
        for val in np.unique(values):
            positions = np.where(values == val)[0]
            count = len(positions)
            # tied values all receive the mean of the ranks they span
            ranks[positions] = np.mean(np.arange(next_rank, next_rank + count))
            next_rank = next_rank + count
        rank_rows.append(ranks)
    return (rank_rows[0], rank_rows[1])
# --- demo: rank-transform correlated data, then test the rank correlation ---
X = np.random.uniform (0, 12, 500)
# Y is a monotone function of X plus Gaussian noise, so ranks correlate strongly.
Y=np.exp(X)+np.random.normal(0,1,500)
M = np.zeros((500, 2));
A = np.random.randint(0, 15, 10)
B=np.random.randint(0, 15, 10)
C=list([X,Y])
# Spearman-style average ranks of X and Y.
D,R=mysort(C)
print(C)
print(D,R)
plt.figure(2)
plt.scatter(D,R, marker='+')
C=list([D,R])
# Correlation statistics computed on the ranks.
a=myRho(C,500)
print(a)
plt.show()
| Varelafv/TD6.py | exo3.py | exo3.py | py | 1,511 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "math.sqrt",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "math.sqrt",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.unique",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 27,... |
29214477520 | import pytest
from datetime import datetime
from ..forms import PostForm, CategoryForm, CommentForm
from accounts.models import Profile, User
from ..models import Post
@pytest.fixture
def create_test_user():
    """Create and return a verified user with fixed credentials."""
    data = {"email": "test@test.com", "password": "a/1234567"}
    return User.objects.create_user(**data, is_verify=True)
@pytest.fixture
def user_profile(create_test_user):
    """Return the Profile of the test user.

    NOTE(review): assumes a Profile already exists for a new user
    (presumably created by a signal) — confirm against accounts app.
    """
    user = create_test_user
    return Profile.objects.get(user=user)
@pytest.fixture
def create_post(user_profile):
    """Create and return a Post authored by the test user's profile."""
    data = {
        "author": user_profile,
        "title": "test title",
        "content": "test content",
        "published_date": datetime.now(),
    }
    return Post.objects.create(**data)
class TestPostForm:
    """Validation behaviour of PostForm (captcha can never pass in tests)."""

    def test_post_form_valid_data(self):
        form = PostForm(data={
            "title": "test title",
            "content": "test content",
            "published_date": datetime.now(),
            "captcha": 1,
        })
        # only the captcha field should reject an otherwise valid payload
        assert not form.is_valid()
        assert len(form.errors) == 1
        assert form.has_error("captcha")

    def test_post_form_missing_field_data(self):
        form = PostForm(data={
            "title": "test title",
            "content": "test content",
            "captcha": 1,
        })
        # invalid captcha plus the missing published_date field
        assert not form.is_valid()
        assert len(form.errors) == 2
        assert form.has_error("captcha")
        assert form.has_error("published_date")

    def test_post_form_no_data(self):
        assert not PostForm().is_valid()
@pytest.mark.django_db
class TestCommentForm:
    """Validation behaviour of CommentForm."""

    def test_comment_form_valid_data(self, create_post):
        form = CommentForm(data={
            "post": create_post.id,
            "name": "test",
            "email": "test@test.com",
            "message": "test message",
        })
        assert form.is_valid()
        assert len(form.errors) == 0

    def test_comment_form_invalid_data(self, create_post):
        # malformed email -> exactly one field error
        form = CommentForm(data={
            "post": create_post.id,
            "name": "test",
            "email": "invalid email format",
            "message": "test message",
        })
        assert not form.is_valid()
        assert len(form.errors) == 1

    def test_comment_form_invalid_data_missing_post_field(self):
        # missing post reference -> exactly one field error
        form = CommentForm(data={
            "name": "test",
            "email": "test@test.com",
            "message": "test message",
        })
        assert not form.is_valid()
        assert len(form.errors) == 1
class TestCategoryForm:
    """Validation behaviour of CategoryForm (captcha can never pass in tests)."""

    def test_category_form_valid_data(self):
        form = CategoryForm(data={"name": "test", "captcha": 1})
        # only the captcha field should reject an otherwise valid payload
        assert not form.is_valid()
        assert len(form.errors) == 1
        assert form.has_error("captcha")

    def test_category_form_invalid_data(self):
        assert not CategoryForm().is_valid()
| smz6990/DRF-Blog | core/blog/tests/test_forms.py | test_forms.py | py | 3,203 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "accounts.models.User.objects.create_user",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "accounts.models.User.objects",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "accounts.models.User",
"line_number": 12,
"usage_type": "name"... |
29913794129 | #
# VIK Example file for working with date information
#
import os
import calendar
from datetime import date, time, datetime
def main():
    """Interactively ask for the user's name and birth date, display the
    birth month's calendar, and report age plus time until the next birthday.

    All interaction happens over stdin/stdout; intended to run as a script.
    """
    os.system('clear')  # clear the terminal (POSIX; Windows would need 'cls')

    # Lookup tables for human-readable names
    # (weekday(): 0=Monday..6=Sunday; month: 1=January..12=December).
    DayOfWeek = ["Monday","Tuesday","Wednesday","Thursday","Friday","Saturday","Sunday"]
    MonthOfYear = ["January","February","March","April","May","June","July","August","September","October","November","December"]

    # Gather user input.
    Name = input("\nHello there! What's your name? ")
    BirthdayInput = input("\nWhen were you born (m/d/yyyy)? ")
    # ValueError propagates if the input does not match m/d/yyyy.
    Birthday = datetime.strptime(BirthdayInput,'%m/%d/%Y')

    # Verb tense used when describing the birth date.
    if Birthday.date() < date.today():
        DateTense = "fell on"
    elif Birthday.date() == date.today():
        DateTense = "is today - Happy Birthday!"
    else:
        DateTense = "will fall on"

    # Create and display a single-month calendar based on birthday input.
    BirthdayCalendar = calendar.TextCalendar(calendar.MONDAY)
    BirthdayCalendarDisplay = BirthdayCalendar.formatmonth(Birthday.date().year,Birthday.date().month)
    print("\n\n",BirthdayCalendarDisplay,"\n\n")

    # Days since birth, age reached this year, and this year's birthday date.
    BirthdayDiff = abs(Birthday.date() - date.today())
    AgeThisYear = date.today().year - Birthday.date().year
    BirthdayThisYear = date(date.today().year,Birthday.date().month,Birthday.date().day)

    # Distance to this year's birthday in a human-friendly unit.
    # BUG FIX: the original compared a timedelta directly to an int
    # (e.g. ``abs(...) < 14``), which raises TypeError on every run;
    # compare the .days count instead.
    DaysToBirthday = abs(BirthdayThisYear - date.today()).days
    if DaysToBirthday < 14:
        BirthdayNext = DaysToBirthday
        BirthdayNextUnit = "days"
    elif DaysToBirthday < 32:
        BirthdayNext = DaysToBirthday / 7
        BirthdayNextUnit = "weeks"
    else:
        BirthdayNext = DaysToBirthday / 30.5
        BirthdayNextUnit = "months"

    # Symbols for future use
    Sunny = '\u263c'
    Cloudy = '\u2601'
    Rainy = '\u2614'

    print(
        Name,", your birth date,",
        MonthOfYear[Birthday.date().month-1],
        Birthday.date().day,",",
        Birthday.date().year,
        DateTense,"a",
        DayOfWeek[Birthday.date().weekday()],
        BirthdayDiff.days,"days ago."
        )
    if BirthdayThisYear < date.today():
        print("\nYour birthday has passed this year.\nYou turned",AgeThisYear,"years old.\n")
    elif BirthdayThisYear == date.today():
        print("\nIt's your birthday today - *** HAPPY BIRTHDAY! *** \nYou turned",AgeThisYear,"years old.\n")
    else:
        print("\nYour birthday is coming up later this year in",BirthdayNext,BirthdayNextUnit,"\n")
# Run the interactive program only when executed as a script.
if __name__ == "__main__":
    main()
| VikramDMello/Python-Learning | src/Lynda.com Exercise Files/Ch3/dates_start.py | dates_start.py | py | 3,633 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.system",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "datetime.date.... |
31315499323 | ###########
# This script builds the database for the web visualization
# It can take a long time to run, so it is recommended to run it in the background
# Here we we are going to take a folder of ABF or NWB files, and extract some features
# we will choose to use a custom backend or ipfx to extract the features
# from each file. We will then save the features in a database file.
# The database file will be used by the web visualization to display the data.
# The database file is a JSON file, or csv
###########
# Import libraries
import os
import sys
import json
import glob
import argparse
import pandas as pd
import numpy as np
import logging
from functools import partial
import copy
import joblib
import matplotlib.pyplot as plt
import scipy.stats
from multiprocessing import pool, freeze_support
# Import ipfx
import ipfx
import ipfx.script_utils as su
from ipfx.stimulus import StimulusOntology
import allensdk.core.json_utilities as ju
from ipfx.bin import run_feature_collection
from ipfx import script_utils as su
from ipfx.sweep import SweepSet, Sweep
import ipfx.stim_features as stf
import ipfx.stimulus_protocol_analysis as spa
import ipfx.data_set_features as dsf
import ipfx.time_series_utils as tsu
import ipfx.feature_vectors as fv
from ipfx.dataset.create import create_ephys_data_set
# Import custom functions
from pyAPisolation import patch_utils
from pyAPisolation.loadNWB import loadNWB, GLOBAL_STIM_NAMES
try:
from pyAPisolation.dev import stim_classifier as sc
except:
print("Could not import stim_classifier")
# ==== GLOBALS =====
# Default Allen Institute stimulus ontology records.
_ONTOLOGY = ju.read(StimulusOntology.DEFAULT_STIMULUS_ONTOLOGY_FILE)
# Canonical unit name -> spellings encountered in NWB sweep metadata.
_UNIT_ONTOLOGY = {'amp': ['amp', 'ampere', 'amps', 'amperes', 'A'],'volt': ['volt', 'v', 'volts', 'V'], 'sec': ['sec', 's', 'second', 'seconds', 'secs', 'sec']}
# Module-level logger.
log = logging.getLogger(__name__)
def glob_files(folder, ext="nwb"):
    """
    Recursively collect every file with the given extension under ``folder``.

    :param folder: root directory to search
    :param ext: file extension without the leading dot
    :return: list of matching file paths
    """
    pattern = folder + "/**/*." + ext
    return glob.glob(pattern, recursive=True)
def run_analysis(folder, backend="ipfx", outfile='out.csv', ext="nwb", parallel=False):
    """Extract ephys features from every file under ``folder`` and save a CSV.

    :param folder: root directory to search for recordings
    :param backend: "ipfx" (supported) or "custom" (raises NotImplementedError)
    :param outfile: CSV path for the collected features
    :param ext: NOTE(review): currently unused — glob_files is called with
        its default "nwb" extension.
    :param parallel: if True, fan out over files with joblib
        NOTE(review): ``results`` is only assigned on the parallel path, so
        parallel=False (the default) reaches pd.DataFrame(results) with
        ``results`` unbound — confirm intended usage.
    :return: DataFrame of per-file features indexed by specimen_id
    """
    files = glob_files(folder)[::-1]
    file_idx = np.arange(len(files))
    if backend == "ipfx":
        # Use ipfx to extract features
        #get_stimulus_protocols(files)
        # accept every stimulus name when loading NWB files
        GLOBAL_STIM_NAMES.stim_inc = ['']
        GLOBAL_STIM_NAMES.stim_exc = []
        get_data_partial = partial(data_for_specimen_id,
                            passed_only=False,
                            data_source='filesystem',
                            ontology=None,
                            file_list=files)
        if parallel == True:
            # Run in parallel
            # NOTE(review): n_jobs=1 ignores the cpu_count computed here.
            parallel = joblib.cpu_count()
            results = joblib.Parallel(n_jobs=1, backend='multiprocessing')(joblib.delayed(get_data_partial)(specimen_id) for specimen_id in file_idx)
    elif backend == "custom":
        raise(NotImplementedError)
        # Use custom backend to extract features
        # NOTE(review): everything below the raise is dead code, and
        # feature_extraction is not defined in this module.
        results = []
        for f in files:
            # Extract features from each file
            result = feature_extraction.extract_features(f)
            results.append(result)
    # Save results
    df = pd.DataFrame(results)
    df.to_csv(outfile, index=False)
    results = pd.DataFrame().from_dict(results).set_index('specimen_id')
    return results
def main():
    """Command line entry point: parse arguments and run the analysis."""
    arg_parser = argparse.ArgumentParser(description='Build database for web visualization')
    arg_parser.add_argument('folder', type=str, help='Folder containing ABF or NWB files')
    arg_parser.add_argument('--backend', type=str, default="ipfx", help='Backend to use for feature extraction')
    arg_parser.add_argument('--outfile', type=str, default="out.csv", help='Output file name')
    parsed = arg_parser.parse_args()
    # Run analysis
    run_analysis(parsed.folder, parsed.backend, parsed.outfile)
#======== IPFX functions ===========
#clone the function from ipfx/stimulus_protocol_analysis.py
# here we will modify it to handle test pulses intelligently, then overwrite the function in ipfx for this session
def get_stim_characteristics(i, t, test_pulse=True, start_epoch=None, end_epoch=None, test_pulse_length=0.250):
    """
    Identify the start time, duration, amplitude, start index, and end index
    of a general stimulus, skipping a leading test pulse when one is present.

    :param i: stimulus waveform
    :param t: time base, assumed uniformly sampled
    :param test_pulse: unused, kept for interface compatibility
    :param start_epoch: unused, kept for interface compatibility
    :param end_epoch: unused, kept for interface compatibility
    :param test_pulse_length: window (s) within which a test pulse may occur
    :return: (start_time, duration, amplitude, start_idx, end_idx), or
             (None, None, 0.0, None, None) when no usable stimulus exists
    """
    sample_rate = 1 / (t[1] - t[0])
    transitions = np.flatnonzero(np.diff(i))  # sample indices where the command steps
    first_transition = 0
    if len(transitions[first_transition:]) == 0:
        # completely flat trace: no stimulus found
        return None, None, 0.0, None, None
    # More than 3 steps usually means a test pulse precedes the stimulus;
    # skip its up/down pair when it falls inside the test-pulse window.
    # TODO make this more robust
    if len(transitions) > 3:
        if transitions[1] < test_pulse_length * sample_rate:
            first_transition = 2
        else:
            first_transition = 0
    elif len(transitions) < 3:
        first_transition = 0
    start_idx = transitions[first_transition] + 1  # +1 compensates for diff()
    end_idx = transitions[-1]
    if start_idx >= end_idx:
        # sweep was cut off before the stimulus ended
        return None, None, 0.0, None, None
    start_time = float(t[start_idx])
    duration = float(t[end_idx] - t[start_idx - 1])
    segment = i[start_idx:end_idx + 1]
    hi = max(segment)
    lo = min(segment)
    # amplitude is the excursion with the larger magnitude, sign preserved
    amplitude = float(hi) if abs(hi) > abs(lo) else float(lo)
    return start_time, duration, amplitude, start_idx, end_idx
# Monkey-patch ipfx so its analyses use the test-pulse-aware version above.
ipfx.stim_features.get_stim_characteristics = get_stim_characteristics
def parse_long_pulse_from_dataset(data_set):
    """Collect the Long Square (current step) sweeps from a loaded data set.

    Normalizes units to the mV / pA convention ipfx expects, skips sweeps
    that are not current clamp or not long-square shaped, and wraps the
    survivors in ipfx ``Sweep`` objects.

    :param data_set: object returned by ``loadNWB`` (dataX/dataY/dataC
        arrays plus per-sweep metadata)
    :return: (sweeps, start_times, end_times); the time lists mark the
        stimulus epoch of each accepted sweep
    """
    sweeps = []
    start_times = []
    end_times = []
    for sweep in np.arange(len(data_set.dataY)):
        i = data_set.dataC[sweep]*1
        t = data_set.dataX[sweep]
        v = data_set.dataY[sweep]
        dt = t[1] - t[0]
        # skip anything that is not current clamp
        if match_unit(data_set.sweepMetadata[sweep]['stim_dict']["unit"]) != "amp":
            continue
        # if the sweep v is in volts, convert to mV; ipfx wants mV
        if match_unit(data_set.sweepMetadata[sweep]['resp_dict']["unit"]) == "volt":
            # the logged unit is not always trustworthy; guess from the range
            if np.max(v) > 500 and np.min(v) < -500:
                # possibly in nV or something else, convert to mV anyway
                v = v/1000
            elif np.max(v) < 1 and np.min(v) > -1:
                # probably in volts, convert to mV
                v = v*1000
        # if the sweep i is in amps, convert to pA; ipfx wants pA
        if match_unit(data_set.sweepMetadata[sweep]['stim_dict']["unit"])=="amp":
            if np.max(i) < 0.1 and np.min(i) > -0.1:
                # probably in amps, convert to picoamps
                i = np.rint(i*1000000000000).astype(np.float32)
            else:
                # probably in pA already
                i = np.rint(i).astype(np.float32)
        # remove small DC offsets: anything within +/-5 pA is forced to 0
        i[np.logical_and(i < 5, i > -5)] = 0
        if match_protocol(i, t) != "Long Square":
            continue
        start_time, duration, amplitude, start_idx, end_idx = get_stim_characteristics(i, t)
        if start_time is None:
            continue
        # wrap the accepted sweep in an ipfx Sweep object
        start_times.append(start_time)
        end_times.append(start_time+duration)
        sweep_item = Sweep(t, v, i, clamp_mode="CurrentClamp", sampling_rate=int(1/dt), sweep_number=sweep)
        sweeps.append(sweep_item)
    return sweeps, start_times, end_times
def data_for_specimen_id(specimen_id, passed_only, data_source, ontology, file_list=None, amp_interval=20, max_above_rheo=100, debug=True):
    """Run long-square feature extraction for one file and return a flat dict.

    Adapted from ipfx/bin/run_feature_collection.py, but working directly on
    the lower-level ipfx building blocks so NWB files that the stock ipfx
    loader rejects (e.g. many DANDI sets) can still be analyzed.

    :param specimen_id: index into ``file_list``
    :param passed_only: unused here, kept for interface compatibility
    :param data_source: unused here, kept for interface compatibility
    :param ontology: unused here, kept for interface compatibility
    :param file_list: list of file paths to analyze
    :param amp_interval: step (pA above rheobase) between reported sweeps
    :param max_above_rheo: largest amp above rheobase (pA) to report
    :param debug: if True, print/plot sweeps rejected by QC
    :return: dict of features keyed by feature name; on any failure only
        {"specimen_id": path} is returned (errors are printed, not raised)
    """
    result = {}
    result["specimen_id"] = file_list[specimen_id]
    try:
        #this is a clone of the function in ipfx/bin/run_feature_collection.py,
        # here we are gonna try to use it to handle data that may not be in an NWB format IPFX can handle
        _, _, _, _, data_set = loadNWB(file_list[specimen_id], return_obj=True)
        if data_set is None or len(data_set.dataY)<1:
            return result
        #here we are going to perform long square analysis on the data,
        #ipfx does not play nice with many NWBs on dandi, so we are going to link into the lower level functions
        #and do the analysis ourselves
        #hopefully this will be fixed in the future and we can use ipfx for this
        sweeps = []
        start_times = []
        end_times = []
        debug_log = {}
        for sweep in np.arange(len(data_set.dataY)):
            i = np.nan_to_num(data_set.dataC[sweep]*1)
            t = data_set.dataX[sweep]
            v = np.nan_to_num(data_set.dataY[sweep])
            dt = t[1] - t[0]
            # skip anything that is not current clamp
            if match_unit(data_set.sweepMetadata[sweep]['stim_dict']["unit"]) != "amp":
                logging.debug(f"sweep {sweep} is not current clamp")
                #debug_log[sweep] = "not current clamp"
                continue
            #if the sweep v is in volts, convert to mV, ipfx wants mV
            if match_unit(data_set.sweepMetadata[sweep]['resp_dict']["unit"]) == "volt":
                #sometimes the voltage is in volts, sometimes in mV, even thought it is logged as bolts this is a hack to fix that
                if np.max(v) > 500 and np.min(v) < -500:
                    #possibly in nV or something else, convert to mV anyway
                    v = v/1000
                elif np.max(v) < 1 and np.min(v) > -1:
                    #probably in volts, convert to mV
                    v = v*1000
            #if the sweep i is in amps, convert to pA, ipfx wants pA
            if match_unit(data_set.sweepMetadata[sweep]['stim_dict']["unit"])=="amp":
                if np.max(i) < 0.1 and np.min(i) > -0.1:
                    #probably in amp, convert to picoAmps
                    i = i*1000000000000
                #probably in pA already
            #i[np.logical_and(i < 5, i > -5)] = 0
            #try to figure out if this is a long square
            if match_protocol(i, t) != "Long Square":
                logging.debug(f"skipping sweep {sweep} because it is not a long square")
                debug_log[sweep] = "likely not a long square"
                continue
            start_time, duration, amplitude, start_idx, end_idx = get_stim_characteristics(i, t)
            if QC_voltage_data(t, v, i) == 0:
                logging.debug(f"skipping sweep {sweep} because it failed QC")
                debug_log[sweep] = "failed QC"
                continue
            #construct a sweep obj
            start_times.append(start_time)
            end_times.append(start_time+duration)
            sweep_item = Sweep(t, v, i, clamp_mode="CurrentClamp", sampling_rate=int(1/dt), sweep_number=sweep)
            sweeps.append(sweep_item)
        if debug:
            # show what was rejected and overlay the QC failures in red
            for sweep in debug_log.keys():
                print(f"sweep {sweep} failed QC because it was {debug_log[sweep]}")
                if debug_log[sweep] == "failed QC":
                    plt.plot(data_set.dataX[sweep], data_set.dataY[sweep], label=f"{sweep} {debug_log[sweep]}", c='r')
                else:
                    #plt.plot(data_set.dataX[sweep], data_set.dataY[sweep], label=f"{sweep} {debug_log[sweep]}", c='k')
                    continue
            #plt.legend()
            plt.pause(0.2)
        #get the most common start and end times
        start_time = scipy.stats.mode(np.array(start_times))[0][0]
        end_time = scipy.stats.mode(np.array(end_times))[0][0]
        #index out the sweeps that have the most common start and end times
        idx_pass = np.where((np.array(start_times) == start_time) & (np.array(end_times) == end_time))[0]
        sweeps = SweepSet(np.array(sweeps, dtype=object)[idx_pass].tolist())
        lsq_spx, lsq_spfx = dsf.extractors_for_sweeps(
            sweeps,
            start=start_time , #if the start times are not the same, this will fail
            end=end_time, #if the end times are not the same, this will fail
            min_peak=-25,
        )
        lsq_an = spa.LongSquareAnalysis(lsq_spx, lsq_spfx,
                                        subthresh_min_amp=-100.0)
        # very early stimulus onset: shrink the baseline windows to fit
        if np.mean(start_times) < 0.01:
            lsq_an.sptx.baseline_interval = np.mean(start_times)*0.1
            lsq_an.sptx.sag_baseline_interval = np.mean(start_times)*0.1
        lsq_features = lsq_an.analyze(sweeps)
        result.update({
            "input_resistance": lsq_features["input_resistance"],
            "tau": lsq_features["tau"],
            "v_baseline": lsq_features["v_baseline"],
            "sag_nearest_minus_100": lsq_features["sag"],
            "sag_measured_at": lsq_features["vm_for_sag"],
            "rheobase_i": int(lsq_features["rheobase_i"]),
            "fi_linear_fit_slope": lsq_features["fi_fit_slope"],
        })
        # Identify suprathreshold set for analysis
        sweep_table = lsq_features["spiking_sweeps"]
        mask_supra = sweep_table["stim_amp"] >= lsq_features["rheobase_i"]
        sweep_indexes = fv._consolidated_long_square_indexes(sweep_table.loc[mask_supra, :])
        amps = np.rint(sweep_table.loc[sweep_indexes, "stim_amp"].values - lsq_features["rheobase_i"])
        spike_data = np.array(lsq_features["spikes_set"])
        # report spike/sweep features at amp_interval steps above rheobase
        for amp, swp_ind in zip(amps, sweep_indexes):
            if (amp % amp_interval != 0) or (amp > max_above_rheo) or (amp < 0):
                continue
            amp_label = int(amp / amp_interval)
            first_spike_lsq_sweep_features = run_feature_collection.first_spike_lsq(spike_data[swp_ind])
            result.update({"ap_1_{:s}_{:d}_long_square".format(f, amp_label): v
                           for f, v in first_spike_lsq_sweep_features.items()})
            mean_spike_lsq_sweep_features = run_feature_collection.mean_spike_lsq(spike_data[swp_ind])
            result.update({"ap_mean_{:s}_{:d}_long_square".format(f, amp_label): v
                           for f, v in mean_spike_lsq_sweep_features.items()})
            sweep_feature_list = [
                "first_isi",
                "avg_rate",
                "isi_cv",
                "latency",
                "median_isi",
                "adapt",
            ]
            result.update({"{:s}_{:d}_long_square".format(f, amp_label): sweep_table.at[swp_ind, f]
                           for f in sweep_feature_list})
            result["stimulus_amplitude_{:d}_long_square".format(amp_label)] = int(amp + lsq_features["rheobase_i"])
        rates = sweep_table.loc[sweep_indexes, "avg_rate"].values
        result.update(run_feature_collection.fi_curve_fit(amps, rates))
        #we should record the name of the stimuli used and the sweeps used
    except Exception as e:
        # best-effort: report the failure and return whatever was gathered
        print("error with specimen_id: ", specimen_id)
        print(e)
        plt.close()
        return result
    plt.close()
    return result
def find_time_index(t, t_0):
    """ Find the index value of a given time (t_0) in a time series (t).

    Unlike the stock IPFX implementation (which raises when t_0 is outside
    the series), this lenient replacement clamps out-of-range times to the
    nearest end of the series so partially-recorded sweeps still resolve to
    a valid index.

    Parameters
    ----------
    t : time array
    t_0 : time point to find an index

    Returns
    -------
    idx: index of t closest to t_0
    """
    # The original carried a dead bare-string "outside of time range" message
    # behind an inverted condition; it never executed. Out-of-range t_0 is
    # simply clamped into [t[0], t[-1]] instead.
    if t_0 < t[0]:
        t_0 = t[0]
    if t_0 > t[-1]:
        t_0 = t[-1]
    idx = np.argmin(abs(t - t_0))
    return idx
# Monkey-patch ipfx's time-series utilities so every downstream IPFX routine
# uses the lenient (clamping) find_time_index defined in this module instead
# of the strict stock version.
tsu.find_time_index = find_time_index
#stimulus protocol analysis functions, here we will guess what stimulus protocol was used, and affix that to the stimulus ontology later
def get_stimulus_protocols(files, ext="nwb", method='random'):
    """Scan NWB files, guess the stimulus protocol of each sweep, and build
    the GLOBAL_STIM_NAMES inclusion filter from the stimulus names seen.

    Parameters
    ----------
    files : sequence of str
        Paths to candidate NWB files.
    ext : str
        File extension hint (currently unused; files are assumed to be NWB).
    method : str
        'random' samples up to 100 files, 'first' uses only the first file,
        'all' scans every file.

    Returns
    -------
    A deep copy of the updated GLOBAL_STIM_NAMES ontology filter.
    """
    if method == 'random':
        # Sample up to 100 files. NOTE(review): np.random.choice samples with
        # replacement, so the same file may be visited more than once.
        files = np.random.choice(files, min(100, len(files)))
    elif method == 'first':
        # Keep a one-element sequence; `files = files[0]` would hand a bare
        # path string to the loop below, which would iterate its characters.
        files = files[:1]
    elif method == 'all':
        pass
    # Blank the GLOBAL_STIM_NAMES filter so that every stimulus name is
    # visible while scanning.
    GLOBAL_STIM_NAMES.stim_inc = ['']
    GLOBAL_STIM_NAMES.stim_exc = []
    classifier = sc.stimClassifier()
    stim_to_use = []
    for f in files:
        _, _, _, _, data_set = loadNWB(f, return_obj=True)
        for j in np.arange(len(data_set.dataY)):
            sweep_meta = data_set.sweepMetadata[j]
            stim_trace = data_set.dataC[j]  # command (stimulus) trace
            stim_protocol = classifier.predict(stim_trace)
            if stim_protocol is not None:
                # Both metadata fields may carry the stimulus name; collect
                # either, de-duplicated against what we already gathered
                # (the original checked membership in the just-blanked
                # stim_inc list, so duplicates accumulated).
                stim_name_1 = sweep_meta['description']
                stim_name_2 = sweep_meta['stimulus_description']
                for stim_name in [stim_name_1, stim_name_2]:
                    if stim_name not in stim_to_use:
                        # Skip empty / not-available placeholders.
                        if stim_name != '' and stim_name != 'N//A' and stim_name != 'NA' and stim_name != 'N/A':
                            stim_to_use.append(stim_name)
    GLOBAL_STIM_NAMES.stim_inc = stim_to_use
    return copy.deepcopy(GLOBAL_STIM_NAMES)
def match_protocol(i, t, test_pulse=True, start_epoch=None, end_epoch=None, test_pulse_length=0.1):
    """Best-effort identification of the stimulus protocol for one sweep.

    Tries the trained classifier first; when it is not conclusive, falls back
    to duration-based heuristics. Returns a protocol name or None.
    """
    classifier = sc.stimClassifier()
    stim_chars = get_stim_characteristics(
        i, t, test_pulse=test_pulse, start_epoch=start_epoch,
        end_epoch=end_epoch, test_pulse_length=test_pulse_length)
    start_time, duration, amplitude, start_idx, end_idx = stim_chars
    # A confident "long_square" prediction from the classifier wins outright.
    prediction = classifier.decode(classifier.predict(i.reshape(1, -1)))[0]
    if prediction == "long_square":
        return "Long Square"
    if start_time is None:
        # No detectable stimulus onset -> nothing to match against.
        return None
    # Duration heuristics (seconds).
    if duration > 0.25:
        # Longer than 250 ms: probably a long square.
        return match_long_square_protocol(i, t, start_idx, end_idx)
    if duration < 0.1:
        # Shorter than 100 ms: probably a short square.
        return match_short_square_protocol(i, t)
    # Anything in between: check whether it is a ramp.
    return match_ramp_protocol(i, t)
def match_long_square_protocol(i, t, start_idx, end_idx):
    """Decide whether a stimulus trace is consistent with a long square pulse.

    Parameters
    ----------
    i : np.ndarray
        Stimulus (current) trace.
    t : np.ndarray
        Matching time base.
    start_idx, end_idx : int
        Indices bounding the stimulus epoch; used only by the regression
        fallback for noisy traces.

    Returns
    -------
    'Long Square' if the trace looks like a long square pulse, else None.
    """
    di = np.diff(i)
    di_idx = np.flatnonzero(di)  # indices of up/down transitions
    if len(di_idx) == 0:
        # A perfectly flat trace contains no pulse at all.
        return None
    if len(di_idx) == 1:
        # A single transition is a step, not a square pulse.
        return None
    if len(di_idx) > 6:
        # Many transitions: possibly a noisy long square. Fit a line across
        # the stimulus epoch; a near-flat, well-correlated fit is accepted.
        # NOTE(review): `slope < 0.1` also admits strongly negative slopes,
        # and the second branch accepts poor fits (p > 0.05) with positive
        # slope -- confirm the intended acceptance criteria.
        y_data = i[start_idx: end_idx]
        x_data = t[start_idx: end_idx]
        slope, intercept, r_value, p_value, std_err = scipy.stats.linregress(x_data, y_data)
        if slope < 0.1 and p_value < 0.05 and r_value > 0.6:
            return 'Long Square'
        elif slope > 0.1 and p_value > 0.05:
            return 'Long Square'
        else:
            return None
    # A clean long square starts and ends at a holding current of 0.
    if i[0] != 0:
        return None
    if i[-1] != 0:
        return None
    return "Long Square"
def match_short_square_protocol(stimulus_protocol, ontology):
    """Placeholder for short-square protocol matching.

    TODO: implement. Currently always returns None, so callers treat every
    candidate short-square sweep as unmatched.
    """
    pass
def match_ramp_protocol(stimulus_protocol, ontology):
    """Placeholder for ramp protocol matching.

    TODO: implement. Currently always returns None, so callers treat every
    candidate ramp sweep as unmatched.
    """
    pass
def match_unit(unit, ontology=None):
    """Map a free-form unit string onto a canonical unit name.

    Parameters
    ----------
    unit : str
        Unit string as found in the file (e.g. 'mV', 'volts').
    ontology : dict, optional
        Mapping of canonical unit name -> list of accepted spellings.
        Defaults to the module-level _UNIT_ONTOLOGY, now resolved at call
        time rather than at function-definition time (the original default
        bound the constant eagerly, which fails if it is defined later).

    Returns
    -------
    The canonical unit name, or None if nothing matches.
    """
    if ontology is None:
        ontology = _UNIT_ONTOLOGY
    needle = unit.upper()
    for unit_name in ontology:
        # Case-insensitive substring match against every accepted spelling;
        # any() short-circuits where the original built a full list for np.any.
        if any(needle in spelling.upper() for spelling in ontology[unit_name]):
            return unit_name
    return None
def QC_voltage_data(t, v, i, zero_threshold=0.2, noise_threshold=10):
    """Score a voltage sweep: 1 means usable, 0 means reject.

    Targets two failure modes seen in IGOR / MIES generated NWB files:
    traces that cannot plausibly be membrane voltages, and sweeps aborted
    mid-recording (long runs of zero voltage while the command current is
    still non-zero).

    Parameters
    ----------
    t, v, i : arrays
        Time base, voltage trace, and current (command) trace.
    zero_threshold : float
        Fraction of zero-valued samples above which the sweep is suspected
        to have been aborted.
    noise_threshold : float
        Currently unused; reserved for a future noise-based check.
    """
    # Missing or empty data is an automatic reject.
    if v is None or i is None:
        return 0
    if len(v) == 0:
        return 0
    # Membrane potentials beyond +/-500 are physically implausible
    # (this threshold could likely be tightened).
    if np.any(np.abs(v) > 500):
        return 0
    # Extended stretches of exactly-zero voltage suggest an aborted sweep --
    # but only if the command current was active while the voltage sat at 0.
    if np.sum(v == 0) > zero_threshold * len(v):
        idx_zero = np.flatnonzero(np.isclose(v, 0))
        stim_while_zero = np.sum(i[idx_zero] != 0)
        return 0 if stim_while_zero > (zero_threshold / 2) * len(idx_zero) else 1
    # TODO: further QC checks (e.g. large voltage jumps) could be added here.
    return 1
if __name__ == "__main__":
    # freeze_support() is needed on Windows when multiprocessing workers are
    # spawned from a frozen executable; it is a no-op elsewhere.
    freeze_support()
    # Run the database-building entry point.
    main()
| smestern/pyAPisolation | pyAPisolation/web_viz/build_database.py | build_database.py | py | 22,652 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "allensdk.core.json_utilities.read",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "allensdk.core.json_utilities",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "ipfx.stimulus.StimulusOntology.DEFAULT_STIMULUS_ONTOLOGY_FILE",
"line_number":... |
18803588453 | from django.urls import path, include
from watchlist_app.api import views
from rest_framework.routers import DefaultRouter
# DRF router: auto-generates list/detail routes for the StreamPlatform
# viewset under the 'stream/' prefix.
router = DefaultRouter()
router.register('stream', views.StreamPlatformVS,
                basename='streamplatform')
urlpatterns = [
    # Watch-list CRUD (class-based APIViews).
    path('list/', views.WatchListAV.as_view(), name='Watch-list'),
    path('<int:pk>/', views.WatchDetailsAV.as_view(), name='Watch-details'),
    path('list2/', views.WatchListGV.as_view(), name='Watch-list-new'),
    # Router-generated stream-platform routes.
    path('', include(router.urls)),
    # path('stream/', views.StreamPlatformAV.as_view(), name='stream'),
    # path('stream/<int:pk>', views.StreamDetailsAV.as_view(),
    #      name='streamplatform-detail'),
    # path('review/', views.ReviewList.as_view(), name='review-list'),
    # path('review/<int:pk>', views.ReviewDetail.as_view(), name='review-detail'),
    # Reviews are nested under a watch item for creation/listing, and
    # addressed directly for detail access.
    path('<int:pk>/review-create',
         views.ReviewCreate.as_view(), name='review-create'),
    path('<int:pk>/reviews/', views.ReviewList.as_view(), name='review-list'),
    path('review/<int:pk>/',
         views.ReviewDetail.as_view(), name='review-detail'),
    # All reviews by the requesting user.
    path('reviews/',
         views.UserReview.as_view(), name='user-review-detail'),
]
| aliesmaeli79/watchmateAPI | watchlist_app/api/urls.py | urls.py | py | 1,206 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "rest_framework.routers.DefaultRouter",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "watchlist_app.api.views.StreamPlatformVS",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "watchlist_app.api.views",
"line_number": 7,
"usage_type"... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.