text stringlengths 70 452k | dataset stringclasses 2 values |
|---|---|
Nasm: Hide messagem in bmp file
I made this program in assembly that should receive from the terminal one file txt (where's the msg is), one file bmp and the name of that the file bmp should have (file equal to the original but with the hidden msg)!
section .data
; -----
; Define standard constants.
LF equ 10 ; line feed
NULL equ 0 ; end of string
TRUE equ 1
FALSE equ 0
EXIT_SUCCESS equ 0 ; success code
STDIN equ 0 ; standard input
STDOUT equ 1 ; standard output
STDERR equ 2 ; standard error
SYS_write equ 1; write
SYS_read equ 0 ; read
SYS_open equ 2 ; file open
SYS_close equ 3 ; file close
SYS_exit equ 60 ; terminate
SYS_creat equ 85 ; file open/create
SYS_time equ 201 ; get time
O_CREAT equ 0x40
O_TRUNC equ 0x200
O_APPEND equ 0x400
O_RDONLY equ 000000q ; read only
O_WRONLY equ 000001q ; write only
S_IRUSR equ 00400q
S_IWUSR equ 00200q
S_IXUSR equ 00100q
; -----
; Variables/constants for main.
MSGBUFF_SIZE equ 256
IMGBUFF_SIZE equ 24000
NEWBUFF_SIZE equ 24000
newLine db LF, NULL
db LF, LF, NULL
msgDesc dq 1
imgDesc dq 1
newDesc dq 1
errMsgOpen db "Error opening the file.", LF, NULL
errMsgRead db "Error reading from the file.", LF, NULL
img_url db '-----------------',0
msg_url db '-----------------',0
new_img db '-----------------',0
nullstr db '(null)',0
;offset db 1
;size db 1
; -------------------------------------------------------
section .bss
readMsgBuffer: resb MSGBUFF_SIZE
readImgBuffer: resb IMGBUFF_SIZE
readNewBuffer: resb NEWBUFF_SIZE
offset resq 1
size resq 1
; -------------------------------------------------------
section .text
global _start
_start:
mov rbp, rsp
mov rax, [rbp+8] ; argc
cmp rax, 4
jne fim
;read args
mov rax, [rbp+8*3] ;argv[1]
mov rdi, msg_url ;
call converte
mov rax, [rbp+8*4] ;argv[2]
mov rdi, img_url ;
call converte
mov rax, [rbp+8*5] ;argv[3]
mov rdi, new_img ;
call converte
;open file msg
openMsg:
mov rax, SYS_open ; file open
mov rdi, msg_url ; file name string
mov rsi, O_RDONLY ; read only access
syscall ; call the kernel
cmp rax, 0 ; check for success
jl errorOnOpen
mov qword [msgDesc], rax
;open file img
openImg:
mov rax, SYS_open ; file open
mov rdi, img_url ; file name string
mov rsi, O_RDONLY ; read only access
syscall ; call the kernel
cmp rax, 0 ; check for success
jl errorOnOpen
mov qword [imgDesc], rax
;open file new image
openNewImg:
mov rax, SYS_open ; file open
mov rdi, new_img ; file name string
mov rsi, O_APPEND ; append
syscall ; call the kernel
cmp rax, 0 ; check for success
jl errorOnOpen
mov qword [newDesc], rax
;read msg
mov rax, SYS_read
mov rdi, qword [msgDesc]
mov rsi, readMsgBuffer
mov rdx, MSGBUFF_SIZE
syscall
cmp rax, 0
jl errorOnRead
;read img
mov rax, SYS_read
mov rdi, qword [imgDesc]
mov rsi, readImgBuffer
mov rdx, IMGBUFF_SIZE
syscall
cmp rax, 0
jl errorOnRead
mov rsi, readImgBuffer
mov byte [rsi+rax], NULL
mov rdi, readImgBuffer
mov rsi, readMsgBuffer
mov byte [rsi+rax], NULL
mov r8, readMsgBuffer
call escrever
;close files
mov rax, SYS_close
mov rdi, qword [msgDesc]
syscall
mov rax, SYS_close
mov rdi, qword [imgDesc]
syscall
mov rax, SYS_close
mov rdi, qword [newDesc]
syscall
fim:
mov rsp, rbp
pop rbp
mov rax, 1
xor rbx, rbx
int 0x80
ret
errorOnOpen:
mov rdi, errMsgOpen
call printString
jmp fim
errorOnRead:
mov rdi, errMsgRead
call printString
jmp fim
escrever:
push rbp
mov rbp, rsp
push rbx
; first 10 bytes
mov rax, SYS_write ; code for write()
mov rsi, rdi ; addr of characters
mov rdi, newDesc ; file descriptor
mov rdx, 10
syscall ; system call
mov rbx, readImgBuffer
mov rdx, 0
add rbx, 10
;eax = offset
offsetBit:
cmp rdx, 4
je loop
mov r9b, byte[rbx]
add rax, r9
mov rcx, 10
mul rcx
inc rdx
jmp offsetBit
mov r9, rax
mov rax, SYS_write ; code for write()
mov rsi, rbx ; addr of characters
mov rdi, newDesc ; file descriptor
mov rdx, r9
syscall ; system call
mov r12, 0 ;count byte pixel, to jump the 4ºbit
mov r13, 0 ;size file
mov rdi, readImgBuffer ;have position of content
loop:
cmp byte [r8], NULL ;not done
je done
inc r13 ;count one byte
mov rdx, 0 ;count bit character msg
caracter:
mov sil, byte[r8] ;read one byte of the pixel
cmp rdx, 8 ;end of last bit
je loop
cmp r12, 4 ;4º byte pixel
jne continue
mov r12, 0 ;reset byte pixel
continue:
mov al, byte[rdi] ;have byte of pixel
mov cl, 10
shr al, 1
mul cl
shl sil, 1
adc al, 0
mov byte[rdi], al ;modify last bit
inc rdx ;change bit of character
inc r8 ;change byte of pixel
jmp caracter
done:
mov rdi, readImgBuffer
add rdi, r9 ; offset position
mov rax, SYS_write ; code for write()
mov rsi, rdi ; addr of characters
mov rdi, newDesc ; file descriptor newImage
mov rdx, r13
syscall
pop rbx
pop rbp
ret
global printString
printString:
push rbp
mov rbp, rsp
push rbx
; Count characters in string.
mov rbx, rdi
mov rdx, 0
strCountLoop:
cmp byte [rbx], NULL
je strCountDone
inc rdx
inc rbx
jmp strCountLoop
strCountDone:
cmp rdx, 0
je prtDone
; Call OS to output string.
mov rax, SYS_write ; code for write()
mov rsi, rdi ; addr of characters
mov rdi, STDOUT ; file descriptor
; count set above
syscall ; system call
; String printed, return to calling routine.
prtDone:
pop rbx
pop rbp
ret
converte:
push rbx
push rcx
push rdx
mov rbx, 10
xor rcx, rcx
.J1:
xor rdx, rdx
div rbx
push dx
add cl, 1
or eax, eax
jnz .J1
mov rbx, rcx
.J2:
pop ax
or al, 00110000b ; to ASCII
mov [rdi], al ; Store AL to [EDI] (EDI is a pointer to a buffer)
add rdi, 1 ; = inc edi
loop .J2 ; until there are no digits left
mov byte [rdi], 0 ; ASCIIZ terminator (0)
mov rax, rbx ; Restore Count of digits
pop rdx
pop rcx
pop rbx
ret
I run it like:
$ nasm -F dwarf -f elf64 hiddeMsg.asm
$ ld -o HideMsg hiddeMsg.o
$ ./HiddeMsg msg.txt img.bmp img_mod.bmp
and I was expecting to "generate" one image with name img_mod.bmp equal to the original (aka img.bmp) but with the msg in msg.txt hidden... But as I compile and run it doesn't show any errors but also don't do anything and I don't have any idea why?
the file msg has the text: «one text message»
the original image:
img.bmp
What do you see with strace ./HiddeMsg msg.txt img.bmp img_mod.bmp, or when you run it under a debugger like GDB and single-step? It seems like you haven't done any debugging (or haven't told us what you found, which is equivalent as far as SO is concerned). Also, this code is far from minimal as far as being a [mcve].
@PeterCordes i don't know how to debbug a nasm project... never learn/done it in class, from what i understand, i think teacher said that it's a bit different debbug a program that uses the comand line from a program that doesn't...
In GDB, starti msg.txt img.bmp img_mod.bmp to run it with args, stopping before the first instruction of _start. Learning to use a debugger will make it easier to figure out everything else, very much worth your time. (And a waste of everyone else's time to ask about code you haven't debugged with a debugger, strace, or even debug-print function calls. I highly recommend using a debugger because debug-print calls are a pain to write in asm.)
Your program once more terminates prematurely because the very first check does not compare the argc correctly.
When your program starts (HiddeMsg msg.txt img.bmp img_mod.bmp), the stack contains the following:
at RSP+32 argv3 pointer to 'img_mod.bmp'
at RSP+24 argv2 pointer to 'img.bmp'
at RSP+16 argv1 pointer to 'msg.txt'
at RSP+8 argv0 pointer to the program name
at RSP argc 4
Check the number of arguments:
mov rbp, rsp
mov rax, [rbp] ; argc
cmp rax, 4
jne fim
The other arguments are pointers to the file names. It's not useful to convert these pointers to their decimal representation like your program is doing with calling converte. Just store the pointers locally for later use:
mov rax, [rbp+8*2] ; argv[1]
mov [msg_url], rax
mov rax, [rbp+8*3] ; argv[2]
mov [img_url], rax
mov rax, [rbp+8*4] ; argv[3]
mov [new_img], rax
Opening the message file becomes
mov rsi, O_RDONLY
mov rdi, [msg_url]
mov rax, SYS_open
syscall
test rax, rax
js errorOnOpen
mov [msgDesc], rax
You have a mash-up in the part that reads from the message file and the bitmap file, where, for some reason, you zero-terminate both these contents that were read.
You are putting the zero for the message at the same offset as you used for the bitmap! This action will destroy 1 byte of the bitmap!
Keep reading and zeroing together so you use the correct RAX:
mov edx, MSGBUFF_SIZE
mov rsi, readMsgBuffer
mov rdi, [msgDesc]
mov rax, SYS_read
syscall ; -> RAX
test rax, rax
js errorOnRead
mov rsi, readMsgBuffer
mov byte [rsi+rax], NULL
fim:
mov rsp, rbp
pop rbp
mov rax, 1
xor rbx, rbx
int 0x80
ret
Don't terminate your 64-bit program like this. Use the correct SYS_exit syscall:
fim:
xor edi, edi
mov eax, SYS_exit
syscall
With this many errors you should better not insist too much on the contents of escrever. First exercise opening and closing files, and printing the message from the message file. Simpler things. Only move on once this works good...
| common-pile/stackexchange_filtered |
Firebase storage url, new file keep same access token
Duplicate of: Firebase storage URL keeps changing with new token
When a user uploads a profile pic I store this in firebase storage with the file name as the uid.
Lets say the user then goes and makes say 100 posts + 500 comments and then updates their profile image.
Currently I have a trigger which goes and updates the profile image url in all of the post and comment documents. The reason I have to do this is that when the image is changed in storage the access token is changed and this is part of the url so the old url no longer works.
What I want to do is not have the access token change. If I can do this I can avoid the mass updates that will massively increase my firestore writes.
Is there any way to do this? or an alternative?
Edit:
Another solution if you don't mind making the file public.
Add this storage rule and you won't have to use a token to access the file.
This will allow read access to "mydir" globally in any subfolder.
match /{path=**}/mydir/{doc} {
allow read: if true;
}
check my answer: https://stackoverflow.com/a/64129850/4159105
Not sure why this got voted down.
I think making URL in even number of segment can solve this problem. like - folder/subfolder/subsubfolder/image.jpg
Answer provided by @Prodigy here: https://stackoverflow.com/a/64129850/10222449
I tried this and it works well.
This will save millions of writes.
var storage = firebase.storage();
var pathReference = storage.ref('users/' + userId + '/avatar.jpg');
pathReference.getDownloadURL().then(function (url) {
$("#large-avatar").attr('src', url);
}).catch(function (error) {
// Handle any errors
});
How are you getting over the media token when they update the profile picture ?
I believe that when the picture is updated I just delete the current one and then go through the same process as uploading a new one with the same name.
Yeah I've tried that as well, and I can't seem to avoid that token. I've set the bucket as public, and initially had the image as {userid}.png but now tried the ../{userid}/avatar.png and then also tried to first delete the original image and upload the new one. Still nothing, i need the new one to have the token or nothing at all. i've sent feedback to Firebase around this.
I just had a look at my code and while it did work when I tested it I am not yet using it. I was planning on adding it in the future. Do you generate a new token when you upload the updated image? pathReference.getDownloadURL() should return the full url with token.
Looks like for me it is something to do with caching of some sort somewhere either on device, cdn or some where else. What i ended up having to workaround this is to apply a dummry parameter on the url (?dummy=asdasd). This is not ideal since it works where i know the profile... but not when I bring back a list of users...etc...
Interesting. I will come back and comment if I have similar issues when I implement. I have had to do this with things before to stop them caching. Firebase storage has caching settings too I believe.
There are only two options here:
You store the profile image URL only once, probably in the user's profile document, and look it up every time it is needed. In return you only have to write it once.
You store the profile image URL for every post, in which case you only have to load the post documents and not the profile URL for each. In return you'll have to write the profile URL in each post document, and update it though.
For smaller networks the former is more common, since you're more likely to see multiple posts from the same user, so you amortizing the cost of the extra lookup over multiple posts.
The bigger the network of users, the more interesting the second approach becomes, as you'll care about read performance and simplicity more than the writes you're focusing on right now.
In the end, there's no singular right answer here though. You'll have to decide for yourself what performance and cost profile you want your app to have.
If I used a 3rd party service to store the users profile image that allowed a static url even if the image changed this would provide a 3rd alternative with no extra reads or writes. e.g. http://someservice/myaccount/uid.jpg
| common-pile/stackexchange_filtered |
VBA script won't run. Getting Run Time Error 424 Object required
I am getting a run time error when I try to run this. Can anyone solve this?
Sub recolor()
'Sub Color_Change
Dim rng As Range
Set rng = Range("A1:E16")
If Not Intersect(Target, rng) Is Nothing Then
Dim cell As Range
For Each cell In Intersect(Target, rng)
Dim cellValue As String
Dim rgbArray() As String
cellValue = cell.Value
rgbArray = Split(cellValue, ", ")
If UBound(rgbArray) = 2 Then
Dim red As Integer
Dim green As Integer
Dim blue As Integer
red = CInt(rgbArray(0))
green = CInt(rgbArray(1))
blue = CInt(rgbArray(2))
cell.Interior.Color = RGB(red, green, blue)
End If
Next cell
End If
End Sub
I am getting a Object required error
What is Target here? You've not declared it, so is it a Global variable?
...might help to explain how the code is triggered, and on what range it's supposed to operate.
Hi Tim, the target are the cells. The contents of the cells have a RGB color code, such as "99, 190, 123" or "249, 124, 110". I would like the cell to be the color that is listed in the cell. Does that help? Cells A1:E16 contain values in the format "###,###,###". I need this to be dynamic.
What cells exactly do you want to run this on? All cells in A1:E16? If Yes then what is Target for here? If No then which cells should be processed? There is no global Target range edfined for you in Excel VBA - typically Target shows up as a parameter in event procedures, where (eg) Target represents a range of cells being changed or selected.
What cells exactly do you want to run this on? Yes, I want to run this on cells A1:E16. If Yes then what is Target for here? I am not sure - I did not write it.
You don't need Target if you just want to process a fixed range:
Sub recolor()
Dim cell As Range, arr
For Each cell In ActiveSheet.Range("A1:E16").Cells 'or use a specific worksheet
arr = Split(cell.Value, ",")
If UBound(arr) = 2 Then
cell.Interior.Color = _
RGB(CLng(Trim(arr(0))), CLng(Trim(arr(1))), CLng(Trim(arr(2))))
End If
Next cell
End Sub
| common-pile/stackexchange_filtered |
Generate a list from a distinct values of a column mysql, nodejs
I have a mysql table with a field named "year". I want to create a list from distinct values from the year column. my expected output is,
[2011,2013,2014,2015]
but what I receive when I try SELECT DISTINCT year FROM table_name is,
[
{ year: 2012 },
{ year: 2018 },
{ year: 2017 },
{ year: 2014 },
{ year: 2015 },
{ year: 2020 },
{ year: 2019 },
{ year: 2016 },
{ year: 2011 },
{ year: 2013 }
]
When I try SELECT GROUP_CONCAT(DISTINCT year) AS year returned [ { year: '2011,2012,2013,2014,2015,2016,2017,2018,2019,2020' } ].
Please suggest me a method to get the output in my expected format.
My final goal is to create a dict like this from sql query,
{
"years":[2011,2012,....], // distinct years
"int_colors":["red","green",], // distinct colors
"min-date":"2020-11-01",
"prices":{
"min": 0, //min value of price column
"max": 0 max value of price column
}
}
I need to do this using only a single sql query.
Just solved it on client side. used the following sql query,
SELECT GROUP_CONCAT(DISTINCT year) AS years,
GROUP_CONCAT(DISTINCT int_color) AS int_colors,
MIN(price2) AS price_min, MAX(price2) AS price_max
FROM table_name
result is
[
{
years: '2011,2012,2013,2014,2015,2016,2017,2018,2019,2020',
int_colors: 'ash,beiege,beige,black,black pearl,black/alloy,black/brazen gold,black/diesel gray,black/ruby red,brown,brown/tan,charcoal,coral red,dark ash,dark atmosphere/loft brown,dark slate gray,ebony,ebony w/cirrus headlining and ebony/cirrus ip,graphite,gray,jet black,mocha,newmarket tan,red,sand beige,tan,white',
ext_colors: 'alpine white,amp,anvil clear coat,billet clearcoat,black,black amethyst,black clear coat,blizzard pearl,blue,brilliant black crystal pearl coat,brilliant silver metallic,brllian silver metallic,crystal white pearl,destroyer gray clear coat,destroyer grey,diamond black clear coat,glacier white,granite crystal metallic clear coat,granite pearl coat,gravity gray,gray,gun metalic,gun metallic,hallmark,island blue metallic,magnetic metallic,mandarin gold metallic,maximum steel metallic clear coat,pepperdust metallic,per mfg,pitch black,polar white,red,silver,silver lining metallic,super black,torred redcoat,true blue pearl coat,white,white knuckle clear coat,white knuckle clearcoat,yellow,yellow jacket clear coat',
price_min: 10895,
price_max: 85995
}
]
So I have converted it to the expected format in client site.
| common-pile/stackexchange_filtered |
How to merge elements from multiple lists with same ID in Python?
I have a text file with 670,000 + lines need to process.
Each line has the format of:
uid, a, b, c, d, x, y, x1, y1, t, 0,
I did some cleanning and transferred each line to a list:
[uid,(x,y,t)]
And my question is: How can I merge (x,y,t)tuples in different lists but have the common uid efficiently?
For example:
I have multiple lists
[uid1,(x1,y1,t1)]
[uid1,(x2,y2,t2)]
[uid2,(x3,y3,t3)]
[uid3,(x4,y4,t4)]
[uid2,(x5,y5,t5)]
......
And I want to transfer them into:
[uid1,(x1,y1,t1), (x2,y2,z2)]
[uid2,(x3,y3,t3), (x5,52,z5)]
[uid3,(x4,y4,t4)]
......
Any help would be really appreciated.
Could you share the code which you have tried to solve your issue? Please read the following rules to ask a question: https://stackoverflow.com/help/how-to-ask and https://stackoverflow.com/help/minimal-reproducible-example
Just use a defaultdict.
import collections
def group_items(items):
grouped_dict = collections.defaultdict(list)
for item in items:
uid = item[0]
t = item[1]
grouped_dict[uid].append(t)
grouped_list = []
for uid, tuples in grouped_dict.iteritems():
grouped_list.append([uid] + tuples)
return grouped_list
items is a list of your initial lists.
grouped_list will be a list of the grouped lists by uid.
If your data is stored in a dataframe, you can use .groupby to group by the 'uid', and if you transform the values (x,t,v) to a tuple ((x,t,v),), you can .sum them (i.e. concatenate them).
Here's an example:
df = pd.DataFrame.from_records(
[['a',(1,2,3)],
['b',(1,2,3)],
['a',(10,9,8)]], columns = ['uid', 'foo']
)
df.apply({'uid': lambda x: x, 'foo': lambda x: (x,)}).groupby('uid').sum()
On my end, it produced:
uid foo
a ((1, 2, 3), (10, 9, 8))
b ((1, 2, 3),)
As a side note - please note that my solution example included a (minimal) data example that allows to demonstrate the problem and the desired result. It should've been added in the question, especially in a Python pandas question. To learn more, please visit the tour (https://stackoverflow.com/tour), how to ask (https://stackoverflow.com/help/how-to-ask) and how to ask a pandas question (https://stackoverflow.com/questions/20109391/how-to-make-good-reproducible-pandas-examples)
You can use the groupby method from itertools. Considering you have your original lists in a variable called lists:
from itertools import groupby
lists = sorted(lists) # Necessary step to use groupby
grouped_list = groupby(lists, lambda x: x[0])
grouped_list = [(x[0], [k[1] for k in list(x[1])]) for x in grouped_list]
How about using defaultdict, like this:
L = [['uid1',(x1,y1,t1)],
['uid1',(x2,y2,t2)],
['uid2',(x3,y3,t3)],
['uid3',(x4,y4,t4)],
['uid2',(x5,y5,t5)]]
from collections import defaultdict
dd = defaultdict(list)
for i in L:
dd[i[0]].append(i[1])
The output: print(dd)
defaultdict(list,
{'uid1': [(x1, y1, t1), (x2, y2, t2)],
'uid2': [(x3, y3, t3), (x5, y5, t5)],
'uid3': [(x4, y4, t4)]})
| common-pile/stackexchange_filtered |
Rails, get object association when using build (object still not in database), the DRY
Im building a report system which uses a sort of meta question model. Questions are previusly saved in the database, and then depending of the type of report some questions are taken from the database.
Wanting to keep things DRY, i'm trying to figure out a way to pass the information of the Variable model to my report_header with no avail.
In the new action i have:
reportBody = @report_head.report_bodies.build(:variable_id => a.id)
@report_head.variables #modified, thx.
all i need is to pass the attributes from the Variable to report_head in a DRY way.
If you need to know my models:
class Variable < ActiveRecord::Base
attr_accessible :id,:break_point, :description, :name, :time_frequency, :v_type
has_many :report_bodies
has_many :report_heads, :through => :report_bodies
end
class ReportHead < ActiveRecord::Base
attr_accessible :email, :name , :report_bodies_attributes, :report_bodies, :variables_attributes
has_many :report_bodies
has_many :variables, :through => :report_bodies
accepts_nested_attributes_for :report_bodies
end
class ReportBody < ActiveRecord::Base
attr_accessible :report_head_id, :variable_value, :variable_id, :variables_attributes, :report_heads
belongs_to :report_head
belongs_to :variable
end
Update
I updated the model as suggested, and modified the way to call the variables. However im still confused about how to use it in the view, if i do something like:
<%= f.fields_for :variables do |variable| %>
<%= variable.text_field :name, :value => :name, :class => 'text_field' %>
<% end %>
it prints a string instead of the actual name.
You have define wrong name association, your association of ReportBody should be:
belongs_to :report_head
belongs_to :variable
This is not correct:
@report_head.report_bodies.build(:variable_id => a.id,:report_head_id<EMAIL_ADDRESS>
chang it to:
@report_head.variables.build(:variable_id => a.id)
it's better, you don't have to set report_head_id. And this is wrong:
@report_head.report_bodies.variables
If you want to get all variables belong to @report_head, you just need using:
@report_head.variables
no worries i will, you think you can hopefully help me on that last part?
is your form for create new object? fields_for only used for form create new object, you just need <%= variable.text_field :name %>
yes, my form is for creating reports, however my variables already exist. doing just that gives me the error undefined local variable or method 'variable' for #<#<Class:0xaea4fd4>:0xb4b21d8>
by the way, i used the console to test the output of @report_head.variables after doing @report_head.report_bodies.build(:variable_id => 1) however it returned an empty object
no, as my answer, you cannot use @report_head.report_bodies.build(:variable_id => 1), just @report_head.variables.build(name: 'something') to create new variable and insert to ReportBody.
| common-pile/stackexchange_filtered |
Package deployed in azure sql server fails as it cant access remote excel file
SSIS Error Code DTS_E_CANNOTACQUIRECONNECTIONFROMCONNECTIONMANAGER. The AcquireConnection method call to the connection manager "Excel Connection Manager" failed with error code 0xC0202009.
I have deployed my SSIS project & package in Azure sql server. When I execute it through visual studio 2017 in my local machine it is executed successfully. Whereas it shows the above error if I execute the deployed package manually in Azure sql server SSISDB through SSMS tool.
The package in azure sql server is not able to access my excel file present in local folder because both are in different system.
How to write my excel connection string so that package deployed in azure sql server can access the excel file in remote system
Note: When the package is executed in the system where there is excel file , it is executed successfully. When it is executed from azure sql server manually through SSMS it fails, because it can't access the excel which is in remote system
Here is the image
Can you please clarify whether you are trying to run SQL Server in an Azure VM or if you are using Azure SQL Database (aka SQL Azure) as the target?
I am using Azure SQL Database aka SQL Azure
Unfortunately sql azure does not currently support linked servers. So you would need to access excel from outside the database layer
| common-pile/stackexchange_filtered |
I don't see an explanatory legend in SAS proc freq
I have a problem with the legend not being visible at cross tabulation in porc freq in SAS. How can I show it?
I see only a table:
But I want to also see a legend:
Show the code that generated the graphic without the legend. AFAIK it's part of the default though one looks like PDF versus HTML output.
proc freq data=&dane;
tables &zm_i*&zm_j /chisq;
run;
This is the proc freq code and its in a html output
There has to be more code around it, otherwise the default output is as shown in your second post. The second post is the default settings. Some settings have been changed to get the first table. So show the full code you've used.
ods layout end;
ods html close;
%let i = %eval(&i + 2);
%end;
%mend;
This is full code
Please post the code as an edit to your question not in the comments, very hard to read. You also need to show how the macro is called because we have no idea what those macro parameters are. But I think your layout is causing the issue. If you try a PROC FREQ in a clean session you'll see the standard output. So something in your code there is truncating the titles. Try removing them one at a time until you find what is causing the issue.
Add options mprint symbolgen; before the macro call, run the code and post the full log. That may have the options.
I post the code. The problem occurs even when I run the proc freq alone outside the macro.
Then your PROC FREQ template has been destroyed somehow, you'll need to find someone else's proc freq template and replace yours or reinstall SAS.
PROC FREQ does not have a legend per se, but you can use the title statement to put some information there, or footer.
If you want better control, use PROC TABULATE which has substantial ability to customize the output, including the box option which places whatever information you want in the top-left corner of the table.
If you run the following:
proc template;
source Base.Freq.CrossTabFreqs;
run;
Then check your log, you should see:
define crosstabs Base.Freq.CrossTabFreqs;
notes "Crosstabulation table";
cellvalue Frequency Expected Deviation CellChiSquare TotalPercent Percent RowPercent ColPercent CumColPercent;
header TableOf ControllingFor;
footer NoObs Missing;
If you do not, that means that someone has modified your PROC FREQ base template and that's why it's not showing. In this case you would need to replace your default SAS installation templates by obtaining clean copies from a different source. It may also need to align with your SAS version.
If you have a colleague or different SAS installation that is operating as expected, you can run the PROC TEMPLATE above, then copy the code from the log and re-create the template as:
proc template;
<insert copied code from log>;
run;
And you probably realize, but this is why modifying templates is generally a last resort for customizing data/output and usually you should never modify the default templates.
| common-pile/stackexchange_filtered |
how to manage top, left bottom and other spacing on WinForm form?
I am working on a Win Forms application which has tab and many tab pages are there. All of these are having many contorls on them. I need to manage form's top,left, right and bottom margins. Before I was managing this manaually by selecting and moving controls/ labels and by trying to make there margin equal but it is a little hard after some tabs to manage margions of form and contorls in form and also between controls.
Please advice what can be best and professional way to manage this.
Edit
My conrol heraricy is as below:
Form > Tab Contorl > Tab Page > Form Spliter > Input Controls
Form Spliter is Dock to Left.
Please clarify wether this is WinForms or WebForms. Your title says "winform form" but your question says "web forms application".
Oh I ma sorry David, it is Win Form. corrected.
Instead of moving controls one by one , you can use the easiest way. Click on the window then Press ctrl+A then press Arrow Buttons. This helped a lot for me.
Set each tab page's Padding property (it's in the Property Grid in the Properties panel). It will help with aligning controls using the visual designer. Also make sure you're using the Anchor property to handle resizing without needing to write your own code.
Note that if you have a form with lots of tab pages with lots of controls on, chances are you're doing something wrong.
The best approach is to create a composite UserControl for each tab page. It really does simplify things.
Thanks David, can you please advice how I should Anchor property ? I am trying passing but it is not causing any change. Also I am not making spacing by code.
When a control is Left-anchored to its parent, it means the distance between its left edge and its parent's left edge will be the same, so when a control that is both left and right-anchored it means the control will stretch horizontally to ensure the right-right and left-left distances are kept the same.
The anchor property only takes effect during resize events, it does not make any instantaneous visible changes.
Can you please see my edit.
Is there anything I should do to make padding working ?
Can you post a screenshot please?
| common-pile/stackexchange_filtered |
How to access http-toolkit packets programmatically using python?
Is there a way to receive and process packets intercepted in http-toolkit programmatically using python?
Is there any internal API I access?
Ideally I would like to receive the packets in a JSON or HAR format.
sounds like an xy problem. there are a number of python packages that do this, requests being the most popular
Hey! I suspected i didnt make myself clear enough. The end goal is to manipulate and capture HTTP traffic of an Android device. HTTP toolkit does it great but without a programmatic interface which caused me to ask the question.
Within HTTP Toolkit itself, this isn't possible right now, but it is planned in future. You can +1 on the issue to vote for it here: https://github.com/httptoolkit/httptoolkit/issues/37. With that, you'd be able to add your own scripts within HTTP Toolkit which could process or store packets elsewhere any way you like, including sending them to a Python process.
In the meantime, this may be possible using Mockttp. Mockttp is the internals of HTTP Toolkit as an open-source JavaScript library that you can use to build your own fully scriptable proxy, and once that's working you can easily add logic to forward packets to Python on top of that. There's a getting started guide here: https://httptoolkit.tech/blog/javascript-mitm-proxy-mockttp/.
First of all thank you very much for your answer! Does Mockttp can easily connect to Android devices? The end goal is to programmatically capture and manipulate HTTP traffic of an Android device.
No - it's totally possible to intercept an Android device with Mockttp, but you'll need to do the setup for that manually. For fully automated setup, you'd still need HTTP Toolkit, which means waiting for the full feature linked above.
| common-pile/stackexchange_filtered |
CodeMirror force next line from 80 position
Is there a way that I can force line break when user tries to type any character after column 80 for a document in code mirror.
Also optionally for indent on the next line
Yes, there is a way to do that. On cursor activity, test if current character index is 80 or over. if so, replace selection with new line "\n" .
but do you really want to do that? break a word in half?
editor.on("cursorActivity" ,function(editor){
currentChar = editor.getCursor().ch
if (currentChar >= 80){
editor.replaceSelection("\n" ,"end")
}
})
Thank you it does lead to what I am looking for.. Just that behavior is little weird.. Actually I would like user to be not able to type after 80 char not really break new line... but I though new line break may be easier to achieve.
| common-pile/stackexchange_filtered |
Existence of integrals in f.d Hopf algebras
In THE HAAR MEASURE ON FINITE QUANTUM GROUPS, van Daele gives an implausibly short proof of the existence of integrals in a finite-dimensional Hopf algebra.
I'm probably overlooking something obvious, but this proof feels very esoteric to me: I cannot draw it in pictures.
So:
Let $H$ be a f.d. Hopf-algebra. Then there exists a non-zero $t\in H$ s.t. for all $h\in H$ we have $h.t=\epsilon(h)t$.
Proof. Let $\{e_i\}$ be a basis of $H$ with corresponding dual basis $\{e^j\}$. Take any non-zero $b\in H$, and set
$$
t = \sum_{i,(e_i)}\langle e^i, S^2(e_i'') b\rangle e_i'\ ,
$$
where $\langle f,h\rangle = f(h)$ is the pairing.
Then, for any $h\in H$ we have
\begin{align}
\epsilon(h)t &~=~\sum_{i,(e_i),(h)}\langle e^i, h'S(h'')S^2(e_i'') b\rangle~ e_i'\\[1em]
\tag{$\star$}&~=~\sum_{j,i,(e_i),(h)}\langle e^i, h' e_j\rangle~\langle e^j,S(h'')S^2(e_i'') b\rangle~ e_i'\\[1em]
\tag{$\maltese$}&~=~\sum_{i,(e_i),(h)} \langle e^i, S(h''')S^2(h''e_i'') b \rangle~ h'e_i' \\[1em]
\tag{$\spadesuit$}&~=~h \sum_{i,(e_i)} \langle e^i, S^2(e_i'')b\rangle ~e_i' \\
&~=~ ht
\end{align}
The first equality is obvious (and of course the last one too). Next, $\star$ is probably something like inserting $1$ or $\operatorname{id}_H$ (which is given by $\operatorname{coev}_H(1)$), but I'm not sure?
But I don't even have a guess for $\maltese$ and $\spadesuit$.
For example, in $\maltese$, why is $h'$ suddenly not paired anymore, and where does the second antipode come from that acts on $h''$?
And $\spadesuit$ at least superficially seems to be using the definition of the antipode, but I don't think it actually does.
Please end my suffering.
The following is a failed attempt. The following is true: Assume for a second that the pairing is actually symmetric. Then
\begin{align}
&\sum_{i,(e_i),(h)} \langle e^i, S(h''')S^2(h''e_i'') b \rangle~ h'e_i' \\[1em]
~=~&\sum_{i,(e_i),(h)} \langle e^i, S(h''')S^2(e_i'')S^2(h'') b \rangle~ h'e_i' \\[1em]
~=~&\sum_{i,(e_i),(h)} \langle e^i, S(h'''S(h''))S^2(e_i'') b \rangle~ h'e_i' \\[1em]
~=~&\sum_{i,(e_i),(h)} \langle e^i, S^2(e_i'') b \rangle~ \epsilon({h''}) h'e_i' \\[1em]
~=~&h\sum_{i,(e_i)} \langle e^i, S^2(e_i'') b \rangle~ e_i' \ .
\end{align}
But I think the pairing is only symmetric if $H$ is commutative. So this is superfluous. Meh.
Nope. The solution to $\spadesuit$ is just
\begin{align}
S(h'')S^2(h' a) &= S(S(h'a)h'') = S(S(a)S(h')h'') = \epsilon(h)S(a)\ ,
\end{align}
isn't it?
Here's a suggestion of a different way to do the case $b=1$ which might shed some light. The result is equivalent to saying that $(1\otimes \mathrm{eval}) \circ (1\otimes S^2 \otimes 1) \circ (\Delta \otimes 1) \circ \textrm{id} : k \to A$ is an $A$-module map, where by $\textrm{id}$ I mean $1\mapsto \sum e_i\otimes e^i$, which is certainly an $A$-module map, and $\mathrm{eval}$ is the pairing $A\otimes A' \to k$ which is also (I think!) an $A$-module map. So all you'd need to do is to examine $(1\otimes S^2 \otimes 1) \circ \Delta \otimes 1$...
That the pairing is a module map follows from the action $(h.f)(a) = f(S(h).a)$ on $A^*$ and the definition of the antipode. I forgot that we can use pivotality of $\mathbf{Vect}$, so your comment made my pictures much clearer. I have posted an answer, is that what you meant?
While it proves the statement, it doesn't help me in understanding the original proof, or am I - again - missing something?
Well, the identities resulting from this map being a module hom must imply the ones used in these manipulations somehow or other, but it is not obvious. You could try working out which equalities of maps are being asserted by $\spadesuit$ and $\maltese$, since these might relate more directly to what you know about the module structure.
This doesn't answer the question, I think, but it gives an alternative proof which convinces me of the truth of the statement. (I'm still interested in understanding the original proof)
The argument can be seen from the following picture:
This is for the case $b=1$, as user m_t_ suggested. If $b$ is any other non-zero element then the proof will be completely analogous, because $b$ is attached on the right and above the two antipodes, so nothing will get in each other's way.
The steps are:
coevaluation is an intertwiner
comultiplication is an intertwiner
antipode is an antihomomorphism, use twice
the (left) action on the dual space is $\langle a, h\rightharpoonup f \rangle \equiv \langle S(h)\rightharpoonup a, f\rangle$
antipode axiom
(There is some portion of copy+paste, only to have the full framework.)
Recall: Let $\{e_i\}$ be a basis of $H$ with corresponding dual basis $\{e^i\}$.
In particular, we have for the identity $\operatorname{id}\in\operatorname{Hom}(H,H)\cong H^*\otimes H$
the corresponding writing $\operatorname{id}\leftrightarrow \sum e^i\otimes e_i$.
So we can write any element in $H$ in the form
$$
h = \operatorname{id}(h)
=\sum e^i(h)\;e_i
=\sum \langle e^i,h\rangle\;e_i
\ .
$$
Also, we have with appropiate notations:
$$
\begin{aligned}
\langle e^i, hk\rangle
&=
\langle e^i, h\;\operatorname{id}(k)\rangle
\\
&=
\left\langle e^i, h\;
\sum\langle e^j, k\rangle e_j
\right\rangle
\\
&=
\sum
\langle e^i, h e_j\rangle\;
\langle e^j, k\rangle
\qquad (*)
\ .
\end{aligned}
$$
Also, for some (linear) $\phi:H\to H$ we have
$$
\phi (h) = \phi h
=\phi \sum \langle e^i,h\rangle\;e_i
=\sum \langle e^i,h\rangle\;\phi(e_i)
\ .
\qquad(\diamond)
$$
Take now any non-zero $b\in H$, and set
$$
t = t(b) = \sum_{i,(e_i)}\langle e^i, S^2(e_i'') b\rangle e_i'\ ,
$$
Now we start the...
Long computation:
The sums will have appropriate (in part conventional, Hopf algebraic) indices of summation.
For any $h\in H$ we have then:
$$
\begin{aligned}
\epsilon(h)\;t(b)
&=
\epsilon(h)\sum\langle e^i, S^2(e_i'') b\rangle e_i'
\\
&=
\sum\langle e^i, \epsilon(h)\; S^2(e_i'') b\rangle e_i'
\\
&=
\sum\langle e^i, (\operatorname{id}*S)(h)\; S^2(e_i'') b\rangle e_i'
\\
& = \sum\langle e^i, h'S(h'')\; S^2(e_i'') b\rangle\; e_i'
\\
& = \sum\langle e^i, h'\;\operatorname{id}(S(h'')\; S^2(e_i'') b)\rangle\; e_i'
\\
&\qquad\text{as in $(*)$}
\\
& = \sum\langle e^i, h'e_j\rangle\;\underbrace{\langle e^j,\ S(h'')\; S^2(e_i'') b\rangle\; e_i'}_{\phi_j(e_i)}
\\
&\qquad\text{as in $(\diamond)$ with the linear function $\phi_j=\phi_j(\ \cdot\ ,h'')$ given by}
\\
&\qquad\text{$\phi_j(x)=\langle e^j, \ S(h'')\; S^2(x'') b\rangle\; x'\rangle$}
\\
&=\sum \phi_j(h'e_j,h'')
\\
&=\sum \langle e^j,\ S(h'')\; S^2((h'e_j)'') b\rangle\; (h'e_j)'
\\
&\qquad\text{... now we have to redistribute the parts. I did not check, but it is plausible...}
\\
&=\sum \langle e^j,\ S(h''')\; S^2(h''e_j'') b\rangle\; h'e_j'
\\
&=\sum \langle e^j,\ S(h''')\; S^2(h'')S^2(e_j'') b\rangle\; h'e_j'
\\
&\qquad\text{$S$ antimorphism, so $S^2$ morphism, as in your failed attempt,}
\\
&\qquad\text{(Just correct it at the one place!)}
\\
&=\sum \langle e^j,\ S(S(h'')h''')S^2(e_j'') b\rangle\; h'e_j'
\\
&=\sum \langle e^j,\ S((S*\operatorname{id})h'')S^2(e_j'') b\rangle\; h'e_j'
\\
&=\sum \langle e^j,\ S(\epsilon(h''))S^2(e_j'') b\rangle\; h'e_j'
\\
&=\sum \langle e^j,\ \epsilon(h'')S^2(e_j'') b\rangle\; h'e_j'
\\
&=\sum \langle e^j,\ S^2(e_j'') b\rangle\; \epsilon(h'')h'e_j'
\\
&=\sum \langle e^j,\ S^2(e_j'') b\rangle\; he_j'
\\
&=h\sum \langle e^j,\ S^2(e_j'') b\rangle\; e_j'
\\
&= h\; t(b)\ .
\end{aligned}
$$
(There is only one place, where we have to draw diagrams and rearrange using the fact that $m$ is comultiplicative and/or $\Delta$ is multiplicative. It is the place marked as "plausible". The notation is also fuzzy for my taste. But i hope i could give a hint of what is going on, seen from my perspective. It is a pity, that in this era of electronic publications authors do not provide full proofs, with full details. It took me some days to exhibit the arguments... Sorry, have to submit, back to work.)
Since it seems that more or less you figure out the solution, an answer is not needed anymore, but please let me take Van Daele's part by showing why things are easier than you think. Let $H$ be finite-dimensional with finite dual basis $\{e_i,e^i\}$. I will adopt Sweedler notation in the form
$$\Delta(x) = \sum x'\otimes x''$$
as you did. By definition, $t=\sum e^i\left(S^2(e_i'')b\right)e_i'$. Notice the following facts
\begin{equation}\label{eq:1}\tag{$\dagger$}
\sum e^i(h)e_i'\otimes e_i'' = \sum h'\otimes h''
\end{equation}
for all $h\in H$ since $h=\sum e^i(h)e_i$ and
\begin{equation}\label{eq:2}\tag{$\ddagger$}
\sum S(h'')S^2(h'k) = \sum S(h'')S\left(S(h'k)\right) = \sum S\left(S(k)S(h')h''\right) = S^2(k)\varepsilon(h)
\end{equation}
for all $h,k\in H$. Now
\begin{align*}
\varepsilon(h)t & = \sum e^i\left(\varepsilon(h)S^2(e_i'')b\right)e_i' \\
& = \sum e^i\left(h'S(h'')S^2(e_i'')b\right)e_i' \\
& = \sum e^i\left(h'e_je^j\left(S(h'')S^2(e_i'')b\right)\right)e_i' \\
& = \sum e^i\left(h'e_j\right)e^j\left(S(h'')S^2(e_i'')b\right)e_i' \\
& \stackrel{\eqref{eq:1}}{=} \sum e^j\left(S(h''')S^2(h''e_j'')b\right)h'e_j' \\
& \stackrel{\eqref{eq:2}}{=} \sum e^j\left(S^2(e_j'')b\right)h'\varepsilon(h'')e_j' \\
& = h\left(\sum e^j\left(S^2(e_j'')b\right)e_j'\right) = ht.
\end{align*}
Thus, van Daele's proof is indeed correct and indeed short, once you have a bit of practice in Sweedler's notation and finite-dimensional computations.
| common-pile/stackexchange_filtered |
XML and probably LXML issue
I have many XML files that look like this
<?xml version="1.0" encoding="utf-8" standalone="no"?>
<reiXmlPrenos>
<Qfl>1808</Qfl>
<fOVE>13.7</fOVE>
<NetoVolumen>613</NetoVolumen>
<Hv>104.2</Hv>
<energenti>
<energent>
<sifra>energy_e</sifra>
<naziv>EE [kWh]</naziv>
<vrednost>238981</vrednost>
</energent>
<energent>
<sifra>energy_to</sifra>
<naziv>Do</naziv>
<vrednost>16359</vrednost>
</energent>
<energent>
<sifra>energy_en</sifra>
<naziv>En</naziv>
<vrednost>0</vrednost>
</energent>
</energenti>
<rei>
<zavetrovanost>2</zavetrovanost>
<cone>
<cona>
<cona_id>1</cona_id>
<cc_si_cona>1110000</cc_si_cona>
<visina_cone>2.7</visina_cone>
<dolzina_cone>14</dolzina_cone>
</cona>
<cona>
<cona_id>2</cona_id>
<cc_si_cona>120000</cc_si_cona>
</cona>
</rei>
</reiXmlPrenos>
I would like to extract certain values from those XML files. So I put together with the help from people here code below that is suppose to work:
import pandas as pd
import glob
import os
from lxml import etree
os.chdir(r'R:\...\XML-1122_test')
dir_path = glob.glob('*.xml')
xmls = dir_path
#note: For simplicity, I'm using the well formed version of the xml strings in your question; you'll have to use actual file names and paths
energies = ["xml", "energy_ge", "energy_en", "energy_dteu", "energy_dtlb"]
#I just made up some names - you'll have to use actual names, of course; the first one is for the file identifier - see below
rows = []
for xml in xmls:
row = []
id = "xml-"+str(xmls.index(xml)+1)
#this creates the file identifier
row.append(id)
root = etree.XML(xml.encode())
#in real life, you'll have to use the parse() method
for energy in energies[1:]:
#the '[1:]' is used to skip the first "energy"; it's only used as the file identifier
target = root.xpath(f'//energent[./sifra[.="{energy}"]]/vrednost/text()')
#note the use of f-strings
row.extend( target if len(target)>0 else "0" )
rows.append(row)
print(pd.DataFrame(rows,columns=energies))
But in the end I get a warning:
File "<string>", line 1
XMLSyntaxError: Start tag expected, '<' not found, line 1, column 1
Is this an XML issue? Or maybe lxml issue? Does anyone know how to approach this?
Ideally, the result would look like this
xml energy_e energy_en energy_to
xml-1 238981 0 16539
xml-2 ... .. ..
The space is result from my copy-paste process. XML files don't have the space at the very beginning. And all the other tags are present as well, i copied only a fraction of the files just to give you and idea how it looks like.
@energyMax, can you add the exact expected dataframe (matching your input) ?
Are you using etree.XML(xml.encode()) intead of parse? xmls are filepaths not xml content
The entire code is presented above.
That might be the problem then. You should use parse().
import pandas as pd
import glob
import os
from lxml import etree
os.chdir(r'R:\...\XML-1122_test')
dir_path = glob.glob('*.xml')
xmls = dir_path
energies = ["xml", "energy_e", "energy_en", "energy_to"]
rows = []
for xml in xmls:
row = []
id = "xml-"+str(xmls.index(xml)+1)
row.append(id)
with open(xml, 'r', encoding='utf-8') as f:
xml_string = f.read()
root = etree.XML(xml_string.encode())
for energy in energies[1:]:
target = root.xpath(f'//energent[./sifra="{energy}"]/vrednost/text()')
row.extend(target if len(target)>0 else ["0"])
rows.append(row)
print(pd.DataFrame(rows, columns=energies)
parse below
import pandas as pd
import glob
import os
from lxml import etree
os.chdir(r'R:\...\XML-1122_test')
dir_path = glob.glob('*.xml')
xmls = dir_path
energies = ["xml", "energy_ge", "energy_en", "energy_dteu", "energy_dtlb"]
rows = []
for xml in xmls:
row = []
id = "xml-"+str(xmls.index(xml)+1)
row.append(id)
root = etree.parse(xml)
for energy in energies[1:]:
target = root.xpath(f'//energent[./sifra[.="{energy}"]]/vrednost/text()')
row.extend( target if len(target)>0 else "0" )
rows.append(row)
print(pd.DataFrame(rows,columns=energies))
Does this fix your problem?
Not yet. Does it play any role that at first there is energenti and only after that energent. So energenti is above them all... I get the same warning even now
I have edited the post to better fix your problem and answer your new question. Please tell me if this fixes your problem.
Can you please give the entire code as a hole? I'm a bit confused on some parts atm
Ok, please give me one minute to collect it
I can also do it by using parse as that is a viable solution.
ok here is both of the code
Since you're looking for a dataframe, you can simply use read_xml from pandas:
df = (
pd.read_xml(xml, xpath=".//energent")
.drop("naziv", axis=1)
.set_index("sifra").T
.rename_axis(None, axis=1)
)
And this is how you can incorporate it in your code :
xmls = glob.glob("*.xml")
list_dfs = []
for idx, xml in enumerate(xmls, start=1):
tmp_df = (
pd.read_xml(xml, xpath=".//energent")
.drop("naziv", axis=1)
.set_index("sifra").T
.rename_axis(None, axis=1)
)
tmp_df.insert(0, "xml", f"{xml}-{idx}")
list_dfs.append(tmp_df)
df = pd.concat(list_dfs, ignore_index=True)
Test/Output (x3 the same xml):
print(df)
xml energy_e energy_to energy_en
0 first.xml-1 238981 16359 0
1 second.xml-2 238981 16359 0
2 third.xml-3 238981 16359 0
Can you please put the entire code? I am (obviously) not very good in python, so i can't understand all the logic atm
I updated my answer, can you check it out ?
NameError: name 'StringIO' is not defined is the error
That's because I used io to read the xml, I corrected the code.
| common-pile/stackexchange_filtered |
Assigning a reducer for a dynamic store property
I'm trying to figure out how to set up a reducer for a property in my state tree that gets created from user events.
My state tree looks like this:
{
session: { session object },
dashboard: {
id: 'id001',
charts: {
'cid001': { dimensions: { ... }, more objects... },
'cid002': { dimensions: { ... }, more objects... }
}
}
}
New charts properties come in through Redux when a user clicks the Add Chart button. The key is set to the chart id, cid.
I'm currently using combineReducers() to set up the reducer tree.
import session from './session';
import charts from './charts';
const rootReducer = combineReducers({
session,
dashboard: combineReducers({
charts
});
});
I am able to nest reducers like this if I know the property names ahead of time. However, I'd like to avoid having a massive reducer for the charts property, since each chart inside it could have up to 20 more objects on it that need reducing, dimensions is just one example.
Is there a way to set a reducer for the charts['cidxxx'].dimensions, and other sub-properties? Are there wildcard tokens for this? Any thoughts?
Object spreads would be nice for something like this.
function chartsReducer(state, {payload}) {
const {cid} = payload;
const currentChart = state[cid] || {};
return {...state, [cid]: {...currentChart, ...payload}};
}
If you are looking to go deeper than that in terms of property trees, it may be worth thinking about how to change the shape of your data to match how you are using it.
| common-pile/stackexchange_filtered |
Show login page while unauthorized users try to view PDF files in laravel 5.0
In my application, PDF upload option is there. The uploaded PDF files stored in media/pdfs directory.
For ex. http://localhost/xxx/media/pdfs/samplePDF.pdf
Now this link is accessable for unauthorized users also. I want to avoid that. How to resolve this problem?
You define a route/route-group to serve your files from this pdf folder and then apply auth middleware to this route or route group
How to apply auth middleware to the url http://localhost/xxx/media/pdfs/samplePDF.pdf, because the pdf file name will be dynamic ie, samplePDF.pdf will change.
When declaring a route, you can include parameters associated with a given regular expression (in your case, maybe /^[a-zA-Z]+\.pdf$/.
Possible duplicate of Secure Asset/Media Folder through Auth Controller ? Laravel 5.2
In my application, laravel version is 5.0, the duplicate question you are mentioned above is for laravel 5.2
| common-pile/stackexchange_filtered |
Sendgrid Whitelabel email links not working
I've set up my Sendgrid account to use whitelabels for the domain and email links. This service is being used for a discourse forum.
I can see the SPF Sender permits sendgrid to send on behalf of my domain but the links are pointing to xxxx.ct.sendgrid.net instead of mail.mydomain as I expect.
All the CNAME records are marked as valid and default in Sendgrid's control panel and I can verify they exist when I query the CNAME records.
How can I fix this?
With the update last year, Domain Whitelabels and Links Whitelabels cannot be on the same subdomain anymore. You'll want to review your Whitelabels, make sure none of them have Yellow Warning tags on them. If they do, contact SG Support for assistance.
Assuming they don't, just rerun the Validation on each of them to make sure your recodes are good. If they are, everything should be working out properly.
| common-pile/stackexchange_filtered |
How to install Telegram snap on Ubuntu 16.04.2?
I would install Telegram snap on Ubuntu 16.04.2 and tried with sudo snap install telegram-sergiusens but the result was:
error: cannot install "telegram-sergiusens": Get
https://search.apps.ubuntu.com/api/v1/snaps/details/core?channel=stable&fields=anon_download_url%2Carchitecture%2Cchannel%2Cdownload_sha3_384%2Csummary%2Cdescription%2Cdeltas%2Cbinary_filesize%2Cdownload_url%2Cepoch%2Cicon_url%2Clast_updated%2Cpackage_name%2Cprices%2Cpublisher%2Cratings_average%2Crevision%2Cscreenshot_urls%2Csnap_id%2Csupport_url%2Ccontact%2Ctitle%2Ccontent%2Cversion%2Corigin%2Cdeveloper_id%2Cprivate%2Cconfinement%2Cchannel_maps_list:
dial tcp: lookup search.apps.ubuntu.com on [::1]:53: read udp [::1]:55238->[::1]:53: read:
Where am I wrong?
Weird error. Is snapd up-to-date? What does snap --version say?
@Kyle Thank you!
snap 2.25 snapd 2.25 series 16 ubuntu 16.04 kernel 4.8.0-53-generic
Use
snap find | grep telegram
to search for the Telegram package.
As of the time of writing
sudo snap install telegram-desktop
will install Telegram.
You may have to reboot in order for it to show up in the application list, however.
| common-pile/stackexchange_filtered |
How to change the default prompt in a singularity container
I am creating a Singularity image and I want to have a custom bash prompt, just like I can set a bash prompt as a normal user by setting the variable PS1 in my ~/.bash_profile.
But, unfortunately, Singularity does something to enforce its own prompt that looks like this: Singularity>. This is not a very helpful prompt.
I tried to set PS1 in the definition file like in this example:
# File prompt-test1.def
Bootstrap: shub
From: alpine:latest
%environment
export PS1="[my-container]\w\$ "
I built and ran that image, but to no avail:
sudo singularity build prompt-test.sif prompt-test.def
singularity shell prompt-test.sif
# shows Singularity>
Using the env command in the shell, I noticed that PROMPT_COMMAND is set:
echo $PROMPT_COMMAND
# PS1="Singularity> "; unset PROMPT_COMMAND
... so I tried to change that:
# File prompt-test2.def
Bootstrap: shub
From: alpine:latest
%environment
export PROMPT_COMMAND='PS1="[my-container]\w\$ "; unset PROMPT_COMMAND'
... but the prompt remains unchanged! But now, PROMPT_COMMAND inside the container looks like
echo $PROMPT_COMMAND
# PS1="[my-container]\w\$ "; unset PROMPT_COMMAND; PROMPT_COMMAND="${PROMPT_COMMAND%%; PROMPT_COMMAND=*}";PS1="Singularity> "
edited to replace with better info: The problem lies in the order in which the files in /.singularity.d/env are sourced. The user-defined environment variables are set in 01-base.sh, but in 99-base.sh there is the line
PS1="Singularity> "
... which overwrites my prompt! Also, the approach using PROMPT_COMMAND is thwarted because of some code in /.singularity.d/actions/shell or .../run.
A workaround would be for the user to use an own .profile. But that does not work if the user's home directory is bound to the home inside the container, because the .profile inside and outside of the container is the same then. Also, I don't want user intervention just to change a prompt.
How can I get my prompt?
By default, the shells run by singularity explicitly don't load any profile or rc files. The prompt, annoying as it is, is there to remind you that you are in an image, not the host OS.
If you want to override that anyway, you need to add an additional file to /.singularity.d/env that will be run after everything else. e.g.,
Bootstrap: docker
From: alpine:latest
%post -c /bin/bash
CUSTOM_ENV=/.singularity.d/env/99-zz_custom_env.sh
cat >$CUSTOM_ENV <<EOF
#!/bin/bash
PS1="[my_container]\w \$"
EOF
chmod 755 $CUSTOM_ENV
Adding -c /bin/bash after %post tells singularity to use /bin/bash instead of the default /bin/sh, necessary for the multiline cat step afterwards.
EDIT: older versions of singularity used a shebang in the first line for specifying an alternate shell, but has changed to -c /path/to/shell.
I'd like to customize PS1 like this to remind me which container I'm in (something like [$SINGULARITY_NAME] Singularity> ). Is this answer likely be stable for future versions of singularity?
When playing with magic files there's always a chance that something may change unexpectedly, but I'd say this is fairly stable. It is worth noting that the files in /.singularity.d/env are now always sourced with /bin/sh so only variable definitions / exports will be persisted.
Are there anyway to change the interpreter to /bin/bash? This information doesn't seem to be listed in the documentations.
In 2024, we can safely remove -c /bin/bash after %post and #!/bin/bash in the first line of the customer script in $CUSTOM_ENV$.
The solution that worked for me was to use tmux inside singularity. It has standard shell with path in prompt.
| common-pile/stackexchange_filtered |
Website registration works on XAMPP but not on my LAMP server
I've just finished setting up my remote server. setup LAMP, transferred my website over. I can login with a user fine but for some reason my code returns an error when inserting data. It goes through the code and does return an error if the user already exists, so I've determined it stops and returns an error after the line.
$sql = "INSERT INTO users (user_name, email, password, hash) "
. "VALUES ('$user_name','$email','$password', '$hash')";
which in turn returns the error:
else {
$_SESSION['message'] = 'Registration failed!';
header("location: error.php");
}
full code below.
<?php
$_SESSION['email'] = $_POST['email'];
$_SESSION['user_name'] = $_POST['user_name'];
// Escape all $_POST variables to protect against SQL injections
$user_name = $mysqli->escape_string($_POST['user_name']);
$email = $mysqli->escape_string($_POST['email']);
$password = $mysqli->escape_string(password_hash($_POST['password'], PASSWORD_BCRYPT));
$hash = $mysqli->escape_string( md5( rand(0,1000) ) );
// Check if user with that email already exists
$result = $mysqli->query("SELECT * FROM users WHERE email='$email'") or die($mysqli->error());
if ( $result->num_rows > 0 ) {
$_SESSION['message'] = 'User with this email already exists!';
header("location: error.php");
}
else { // Email doesn't already exist in a database, proceed...
$sql = "INSERT INTO users (user_name, email, password, hash) "
. "VALUES ('$user_name','$email','$password', '$hash')";
// Add user to the database
if ( $mysqli->query($sql) ){
$_SESSION['active'] = 0;
$_SESSION['logged_in'] = true;
}
else {
$_SESSION['message'] = 'Registration failed!';
header("location: error.php");
}
}
?>
What does $mysqli->error say?
$mysqli->error not $mysqli->error()
| common-pile/stackexchange_filtered |
R text file and text mining...how to load data
I am using the R package tm and I want to do some text mining. This is one document and is treated as a bag of words.
I don't understand the documentation on how to load a text file and to create the necessary objects to start using features such as....
stemDocument(x, language = map_IETF(Language(x)))
So assume that this is my doc "this is a test for R load"
How do I load the data for text processing and to create the object x?
Like @richiemorrisroe I found this poorly documented. Here's how I get my text in to use with the tm package and make the document term matrix:
library(tm) #load text mining library
setwd('F:/My Documents/My texts') #sets R's working directory to near where my files are
a <-Corpus(DirSource("/My Documents/My texts"), readerControl = list(language="lat")) #specifies the exact folder where my text file(s) is for analysis with tm.
summary(a) #check what went in
a <- tm_map(a, removeNumbers)
a <- tm_map(a, removePunctuation)
a <- tm_map(a , stripWhitespace)
a <- tm_map(a, tolower)
a <- tm_map(a, removeWords, stopwords("english")) # this stopword file is at C:\Users\[username]\Documents\R\win-library\2.13\tm\stopwords
a <- tm_map(a, stemDocument, language = "english")
adtm <-DocumentTermMatrix(a)
adtm <- removeSparseTerms(adtm, 0.75)
In this case you don't need to specify the exact file name. So long as it's the only one in the directory referred to in line 3, it will be used by the tm functions. I do it this way because I have not had any success in specifying the file name in line 3.
If anyone can suggest how to get text into the lda package I'd be most grateful. I haven't been able to work that out at all.
I just discovered that the stemDocument function doesn't seem to work at all unless the language is specified, so I've edited my code above to include that.
Can't you just use the function readPlain from the same library? Or you could just use the more common scan function.
mydoc.txt <-scan("./mydoc.txt", what = "character")
I actually found this quite tricky to begin with, so here's a more comprehensive explanation.
First, you need to set up a source for your text documents. I found that the easiest way (especially if you plan on adding more documents, is to create a directory source that will read all of your files in.
source <- DirSource("yourdirectoryname/") #input path for documents
YourCorpus <- Corpus(source, readerControl=list(reader=readPlain)) #load in documents
You can then apply the StemDocument function to your Corpus. HTH.
I believe what you wanted to do was read individual file into a corpus and then make it treat the different rows in the text file as different observations.
See if this gives you what you want:
text <- read.delim("this is a test for R load.txt", sep = "/t")
text_corpus <- Corpus(VectorSource(text), readerControl = list(language = "en"))
This is assuming that the file "this is a test for R load.txt" has only one column which has the text data.
Here the "text_corpus" is the object that you are looking for.
Hope this helps.
Here's my solution for a text file with a line per observation. the latest vignette on tm (Feb 2017) gives more detail.
text <- read.delim(textFileName, header=F, sep = "\n",stringsAsFactors = F)
colnames(text) <- c("MyCol")
docs <- text$MyCol
a <- VCorpus(VectorSource(docs))
The following assumes you have a directory of text files from which you want to create a bag of words.
The only change that needs to be made is replace
path = "C:\\windows\\path\\to\\text\\files\\
with your directory path.
library(tidyverse)
library(tidytext)
# create a data frame listing all files to be analyzed
all_txts <- list.files(path = "C:\\windows\\path\\to\\text\\files\\", # path can be relative or absolute
pattern = ".txt$", # this pattern only selects files ending with .txt
full.names = TRUE) # gives the file path as well as name
# create a data frame with one word per line
my_corpus <- map_dfr(all_txts, ~ tibble(txt = read_file(.x)) %>% # read in each file in list
mutate(filename = basename(.x)) %>% # add the file name as a new column
unnest_tokens(word, txt)) # split each word out as a separate row
# count the total # of rows/words in your corpus
my_corpus %>%
summarize(number_rows = n())
# group and count by "filename" field and sort descending
my_corpus %>%
group_by(filename) %>%
summarize(number_rows = n()) %>%
arrange(desc(number_rows))
# remove stop words
my_corpus2 <- my_corpus %>%
anti_join(stop_words)
# repeat the count after stop words are removed
my_corpus2 %>%
group_by(filename) %>%
summarize(number_rows = n()) %>%
arrange(desc(number_rows))
| common-pile/stackexchange_filtered |
Inconsistent behavior of Setters in WPF Style Triggers in conjunction with Local Values
I am using a custom TextBox control for my WPF application, which has a default style set in a Generics.xaml file. The Template I set in the file includes the following Setter and Triggers:
<Setter Property="Background" Value="Blue" />
<Trigger Property="IsFocused" Value="True">
<Setter Property="Background" Value="Yellow"/>
<Setter Property="Foreground" Value="Black"/>
<Setter Property="BorderBrush" Value="Black"/>
</Trigger>
<MultiDataTrigger>
<MultiDataTrigger.Conditions>
<Condition Binding="{Binding Path=IsEnabled, RelativeSource={RelativeSource Self}}" Value="True" />
<Condition Binding="{Binding Path=IsFocused, RelativeSource={RelativeSource Self}}" Value="False" />
<Condition Binding="{Binding Path=(Validation.HasError), RelativeSource={RelativeSource Self}}" Value="False" />
<Condition Binding="{Binding Path=Text, RelativeSource={RelativeSource Self}, Converter={StaticResource IsEmptyStringConverter}}" Value="False" />
</MultiDataTrigger.Conditions>
<MultiDataTrigger.Setters>
<Setter Property="Background" Value="Green" TargetName="Border" />
<Setter Property="Foreground" Value="White"/>
</MultiDataTrigger.Setters>
</MultiDataTrigger>
If I do not set a Local Value for the Background property, this code works as expected. When focused, my Background turns to yellow and when not focused and there are no Validation errors, it turns green.
However, If I set a Local Value for the Background property, like in the following code snippet, the Background behavior is pretty strange when one of the triggers is fired.
<custom:TextBox Background="Orange" />
When the IsFocued trigger is fired, the Background stays Orange. I assume that this is due to the Dependency Property Setting Precedence of WPF, which favors Local Values over those set in a Style Trigger. And yes, the Trigger is definitely fired, as the BorderBrush and Foreground changes.
When the second Trigger (MultiDataTrigger for Validation) is fired, the Background changes to Green. The Local Value is ignored.
As far as I can see this is inconsistent. Why does the first Trigger use my Local Value while the second one sticks to its Background Setter? Are there any different priorities between these two triggers?
your missing TargetName="Border" in the isFocused trigger for the background setter so your isfocused and multidata triggers are most likely targeting different backgrounds
Thank you! When I add the attribute, the Background turns yellow when selected. Does that mean that when I target a specific Control in the Setter, the value is not overwritten by a Local Value?
basically think of it like this your textbox control is just a collection of child controls when you set the local value your setting the property of the base control when your targeting "Border" your specifying the property of a child control that overlays your base control
Style triggers or Template trigger can't change a value set by Local Value
but they can change the inherited properties of the children of that control using TemplatedParent template
Example:
<Window x:Class="WpfApplication2.MainWindow"
xmlns="http://schemas.microsoft.com/winfx/2006/xaml/presentation"
xmlns:x="http://schemas.microsoft.com/winfx/2006/xaml"
xmlns:d="http://schemas.microsoft.com/expression/blend/2008"
xmlns:mc="http://schemas.openxmlformats.org/markup-compatibility/2006"
xmlns:local="clr-namespace:WpfApplication2"
mc:Ignorable="d"
Title="MainWindow" Height="350" Width="525">
<Window.Resources>
<Style TargetType="{x:Type Button}">
<Setter Property="Background" Value="Red"></Setter>
<Style.Triggers>
<Trigger Property="IsMouseOver" Value="True">
<Setter Property="Background" Value="Green"></Setter>
</Trigger>
</Style.Triggers>
</Style>
</Window.Resources>
<Grid>
<Button Width="100" Height="100" Background="Pink"></Button>
</Grid>
</Window>
In the example The Button Backgound Property will stay Pink (The Local Value) you can see that using Snoop
But if you hover over the button you will see that the background are changing that happens because the children of the button (a Border) is changed by its parent TemplatedParent and TemplatedParent.Trigger. Again use Snoop to see the value source
| common-pile/stackexchange_filtered |
Transitive injection of CDI bean
I have a REST controller
@Path("/greet")
@RequestScoped
public class GreetController {
@Inject
private GreeterService greeterService;
@GET
@Path("{name}")
@Produces(TEXT_PLAIN)
public String greet(@PathParam("name") String name) {
return greeterService.greet(name);
}
}
which uses the GreeterService
@RequestScoped
public class GreeterService {
@Inject
private Greeter greeter;
public String greet(String name) {
return greeter.greet(name);
}
}
The GreeterService itself injects a Interface which has two implementations
@RequestScoped
@Hello
public class HelloGreeter implements Greeter {
@Override
public String greet(String name) {
return "Hello " + name;
}
}
and
@RequestScoped
@Whazzup
public class WhazzupGreeter implements Greeter {
@Override
public String greet(String name) {
return "Whazzup " + name;
}
}
@Whazzup and @Hello are simply Qualifiers.
I'm trying to inject the correct implementation of the Greeter-Interface into the GreeterService based on the Qualifier used in the GreetController.
Is this even possible using CDI?
An easy workaround would of course be to have two GreeterServices for each Qualifier but this seems to be a bad idea as the count of GreeterServices increases when new Qualifiers are added.
Of course this is just a basic example and the GreeterServcie is unnecessary but I'm not allowed to post my companies code to StackOverflow so I had to implement a simple example which reproduces our architecture.
Thanks in advance!
Your description seems to somewhat contradict what you say you want to achieve.
By putting a qualifier on an injection point in GreeterController, you are saying that you want an implementation of GreeterService with given qualifier.
E.g. for @Inject @Wazzup GreeterService service, there has to be a bean of type GreeterService and with qualifier @Wazzup. So the more qualifiers you have will be using for that, the more beans you will need.
You could also have an "ultimate producer" that creates beans for all injection points of given type and does that bases on inspecting the injection point. From there, you could see what qualifier is used. Is that what you are after? It would still require you too specify a qualifier on @Inject Greeter greeter; though.
What I want is that my GreeterService chooses the right implementation of Greeter based on the annotation I provided in the GreetController. I dont know if this is the right way to achieve such behaviour.
The qualifying annotations at the class-level of GreeterService and the field greeterService in the controller are just part of my tries.. I removed them in the original post.
Maybe this makes my intention more clear: I want to control which implementation of Greeter will be injected into the GreeterService based on the controller.
I see no qualifiing annotations at the injects. Who should have the responsibility to decide what Greeter should be injected. That one should be able to use the qualifiers.
If you take a look at the history of the original post you see that I removed the qualifiers at the inject in class GreeterController. I know that I actually have to put the qualifier at the field GreeterService#greeter but I want to control which Greeter will be injected from the GreeterController without having to define a HelloGreeterService and a WhazzupGreeterService.
To inject a qualified thing, make sure the qualifier appears on the thing and the injection point:
@Inject
@Special
private Thing thing;
@Produces
@Special
private Thing makeSpecialThing() {
return someSpecialThingCreatedOrGottenFromSomewhere;
}
From some of your comments earlier, it appears that you already know this and for whatever (possibly valid, possibly not) reason don't really want to do this.
To pick and choose a thing dynamically at runtime, i.e. once your application starts, as it seems like perhaps you want to do, you can use the facilities exposed by Instance (I've made the example below deliberately over-verbose for (hopefully) clarity:
final Instance<Object> instance = ... // get an Instance, e.g. via CDI.current() or BeanManager.createInstance()
final Instance<Thing> anyAndAllThings = instance.select(Thing.class);
final Instance<Thing> onlyTheSpecialThing = anyAndAllThings.select(new AnnotationLiteral<SpecialThing>() {});
final Thing theSpecialThing = onlyTheSpecialThing.get();
This pattern would let you choose whether you wanted your Hello-qualified Greeter or your Whazzup-qualified Greeter at any point.
Usually the desire to do this sort of work at runtime rather than startup time is the tip of a larger problematic iceberg but can be valid in some cases.
| common-pile/stackexchange_filtered |
C Looping a switch?
so here is my code:
My problem is that I want to loop the switch statement with a while loop, depending on what char response gets (at the bottom).
I have tried putting the whole switch statement in a do while loop (that flopped).
Anyways I'm new to this language and I wanted to try to make a complex program.
#include <stdio.h>
#include <stdlib.h>
#define LINE "____________________"
#define TITLE "Tempature Converter"
#define NAME "Jose Meza II"
char scale, response;
float temp, celTemp, farTemp, kelTemp;
int main() {
printf("\n \t %s \n \t %s \n", LINE, TITLE );
printf("\t by %s \n \t %s \n\n", NAME, LINE );
printf("Enter C for Celsius, F for Fahrenheit, or K for Kelvin: ");
scanf("%c", &scale);
printf("Tempature value: ");
scanf("%f", &temp);
switch (scale)
{
case ('c'): /* convert to Fahrenheit and Kelvin */
{
farTemp = (temp * 9 / 5) + 32;
kelTemp = temp + 273.15;
printf("Fahrenheit = %.2f\n", farTemp);
printf("Kelvin = %.2f\n\n", kelTemp);
break;
} /* end case 'c' */
case ('f'): /* convert to Celsius and Kelvin */
{
celTemp = (temp - 32) * 5 / 9;
kelTemp = (temp - 32) * 5 / 9 + 273.15;
printf ("Celsius = %.2f\n", celTemp);
printf ("Kelvin = %.2f\n\n", kelTemp);
break;
} /* end case 'f' */
case ('k'): /* convert to Celsius and Fahrenheit */
{
celTemp = temp - 273.15;
farTemp = (temp - 273.15) * 9 /5 + 32;
printf("Celsius = %.2f\n", celTemp);
printf("Fahrenheit = %.2f\n\n", farTemp);
break;
} /* end case 'k' */
default: exit(0); /* no valid temperature scale was given, exit program */
} /* end switch */
printf("Enter in C to Continue, or S to stop: ");
scanf(" %c", &response);
return 0;
}
What can I do?
I have tried:
do
{
printf("Enter C for Celsius, F for Fahrenheit, or K for Kelvin: ");
scanf("%c", &scale);
printf("Tempature value: ");
scanf("%f", &temp);
switch (scale)
{
case ('c'): /* convert to Fahrenheit and Kelvin */
{
farTemp = (temp * 9 / 5) + 32;
kelTemp = temp + 273.15;
printf("Fahrenheit = %.2f\n", farTemp);
printf("Kelvin = %.2f\n\n", kelTemp);
break;
} /* end case 'c' */
case ('f'): /* convert to Celsius and Kelvin */
{
celTemp = (temp - 32) * 5 / 9;
kelTemp = (temp - 32) * 5 / 9 + 273.15;
printf ("Celsius = %.2f\n", celTemp);
printf ("Kelvin = %.2f\n\n", kelTemp);
break;
} /* end case 'f' */
case ('k'): /* convert to Celsius and Fahrenheit */
{
celTemp = temp - 273.15;
farTemp = (temp - 273.15) * 9 /5 + 32;
printf("Celsius = %.2f\n", celTemp);
printf("Fahrenheit = %.2f\n\n", farTemp);
break;
} /* end case 'k' */
default: exit(0); /* no valid temperature scale was given, exit program */
} /* end switch */
printf("Enter in C to Continue, or S to stop: ");
scanf(" %c", &response);
}
while(response == 'c');
A suggestion: try to make a program simpler. not more complex.
Is this your homework? What exactly have you tried? Show me your tried do-while loop and we can see what you may have done wrong.
What is the fun in that (I'm 14 btw)
so no. not my homework
The most common way is to use a do-while loop. Make following changes
1. initialize `response` to `C`.
2. put the `switch` case and next `printf()` and `scanf()` in `do..while()` loop.
3. in the `while ()` condition, check `while (c == `C`)`.
After the edit, the problem seems to be
scanf("%c", &scale);
you need to have a space before %c, like
scanf(" %c", &scale);
to avoid the ENTER [\n]after C at last.
Like C is, pretty interesting.
@el_coder just as the same reason for scanf(" %c", &response);
| common-pile/stackexchange_filtered |
How do I make a program immutable?
Say I have a deployed Solana Program with an upgrade authority. What is the best practice to revoke this authority?
Found the answer in the docs:
When deploying a program normally, I use
solana program deploy
which makes it upgradeable by default. If I want to make it non-upgradeable, I can deploy (an update) using:
solana program deploy --final
Don't forget to click ‘accept answer’ to mark your self-answer as the accepted one!
It only lets me do that after 2 days, but thanks for the reminder nonetheless :)
If you just want to make an existing program immutable, it's easier to simply remove the upgrade authority, rather than going through another deployment:
solana program set-upgrade-authority <PROGRAM_ADDRESS> --final
| common-pile/stackexchange_filtered |
C# Multiple If statement replacement
I am trying to make a practice program that calculates a persons overall grade. The test consists of three parts each with different weightings. Part 1 is worth 5 grades, part two 3 grades and part 3 is worth 2 grades.
So if a person got A B C they would receive 5A 3B and 2C.
Now, in order to receive and A/B/C overall requires a certain amount of each grade. For example in order to receive an A overall you need to have at least 5A's and 7 of the grades must be B or higher and all the grades need to be C or or higher.
B, C, D etc all have their own requirements too.
What is the best way to code this as at the moment I am using a counter for each grade and then doing if/else if statements to check the amount of each grade a person has got like so:
if (aGradeCount >= 5)
{
//Add total grade
}
}
}
//To receive a B
if(bGradeCount >= 3 && aGradeCount <5 && cGradeCount >=2)
{
if(bGradeCount + cGradeCount +dGradeCount + aGradeCount>= 7)
{
if(dGradeCount <= 3)
{
//Add new total grade
}
}
}
Now I understand this is terrible practice, but how can I code this better? Using a switch statement? If so how do I go about doing that?
There is something messed up in your code above; there are way too many close curly braces in the first if statement. Can you show how the code is actually structured?
if(bGradeCount >= 3 && aGradeCount < 5 && cGradeCount >= 2 && dGradeCount <= 3 && aGradeCount + bGradeCount + cGradeCount + dGradeCount >= 7){...} you can move the dGradeCount <= 3 to the end of the if() if you need to worry about short circuiting.
The nested ifs are ok, though the grading policy itself is bizarre. I don't think I ever encountered such a grading scheme - is it a real world one, or something from the Story Problem alternate universe?
how can I code this better?
Write a specification. Then for every concept mentioned in the specification, write a method. Here's part of a specification; you already wrote it:
in order to receive an A overall you need to have at least 5 A's and least 7 of the grades must be B or higher and all the grades need to be C or better.
Break it down
in order to receive an A overall
at least 5 A's AND
at least 7 of the grades must be B or higher AND
all the grades need to be C or better
OK, now we can start turning that into a method:
static bool QualifiesForA(int aCount, int bCount, int cCount, int dCount)
{
// In order to receive an A overall we require:
// TODO: at least 5 A's AND
// TODO: at least 7 of the grades must be B or higher AND
// TODO: all the grades need to be C or better
// If these conditions are not met then an A is not earned.
return false;
}
All right we have turned our specification into code. Wrong code, but code. Let's keep going. We have a line of a specification. Write a method:
static bool AtLeastFiveA(int aCount)
{
return aCount >= 5;
}
Hey, that was a correct method. We are making progress. Now use it:
static bool QualifiesForA(int aCount, int bCount, int cCount, int dCount)
{
// In order to receive an A overall we require:
// at least 5 A's AND
// TODO: at least 7 of the grades must be B or higher AND
// TODO: all the grades need to be C or better
bool atLeast5A = AtLeastFiveA(aCount);
// If these conditions are not met then an A is not earned.
return false;
}
Now we have another problem. At least 7 are B or higher. OK, write a method:
static bool AtLeastSevenB(int aCount, int bCount)
{
return aCount + bCount >= 7;
}
Another correct method! Use it!
static bool QualifiesForA(int aCount, int bCount, int cCount, int dCount)
{
// In order to receive an A overall we require:
// at least 5 A's AND
// at least 7 of the grades must be B or higher AND
// TODO: all the grades need to be C or better
bool atLeast5A = AtLeastFiveA(aCount);
bool atLeast7B = AtLeastSevenB(aCount, bCount);
// If these conditions are not met then an A is not earned.
return false;
}
Now we need the last bit:
static bool NoD(int dCount)
{
return dCount == 0;
}
Put it together.
static bool QualifiesForA(int aCount, int bCount, int cCount, int dCount)
{
// In order to receive an A overall we require:
// at least 5 A's AND
// at least 7 of the grades must be B or higher AND
// all the grades need to be C or better
bool atLeast5A = AtLeastFiveA(aCount);
bool atLeast7B = AtLeastSevenB(aCount, bCount);
bool noD = NoD(dCount);
if (atLeast5A && atLeast7B && noD)
return true;
// If these conditions are not met then an A is not earned.
return false;
}
Now, the question to ask yourself is:
Is this code correct? GET IT CORRECT FIRST. This code is very verbose but I'll tell you right now, it exactly matches the specification you gave.
Once the code is correct, can we make it more clear?
Yes; we could for instance say:
static bool QualifiesForA(int aCount, int bCount, int cCount, int dCount)
{
// In order to receive an A overall we require:
// at least 5 A's AND
// at least 7 of the grades must be B or higher AND
// all the grades need to be C or better
bool atLeast5A = AtLeastFiveA(aCount);
bool atLeast7B = AtLeastSevenB(aCount, bCount);
bool noD = NoD(dCount);
return atLeast5A && atLeast7B && noD;
}
And now maybe you will say, you know, some of these methods are unnecessary abstractions, maybe I can just replace them with their bodies:
static bool QualifiesForA(int aCount, int bCount, int cCount, int dCount)
{
// In order to receive an A overall we require:
// at least 5 A's AND
// at least 7 of the grades must be B or higher AND
// all the grades need to be C or better
bool atLeast5A = aCount >= 5;
bool atLeast7B = aCount + bCount >= 7;
bool noD = dCount == 0;
return atLeast5A && atLeast7B && noD;
}
The point is: we start from a very verbose, CLEARLY CORRECT program, and then we make small, simple, clearly correct transformations to make it more concise. When you think you have a good balance of concision and readability, stop.
OK, now you have solved the problem of "did we earn an A?" Now you do "did we earn a B?" and so on. Write a specification for every part, and then write code that clearly implements the specification.
This sounds like a heavyweight process, but this will pay huge dividends as you learn how to program. Your code will be better organized, it will be less buggy, it will be easier to read and understand and modify.
The point of this technique is to focus on obvious correctness of every part. Always concentrate on obvious correctness. A program which is correct but you cannot tell it is correct is a program that might not be correct! Always concentrate on correctness first. Making a wrong program more elegant, or faster, or more feature complete means that you have an elegant, fast, rich-featured bug farm.
Summing up:
Write clear specifications.
Write code to clearly match the spec.
Tiny methods are A-OK. You can always eliminate them later.
Correctness is more important than everything else; make the code better once you know it is correct.
In addition, I would also get rid of the 'magic' numbers, and either make them constants or use an enumeration which will help out a lot in the readability aspect of the checks.
@d.moncada: Sure. We could go a lot farther than that. This is plainly a beginner programmer here so let's not get into the pattern of constructing a hierarchy of policy objects and then applying policy to data, blah blah blah. The point I wanted to get across here is that you organize your code better by having organized ideas in the first place.
Thank you for laying that out, it makes a lot more sense.
@KerrFloyyd: You're welcome. This is how I learned to program. This is how I still program! As you get better you start doing more and more of these steps in your head rather than on the screen. Doing them on the screen builds up the mental patterns of thought so that you can do them fast in your head later.
@EricLippert: great guidance. I'd use a bit of Single Responsibility Principle to make the code even more understandable & help with breaking the problem down a bit more.
This answer is pure gold, I wished I could have learned this with an answer like this back when I started programming, instead of learning it the hard way, by trial and error... MANY errors! :-)
For code clarity I would do this that way :
//Main function
{
///code
if(MethodWhichDescribesLogic(aGradeCOunt,bGradeCount,cGradeCount,dGradeCount){
char b = 'B';
person.TotalGrade = b.ToString();
}
}
Then in some place else :
bool MethodWhichDescribesLogic(type aGradeCount, type bGradeCount, type cGradeCount, type dGradeCount){
return
(PassingGrade(bGradeCount,aGradeCount,cGradeCount) &&
GoodGradesType(bGradeCount,cGradeCount,dGradeCount,aGradeCount) &&
dGradeCount <= 3);
}
bool PassingGradesCount(type bGradeCount,type aGradeCount,type cGradeCount)
{
return bGradeCount >= 3 && aGradeCount <5 && cGradeCount >=2;
}
bool GoodGradesCount(type cGradeCount,type bGradeCount,type aGradeCount,type dGradeCount)
{
return bGradeCount + cGradeCount +dGradeCount + aGradeCount>= 7;
}
Remember that every if-else switches can be replaced by conditional table.
So if overall grades count would be like 10. It could be
A B C Overall
5 7 10 A
4 7 10 B
Then you make array of it and find where you are in the array.
For example (I admit that I'm puzzled by your example so I might get it wrong here.):
var grades = new[]{
new { A = 5. B = 7, C = 10, Overall = "A"},
new { A = 4, B = 7, C = 10, Overall = "B"},
...
}
var myGrade = grades.FirstOrDefault(g => myA >= g.A && myB >= g.B && enoughC)
With proper formatting it looks much better than tons of if's. And you always have your choice table in front of you.
I don't know if that's terrible practice. It's a little unnecessary, since there's nothing else in the block, but another if statement. You can use more && operators and parenthesis if you just want to use one if statement.
if ((bGradeCount >= 3 && aGradeCount <5 && cGradeCount >=2) &&
(bGradeCount + cGradeCount +dGradeCount + aGradeCount>= 7) &&
(dGradeCount <= 3))
{
char b = 'B';
person.TotalGrade = b.ToString();
}
Parenthesis make it more readable, but I don't think they are necessary since everything is &&ed.
No they're not necessary at all. Just a way to keep his comparisons separate and maintainable.
Firstly, make use of Boolean Algebra in the OverallGrade() to see which cases you don't need to consider. For example, if for checking an A grade you have already seen that gradeDistribution.A >= 5, don't test gradeDistribution.A < 5 when testing for grade 'B' as it is obvious that if you are testing the case for B, you have already tested gradeDistribution.A < 5 as true.
Next, put the grade calculation in another method and make that method return as early as possible.
Finally, in order to get the overall grade, you may write the method as:
private static char OverallGrade(int partA, int partB, int partC)
{
//Now in this method, check one by one which
//overall grade the provided values fall in
//Call another method to get count of individual As, Bs, Cs etc
var gradeDistribution = GetIndividualCount(partA, partB, partC);
//Now, first check for A and return immediately if true
if (gradeDistribution.A >= 5) return 'A';
//Now, check for B and again return if the values satisfy for B
if (gradeDistribution.B >=3
&& gradeDistribution.C <= 2
&& gradeDistribution.D <= 3
&& ...)
return 'B';
//Keep adding for each case and return as soon as you find the grade.
}
Now, the class GradeDistribution, whose variable we have used above can be created which will hold count of each grade:
public class GradeDistribution
{
public int A; //Count for Grade A
public int B; //Count for Grade B
public int C; //Count for Grade C
public int D; //Count for Grade D
}
The above code is an example of introducing a class for an entity that does not exist in real world.
Next, GetIndividualCount() can be written as:
private static GradeDistribution GetIndividualCount(int partA, int partB, int partC)
{
var gradeDistribution = new GradeDistribution();
/*
Calculate and assign values to gradeDistribution.A, gradeDistribution.B...
*/
return gradeDistribution;
}
| common-pile/stackexchange_filtered |
Beginner Looking for Guidance on Methods
I am trying to complete a project where we have to take a set of data of random numbers between 0 and 364 and see what percentage of the runs (the method takes the number of items in the data set and the number of runs) have a duplicate number in them.
Below is what I have so far, but I am really struggling to figure out how to do this as I keep getting errors:
public double calculate(int size, int count) {
double percentage;
int matches = 0;
int check;
int i = 0;
int add = 0;
Random rnd = new Random();
List<Integer> birthdays = new ArrayList<>();
for (int k = 1; k <= count; k++){
rnd.setSeed(k);
do{
add = rnd.nextInt(365);
birthdays.add(add);
i++;
}while (i < size-1);
//birthdays.add(rnd.nextInt());
for (int j = 0; j <= size-1; j++) {
check = birthdays.get(j);
if (birthdays.indexOf(check) != birthdays.lastIndexOf(check)) {
matches++;
j = size;
}
}
}
percentage = (double)matches / count * 100;
//percentage = 8;
return percentage;
}
What errors are you getting?
OutofBoundsException
I am not sure what I am doing wrong with regard to even answering the question. Any help would be appreciated
What's the difference between the size and count params
There are a couple of problems with your code here. One is the while loop directly after the first for loop that doesn't seem to be doing anything. It says:
while(i < size-1);
That might be giving you an error, but error you are talking about is probably from your second for loop. When you instantiate it you say:
for (int j = 0; j <= size-1; j++)
but then you say:
j = size;
since when you instantiate it you say j<=size-1 but then you set j=size it will give an error.
| common-pile/stackexchange_filtered |
Accessing DataSet Returned from my DAL
I am attempting to use ASP.Net and generate a Database Access Layer for re-usability. I am using the below to pass a stored procedure name to my DAL and execute the stored procedure, then return the results as a dataset.
However, in the calling class, I am getting the error
the name ds does not exist in the current context
What should I alter so that I can access the returned dataset in my calling class?
class DatabaseAccessLayer
{
public DataSet RunSQLServerStoredProcedure(string uspName)
{
using (SqlConnection con = new SqlConnection(essToUse))
{
SqlCommand sqlComm = new SqlCommand(uspName, con);
sqlComm.CommandType = CommandType.StoredProcedure;
SqlDataAdapter da = new SqlDataAdapter();
da.SelectCommand = sqlComm;
DataSet ds = new DataSet();
da.Fill(ds);
return ds;
}
}
}
class Gopher
{
DatabaseAccessLayer dal;
public void PopulateHootersStores()
{
DataRow dr;
dal.RunSQLServerStoredProcedure("ReturnStoredProc");
DataTable dt = ds.Tables[0];
dr = dt.NewRow();
dr.ItemArray = new object[] { 0, "--Select A Store--" };
dt.Rows.InsertAt(dr, 0);
//Adding in All option
dr = dt.NewRow();
dr.ItemArray = new object[] { 1, "All" };
dt.Rows.InsertAt(dr, 1);
cboTest.ValueMember = "StoreNumber";
cboTest.DisplayMember = "StoreName";
cboTest.DataSource = dt;
con.Close();
}
}
At a quick glance the following errors stand out.
Your dal variable isn't declared instance, since you didn't define static you need to provide a new operator to create an instance.
You call ds but don't have it defined in your gopher class, thus it doesn't exists or have a definition.
Your data access layer class method returns a data set, but you instantiate without storing the returned value. Example:
var set = new DataAccessLayer().RunSQLServerStoredProcedure(...);
The variable set will now hold your returned data set from the method, you also define your data access layer.
| common-pile/stackexchange_filtered |
Recommendations on proper refactoring in a When statement?
I'm trying to call two lengthy commands in a when statement, but for some reason, because of its syntax, it performs two of the commands twice when it is called :
@email = Email.find(params[:id])
delivery = case @email.mail_type
# when "magic_email" these two delayed_jobs perform 2x instead of 1x. Why is that?
when "magic_email" then Delayed::Job.enqueue<EMAIL_ADDRESS>@email.body)
Delayed::Job.enqueue<EMAIL_ADDRESS>@email.body)
when "org_magic_email" then Delayed::Job.enqueue<EMAIL_ADDRESS>@email.body)
when "all_orgs" then Delayed::Job.enqueue<EMAIL_ADDRESS>@email.body)
when "all_card_holders" then Delayed::Job.enqueue<EMAIL_ADDRESS>@email.body)
end
return delivery
How can I make it so that when I hit when "magic_email", it only renders both those delayed jobs once ?
I didn't know there is a then. May I ask what you are doing with delivery? Does it have a value after the case statement is finished?
I forgot to add the return at the end. In that way when this method is executed, it just peforms whichever delayed::job it lands on.
I have tried this with following example:
q = []
a = case 1
when 1 then q.push 'ashish'
q.push 'kumar'
when 2 then q.push 'test'
when 4 then q.push 'another test'
end
puts a.inspect #######["ashish", "kumar"]
This is working fine. It means your case-when syntax is ok. It might be you have aome other problem.
Thanks I ended up just putting both the functions together in a single delayed_job.
You are calling return delivery and delivery varible may be having the value to call the delayed job again. It depends on what the then statement returns, so try not to return anything if possible. I believe you want to do the delayed job and not return anything by using the function.
Perhaps you should just have the case and dont store it in any variable. I mean delivery variable has no purpose here.
| common-pile/stackexchange_filtered |
bit shifting contiguous block of memory using a char array
Trying to create my own base64 encode, I know there are others out there but every time I try to reinvent the wheel I learn something.
I am trying to use a full char array as a block of memory and shift out what I would I need.
I have tried casting it to an uintptr_t.
My thoughts at this point are that I am just right shifting the pointer address rather then the contiguous memory block, if that is even possible.
Any thoughts appreciated.
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include "base_n.h"
#include <stdint.h>
int main (void){
char *foo = "foo";
char * base64encode(char * input_string){
/*
* result needs to be 24 bits.
* +--first octet--+-second octet--+--third octet--+
* |7 6 5 4 3 2 1 0|7 6 5 4 3 2 1 0|7 6 5 4 3 2 1 0| input blcoks
* +-----------+---+-------+-------+---+-----------+
* |5 4 3 2 1 0|5 4 3 2 1 0|5 4 3 2 1 0|5 4 3 2 1 0| output blocks
* +--1.index--+--2.index--+--3.index--+--4.index--+
* lets define a block as 3 bytes for input and 4 bytes for output
* if input has a remainder when divisible by 3 then we need to add that
* remainder for to complete the block.
* the constant k has a 4:3 ratio from output:input block.
* size of memory needed for output is
* 4(((strlen(input) % 3) + (strlen(input)))/3)
*/
unsigned int input_string_length = strlen(input_string);
unsigned int result_length = (4*(((input_string_length % 3) + (input_string_length))/3));
char *result = malloc(result_length *sizeof(char));
char *encode = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
/* the number of bits in input_string is input_string_length * 8
* if 6 bits are subtracted from the bit size of string length each
*iteration and masked with 0b0011111 or 0x3f then you get value needed for
* that iteration.
* keep interating while you have more than 8 bits.
* */
/* +----+
* |ptr | 4 or 8 bytes
* +----+
* |
* | +---+---+---+---+
* -------> array | f | o | o |\0 |
* +---+---+---+---+
*
*
*/
unsigned int shifter = (strlen(input_string)*8);
unsigned int blocks_used = 0;
while(shifter >= 6){
printf(" shifter: %d\n", shifter);
shifter-= 6;
printf(" input_string shifted %d is %d: \n", shifter, ((*input_string >> shifter) & 0x3F));
*result++ = encode[((*input_string >> shifter) & 0x3F)];
printf(" result : %s\n", result);
blocks_used++;
}
/* result_length - blocks_used tells you how many 6 bit blocks are left
* if the amoult left is 3 then you need the least two significant
* bits from from the 8 byte block since only two bits of the 8 bit
* blocks are left, this makes the last two 6 bit blocks padding ('=').
* If 2 6 bit blocks are left then the last 2 6 bit blocks are padding
* ('=').
* If 1 6 bit block is left then just add padding to the last block
* is just padding ('=').
* */
printf(" result_length - blocks_used : %d\n", result_length - blocks_used);
// TODO fix my broke ass idea that is below
switch(result_length - blocks_used){
case 3:
*result++ = encode[input_string[input_string_length-1] & 0x03];
*result++ = '=';
*result = '=';
break;
case 2:
*result++ = '=';
*result = '=';
break;
case 1:
*result = '=';
break;
default:
break;
}
// END TODO
return result;
}
char *out = base64encode(foo);
printf(" base64encode : %s \n", out);
return 0;
}
Output:
shifter: 24
input_string shifted 18 is 0:
result :
shifter: 18
input_string shifted 12 is 0:
result :
shifter: 12
input_string shifted 6 is 1:
result :
shifter: 6
input_string shifted 0 is 38:
result :
result_length - blocks_used : 0
base64encode :
Please try to reduce your code to a [mre]. The fact that you are trying to encode in B64 is not that relevant, the problem is more general, and the code can be reduced. Shifting a "block of memory" is indeed not possible, if that's what you're trying to do. You would have to shift and carry every single 4-byte (or 8-byte) chunk one at a time.
Nested functions in C ? Good one.
In general, strlen is no good for determining the length of a binary block of data to be encoded into a base64 string. Also char might be a signed type, and the >> operator might not work as you want. It is better to use unsigned char for binary data.
You should indent your code properly. Poorly indented code is hard to debug and hard to understand
Also, what's base_n.h?
Just function prototypes, trying to implement rfc 4648 in full. Have not programmed in a few years and just trying to get back on the horse.
There is no question in your post. Ask a specific question.
| common-pile/stackexchange_filtered |
Site shows URL instead of "meta title" after deleted few plugins (not any SEO plugin was deleted)
www.Pifeed.net
after i deleted some useless plugins due to load-time issues the blog pages show url instead of title tag, though meta title tag is there in source code..
thanks for answers
your title tag is inside the script tag. It must be placed outside script. So you have to close the script tag for "adsbygoogle".
| common-pile/stackexchange_filtered |
Can Nginx limit the request by status code?
I wanna limit the ip who has 150 nums 500 status code request last minute.
I only get the limit_req_zone module that can limit by the ip count.
Is there any way to limit by status code?
Thanks
This could be of help: https://serverfault.com/questions/907860/nginx-limit-request-based-on-response-status-code#987321
It is possible to limit by any key. See limit_req_zone.
Example:
limit_req_zone "$binary_remote_addr$request_uri" zone=one:10m rate=2r/s;
Notice the $binary_remote_addr$request_uri. You can use any variable here (ip + status code in your case).
Can i only limit the status code 500? when using "ip + status code " mode , the code 200 will be limited.
| common-pile/stackexchange_filtered |
@OneToOne relationship between JPA entities that don't share any foreign key relationship
I have two entities as follows:
Invoice
Primary Key is InvoiceId
Other columns of interest are [OrgId, VendorId, VendorInvoiceId]
InvoiceState
Primary Key is InvoiceStateId
Other columns of interest are [OrgId, VendorId, VendorInvoiceId]
The "other columns of interest" form a unique key in both the tables, except that the second table, InvoiceState, can contain records that doesn't have a corresponding record in the first table, Invoice.
At the entity layer, I would like define Invoice entity to contain a reference to InvoiceState entity with a @OneToOne relationship defined, with @JoinColumns as follows:
@OneToOne
@JoinColumns
({
@JoinColumn(name="OrgId", referencedColumnName="OrgId"),
@JoinColumn(name="VendorId", referencedColumnName="VendorId"),
@JoinColumn(name="VendorInvoiceId", referencedColumnName="VendorInvoiceId")
})
But this throws an exception that the foreign key count is not the same in both entities. I don't even have a foreign key defined between these two tables.
Is there a way to define a @OneToOne relationship between two entities that don't share Foreign Keys, but have a set of column that can be used during JOIN?
The name of an "entity" really ought to be a noun to be more understandable, hence Invoice and InvoiceState ... fwiw.
I would advice to map the above 3 columns as a component using @Embeddable and use the component in for mapping using @Embedded and defining the joining condition.
e.g. below:
@Embeddable
public class ReferenceInfo {
private Long orgId = null;
private Long vendorId= null;
private Long vendorInvoiceId= null;
.........
.........
}
@Entity
public class Invoces{
private Long invoiceId = null;
private ReferenceInfo refInfo = null;
private InvoiceStates invoiceStates = null;
@Id
public Long getInvoiceId(){
return invoiceId;
}
......
@Embedded
public ReferenceInfo getRefInfo(){
....
}
@OneToOne(mappedBy="refInfo"))
public InvoiceStates getInvoiceStates(){
return invoiceStates;
}
}
@Entity
public class InvoiceStates {
private Long invoiceStateId = null;
private ReferenceInfo refInfo = null;
@Id
public Long getInvoiceStateId(){
return invoiceStateId;
}
......
@Embedded
public ReferenceInfo getRefInfo(){
....
}
}
If the three fields are unique, you can mark them as the PK for the InvoicesState entity, which then allows state to reference them in a oneToOne. The PK used for JPA doesnt need to match the table id, it just needs to be unique.
Some providers do allow referencing non-PK fields in mappings. This isn't always suggested though because the entities are usually cached on PKs, so you may get extra database hits to resolve the reference. In Eclipselink, you could fake the mapping out by using only one of the fields, and correct the mapping in a descriptor customizer to add the remaining fields in the relationship.
These tables are currently quite large and also have a lot of history as to how they evolved and such changes at the DB layer require a lot of investigation and hence was looking for some solution to still define a proper one-to-one at the domain layer.
Marking the 3 columns as the ID in JPA doesn't change anything in the database, and allows you to use proper one to ones.
| common-pile/stackexchange_filtered |
Detecting the local style in a text with ConTeXt
I would like to detect the local style in a text. What is the ConTeXt equivalent of such these LaTeX commands?
\ifthenelse{\equal{\f@shape}{it}}{italic}{not italic}
\ifthenelse{\equal{\f@series}{bf}}{bold}{not bold}
I especially need the equivalent of \f@shape and \f@series commands (I know how to do a string comparison: \doifelse).
Check the value of \fontalternative.
Two possibilities come to mind that have different use cases.
Emphases
Using emphases and
alternative styles
you can just test for \currenthighlight which will expand to the
name given in the highlight definition.
% macros=mkvi
%% 1. define a couple styles
\setupbodyfont [iwona]
\definecharacterkerning [letterspace:wide] [factor=.125]
\definealternativestyle [emph:1] [{\feature [+] [smallcaps]}]
\definealternativestyle [emph:2] [{\setcharacterkerning [letterspace:wide]}]
%% 2. define emphases that use the styles
\definehighlight [emphone] [style=emph:1]
\definehighlight [emphtwo] [style=emph:2]
\definehighlight [emphthree] [emphtwo] [style=emph:2,color=red] %% derived
%% 3. define a conditional
\def \doifcurrenthighlight #name#true#false{%
\doifelse{\currenthighlight}{#name}{#true}{#false}%
}
\starttext
Some \emphone {important} text, and something marked up
\emphtwo {differently}.
Print current emphasis name: \emphone {\currenthighlight}.
Print only if matching emphasis:
\startitemize
\item \emphone {\currenthighlight} \emphone {\doifcurrenthighlight {emphone}{Yes}{No}}.
\item \emphtwo {\currenthighlight} \emphtwo {\doifcurrenthighlight {emphone}{Yes}{No}}.
\item \emphthree {\currenthighlight} \emphthree {\doifcurrenthighlight {emphtwo}{Yes}{No}}.
\item \emphthree {\currenthighlight} \emphthree {\doifcurrenthighlight{emphthree}{Yes}{No}}.
\stopitemize
\stoptext
The obvious limitation is -- as demonstrated by the test for
emphthree -- that you cannot test for the “super-macro” that the
current one inherits from.
Raw Font Switches
Without a description of what the \f@shape and \f@series macros
are supposed to accomplish it is hard to tell whether a “literal”
translation of your Latex code is possible.
From the names I’d guess they are NFSS related (please correct me!),
which is highly Latex specific so they are unlikely to have a direct
counterpart in Context.
By default, Context defines a different set of styles and shapes,
whose properties are exposed via macros:
\fontalternative: the name of the alternative, e. g. bf,
rm, etc.
\fontsize: current font size in pt.
\fontstyle: name of the style, e. g. rm, ss, etc.
These should allow for a good approximation of your Latex example at
the price of using raw formatting directives.
However, above solution utilizing style alternatives is more idiomatic
and should be preferred in hand-written code.
You are right that \f@series, etc. are NFSS-related: they are used to track the different 'font axes' by LaTeX. As ConTeXt stick's with a Knuth-like approach for e.g. \rm I'm not sure they are really translatable.
@phg Where can I learn about this syntax \def \doifcurrenthighlight #name#true#false?
@Manuel That’s MkVI syntax. Context has a preprocessor (see luat-mac.lua that is applied if a file begins with the %macros=mkvi directive or its name has the extension .mkvi.
The syntax is used extensively all over the Context source. There is not really anything to document since it works as you’d expect it to intuitively: named arguments are placeholders for TeX’s numbered ones.
That should be % macros=mkvi (note the space after %).
@Aditya Actually the heuristic is even looser, even allowing for something as ridiculous as %foobarmacros=mkvi.
| common-pile/stackexchange_filtered |
mapping using cylindrical map via script
I'm trying to create a default mapping mode for an object. I followed through understanding the python API for blender, and things are making more sense at this point, however I'm not able to create the UV map I wish on the object. Code below:
import bpy
from os import getcwd
file = getcwd()+'/data/naturalSat/models/2_Pallas_101.obj'
print("Testing")
imported_object = bpy.ops.import_scene.obj(filepath=file)
obj_object = bpy.context.selected_objects[0]
print("processing: "+obj_object.name)
#Extract the mesh and smooth all polygons
output = obj_object.data
for face in output.polygons:
face.use_smooth = True
bpy.ops.object.mode_set(mode = 'EDIT')
bpy.ops.mesh.select_all(action='SELECT')
bpy.ops.uv.unwrap()
bpy.ops.uv.cylinder_project(direction='VIEW_ON_EQUATOR', scale_to_bounds=True)
#export
filePath = getcwd()+'/'+obj_object.name+'.obj'
bpy.ops.export_scene.obj(filepath=filePath)
Am I doing this correctly? one of the concerns I have is that the cylindrical_project arguments is direction = 'VIEW'. I'm loading a python script using the command line, so headless mode.
I am not a professional myself, but the code looks okay, I guess. Your direction isn't "view", though, but "VIEW_ON_EQUATOR", you could also use "VIEW_ON_POLES", "ALIGN_TO_OBJECT", though: see the documentation
If it's "right" or "wrong" depends a little on the shape of your objects and how you want the unwrap to be aligned to it. Maybe try out all three and see which one fits best?
They don’t change the mapping which is currently the texture mapped on every face
| common-pile/stackexchange_filtered |
When would I need to call form.ShowDialog(IWin32Window) rather than form.ShowDialog()?
I have some code, which calls the form.ShowDialog(IWin32Window) overload rather than the form.ShowDialog() one, but which passes in the current active window as the parameter.
Using the other overload seems to leave the behaviour intact. Is it safe to switch to the simpler overload? When would I want to pass in the IWin32Window parameter?
The IWin32Window parameter is for specifying the owner of the dialog. In this case it's an interface so that you can have non-managed windows (those from a COM object you're using, for example) as the dialog's owner.
I have to disagree with the users here who say that you shouldn't specify an owner. In fact, it's always advisable to specify an owner for a form when possible. While dialogs may not be as important as non-modal forms, getting into the habit of specifying an owner is always a good idea.
You can use the IWin32Window parameter if you want to parent your form in another application or form.
So for example you can get the handle of another application or form and show your form as a part of that application. You can find more information here
You should use the void constructor under normal circumstances. Like Nathan said, only use the IWin32Window constructor if you want to show the dialog with a specific owner. Which is not typically necessary.
The documentation for ShowDialog() states, "Shows the form as a modal dialog box with the currently active window set as its owner." So, unless you explicitly need to set the owner of a form to a window that is NOT the active window, there is no need to use ShowDialog(IWin32Window).
It's always a good idea to write your code to convey your intent, so even if you only want to parent it to the active window, specifying that window as a parameter makes your code explicit in its intentions rather than relying on those who maintain your code to determine which window was intended to be active when the dialog is shown.
So do you think that you should only use the parameterless overload when you want whichever window is the active window to be the parent?
| common-pile/stackexchange_filtered |
Using local package in React Native project not working
I need to install my local package in my react-native project.
I install the package using npm i /path-to-folder command
But i get this error Unable to resolve module @babel/runtime/helpers/interopRequireDefault
local package package.json
{
...
"devDependencies": {
"@babel/core": "^7.19.6",
"@babel/runtime": "^7.19.4",
"react": "^18.1.0",
"react-native": "^0.70.1",
"react-native-svg": "^13.2.0"
}
}
project's metro-config.js
resolver: {
nodeModulesPaths: [packagePath],
},
watchFolders: [packagePath],
| common-pile/stackexchange_filtered |
Excel - pivot table does not group dates
I am trying to group the dates in the pivot table into months and years. My original date data is in mm/dd/yyyy format in a table that is refreshed through an embedded SQL script.
For some reason when I create a pivot table from this raw table I am getting an error message saying "Cannot group this selection". I checked I don't have any blank data, the column is in date format. I tried everything, tried to convert from test -to -columns to dates, that in turn brings an error. There is something really wrong with it - not sure what.
Can someone help me, please?
More clarification:
My Raw Data is a table that is refreshable thru embedded SQL script that has a column with dates - let's say it's column A.
This column A that is populated from SQL has dates in the format 01/20/2016 and also has blanks. I am doing few steps thru formulas to create another column - column B. As i need to return the Sunday of that specific date - for example if the date is saying 9/20/2016 - column B will return 09/18/2016.
Bascially Column B has the dates equal to the dates in column A, and whatever is blank in column A is #N/A in column B.
I am creating column C to grab the dates from column B in the following way: Column C=IFERROR(COLUMN B,"01/01/2018"). So whatever in #N/A in column B I am saying let it be equal to "01/01/2018".
I am creating a pivot table from this raw data table and dragging the column C as it's mu date column. Then I want to group these dates into months and years as well. However I am either getting the error "Cannot group this selection" or if it is grouping it is creating another column called Group 1 which is not what i am looking for, don't know if it can be converted to months or years?
on the home tab there is a [Group Selection] section which is greyed out. I think if it is not greyed out it will be able to give me the option of choosing month and year hierarchy.
I checked column c ( my date column) there are no blanks - the format it 1/2/2016, don't know why it's not grouping.
I tried [Text to Columns] within pivot table, got an error. I tried [Text to Columns] in the raw data table it is converting columnn C into it its actual formulas: Iferror
Try turning the column to "General". If the dates change to number then you should be able to group. If they remain dates then they are text strings that look like dates and not true dates in Excel.
Thanks! I tried converting them into general - most of them got converted to numbers, but 5 of them remained as dates. ANother issue I tried grouping the ones that were converted to numbers and instead of bringing a hierarchy of month and year it just gives me Group one with the same filed title. Do you know why? The group selection field is greyed out on the home tab
There may be a locale issue. When dates are stored as text, converted to dates will work fine if the date is 1/1/2016, but will not properly convert all dates if your locale uses DMY and the text date is using MDY (or the other way around). Test a cell in your data source that has a day value greater than 12 and ensure that it is returned as a proper date.
After checking, turn your data source into an Excel Table Object with Ctrl-T or Insert > Table. Then use the filter drop-down on the date column. If the column contains only dates, you should not see individual entries, but a year/month/day expandable tree.
Also, double check for blanks. Maybe your pivot data source includes a blank row at the end of the source data.
Edit: If you get an error when doing text-to-columns to dates, then that is a clear indicator that the text cannot be interpreted as a date. In the Text to column wizard, ensure that you set the correct order of day, month and year, as it is in the source. In the screenshot you can see that the date is in the order MDY. My locale uses DMY by default, so I need to let the text to column wizard know that the source data is in MDY order.
Edit2: From your comments it looks like you are using a formula to construct the date. A formula cannot be processed by text to columns (why on earth would you do that????).
Ensure that the formula you use to create the dates returns real dates, not text that looks like a date. Edit your question, provide the details of the formula for inspection. And -- please, hold off with the comments. This is not a chatty forum.
So I am trying to apply [Text to Columns] and then MDY format, after that the date in a cell dissappears and the formula is displayed. My original date column A consists of the following:
Dates in the format (9/20/2016)
#N/A In order to not have #N/As in my dates column i add a second column B where I add a formula: =Iferror(cell from column A, "01/01/2018"
Basically I am denoting every #N/A as 01/01/2018 in column. When I create the pivot table i drag column B for the date field
Can it be that column B is a formula that's why it's not working? [Text to Column] when applying inside pivot table brings an error, when I apply in the be raw table before pivoting, it converts column B into it's formulas
Do you know how to solve this problem?
Whoa! This is not a chatty forum. Please stop putting all this into comments. Edit your question and put the details into your question. Then post a comment to alert people that you have made changes.
I edited my answer.
I am very sorry. I am first time to this forum. I edited my answer but not sure if it's seen to the audience. Please, let me know if it's not seen and I will try again. Thanks!
What answer? You need to edit your QUESTION.
Your PivotTable is interpreting "01/01/2018" as text rather than as a date.
Instead of
IFERROR(COLUMN B,"01/01/2018"), use
IFERROR(COLUMN B,DATE(2018,1,1)).
| common-pile/stackexchange_filtered |
Prove that there exists a bijective mapping $g:A\rightarrow B \;$ so that $g(x)=y$.
Let $f:A\rightarrow B\;$be a bijective mapping, $x$ an element of $A$ and $y$ an element of $B$. Prove that there exists a bijective mapping $g:A\rightarrow B \;$ so that $g(x)=y$.
So the above is the full question and I am frustrated. Could anyone hint me and correct the thoughts below if they are incorrect?
If $f:A\rightarrow B\;$ be a bijective mapping, $x$ an element of $A$ and $y$ an element of $B$ then $\exists\, g:A\rightarrow B \;$ such that $g(x)=y$.
Prove by contrapositive: If there isn't a $g:A\rightarrow B \;$ such that $g(x)=y \;\forall x \in A \; \forall y \in B$, then there doesn't exist a bijective mapping $f:A\rightarrow B$.
Is proof by contrapositive correct?
If not, may I have some directions?
Hints preferred.
$g: z\mapsto \begin{cases} y, & z=x, \ f(x), & z=f^{-1}(y), \ f(z), & \textrm{otherwise}.\end{cases}$
I agree, if $x=f^{-1}(y)$ then we are done.
No, it is not correct. Basically, you are just saying that $\alpha$ implies $\beta$ because if $\beta$ were false, then $\alpha$ would be false too, and nothing else.
Let $z=f(x)$. Let $\varphi\colon B\longrightarrow B$ be that map such that:
$\varphi(z)=y$;
$\varphi(y)=z$;
$\varphi(b)=b$ if $b\in B\setminus\{y,z\}$.
Then $\varphi$ is a bijection. Therefore, $\varphi\circ f$ is a bijection too. But $(\varphi\circ f)(x)=y$.
Thanks for the answer. I understand the way of constructing the new function. The only incomprehensible thing is that what's the purpose of the question? To test whether I could form a new function $\varphi$?
I assume that the purpose of the question is primarily to test whether you can properly write down a full proof that such a function exists (by providing an example and proving that the example satisfies the required properties). Learning proper reasoning is likely the main focus of your classes right now.
If we already have $f(x)=y$, then of course $f$ itself would work for $g$. Otherwise, $g$ is obtained by letting $g(x)=y$, $g(f^{-1}(y))=f(x)$, and $g(z)=f(z)$ for $z \notin \{x, f^{-1}(y)\}$.
| common-pile/stackexchange_filtered |
PicklingError in pyspark (PicklingError: Can't pickle <class '__main__.Person'>: attribute lookup Person on __main__ failed)
I am unable to pickle the below class. I am using data bricks 6.5 ML (includes Apache Spark 2.4.5, Scala 2.11)
import pickle
class Person:
def __init__(self, name, age):
self.name = name
self.age = age
p1 = Person("John", 36)
pickle.dump(p1,open('d.pkl','wb'))```
PicklingError: Can't pickle <class '__main__.Person'>: attribute lookup Person on __main__ failed
Possible answer from here
The problem is that you're trying to pickle an object from the module where it's defined. If you move the class into a separate file and import it into your script, then it should work.
That solution isn't viable for me in an iPython notebook though. So here I some additional information from here
Python's pickle actually does not serializes classes: it does
serialize instances, and put in the serialization a reference to each
instance's class - and that reference is based on the class being
bound to a name in a well defined module. So, instances of classes
that don't have a module name, but rather live as attribute in other
classes, or data inside lists and dictionaries, typically will not
work.
One straight forward thing one can try to do is try to use dill
instead of pickle. It is a third party package that works like
"pickle" but has extensions to actually serialize arbitrary dynamic
classes.
While using dill may help other people reaching here, it is not your
case, because in order to use dill, you'd have to monkey patch the
underlying RPC mechanism PySpark is using to make use of dill instead
of pickle, and that might not be trivial nor consistent enough for
production use.
If the problem is really about dynamically created classes being
unpickable, what you can do is to create extra meta-classes, for the
dynamic classes themselves, instead of using "type", and on these
metaclasses, create proper getstate and setstate (or other
helper methods as it is on pickle documentation) - that might enable
these classes to be pickled by ordinary Pickle. That is, a separate
metaclass with Pickler helper methods to be used instead of type(...,
(object, ), ...) in your code.
However, "unpickable object" is not the error you are getting - it is
an attribute lookup error, which suggests the structure you are
building is not good enough for Pickle to introspect into it and get
all the members from one of your instances - it is not related (yet)
to the unpickleability of the class object. Since your dynamic classes
live as attributes on the class (which is not itself pickled) and not
of the instance, it is very well possible that pickle does not care
about it. Check the docs on pickle above, and maybe all you need there
is proper helper-method to pickle on you class, nothing different on
the the metaclass for all that you have there to work properly.
Instead of defining a Class, try defining a NamedTuple instead.
import pickle
from collections import namedtuple
Person = namedtuple("Person", "name age")
p1 = Person("John", 36)
pickle.dump(p1,open('d.pkl','wb'))
My solution:
Put class Person into a separate e.g. "utility.py"
import utility
import pickle
new_person = pickle.loads(pickle.dumps(utility.Person()))
It solved my ipykernel issue!
| common-pile/stackexchange_filtered |
List of all arrays not contained in other lists of arrays
I have a list of two dimensional points, represented as two element long lists/arrays. E.g.:
points =
[[ 10. , 10. ],
[ 11. , 10. ],
[ 10.5 , 9.1339746],
[ 10.5 , 10. ],
[ 10.75 , 9.5669873],
[ 10.25 , 9.5669873],
[ 2. , 2. ],
[ 3. , 2. ],
[ 2.5 , 1.1339746],
[ 2.5 , 2. ],
[ 2.75 , 1.5669873],
[ 2.25 , 1.5669873]]
I now want to have a list which does not contain certain elements of this first list.
exclude = [[2., 2.], [3., 2.], [2.5, 2.]]
Unfortunately
new_list = [p for p in points if p not in exclude]
will produce
[[ 10. , 10. ],
[ 11. , 10. ],
[ 10.5 , 9.1339746],
[ 10.5 , 10. ],
[ 10.75 , 9.5669873],
[ 10.25 , 9.5669873],
[ 2.75 , 1.5669873],
[ 2.25 , 1.5669873]]
instead of
[[ 10. , 10. ],
[ 11. , 10. ],
[ 10.5 , 9.1339746],
[ 10.5 , 10. ],
[ 10.75 , 9.5669873],
[ 10.25 , 9.5669873],
[ 2.5 , 1.1339746],
[ 2.75 , 1.5669873],
[ 2.25 , 1.5669873]]
It seems Python removes all elements here that have at least one element in common (and not all in common :/ ).
Is there any nice/short/elegant way to exclude elements if they are not completely contained in the first list?
Are these lists or arrays? in for arrays doesn't quite work like you'd expect.
it works for lists without any problem. Is it numpy arrays or lists?
It works just fine: http://ideone.com/JtRnc5
This question appears to be off-topic because it is about a non-existent problem with working code.
Note: Since this question has been tagged numpy, I'm assuming points is a NumPy array. If that's true, you could generate a boolean mask (array) using np.logical_and and np.logical_or:
import numpy as np
points = np.array(
[[ 10. , 10. ],
[ 11. , 10. ],
[ 10.5 , 9.1339746],
[ 10.5 , 10. ],
[ 10.75 , 9.5669873],
[ 10.25 , 9.5669873],
[ 2. , 2. ],
[ 3. , 2. ],
[ 2.5 , 1.1339746],
[ 2.5 , 2. ],
[ 2.75 , 1.5669873],
[ 2.25 , 1.5669873]])
exclude = [[2., 2.], [3., 2.], [2.5, 2.]]
mask = np.logical_or.reduce(
[np.logical_and.reduce(
[points[:,idx] == ex[idx] for idx in range(len(ex))]) for ex in exclude])
new_points = points[~mask]
print(new_points)
prints
[[ 10. 10. ]
[ 11. 10. ]
[ 10.5 9.1339746]
[ 10.5 10. ]
[ 10.75 9.5669873]
[ 10.25 9.5669873]
[ 2.5 1.1339746]
[ 2.75 1.5669873]
[ 2.25 1.5669873]]
You can also view the 2D array as a 1D array and then use np.in1d.
#Using @unutbu array's.
def view_1d(arr):
return arr.view(np.dtype((np.void,arr.dtype.itemsize * arr.shape[1])))
points_1d=view_1d(points)
exclude_1d=view_1d(exclude)
print points[~np.in1d(points_1d,exclude_1d)]
[[ 10. 10. ]
[ 11. 10. ]
[ 10.5 9.1339746]
[ 10.5 10. ]
[ 10.75 9.5669873]
[ 10.25 9.5669873]
[ 2.5 1.1339746]
[ 2.75 1.5669873]
[ 2.25 1.5669873]]
Just to double check the trickery is working and some ballpark timings:
points=np.random.rand(1E6,2)
points=np.around(points,1)
exclude=np.random.rand(1E2,2)
exclude=np.around(exclude,1)
t = time.time()
mask1 = ~(np.in1d(view_1d(points),view_1d(exclude)))
print time.time()-t
#0.469238996506
t = time.time()
mask2 = ~np.logical_or.reduce(
[np.logical_and.reduce(
[points[:,idx] == ex[idx] for idx in range(len(ex))]) for ex in exclude])
print time.time()-t
#7.13628792763
#Just to check this is doing what I think its doing.
print np.all(mask1==mask2)
True
Timings are just for generating the mask. Both methods appear to scale similarly, I just showed large arrays to (hopefully) compensate for not using timeit.
Your code as posted is not syntactically valid.
It produces (once corrected) your desired result:
--
[[10.0, 10.0],
[11.0, 10.0],
[10.5, 9.1339746],
[10.5, 10.0],
[10.75, 9.5669873],
[10.25, 9.5669873],
[2.5, 1.1339746],
[2.75, 1.5669873],
[2.25, 1.5669873]]
See: http://ideone.com/7LOpa6
points = [[ 10. , 10. ],
[ 11. , 10. ],
[ 10.5 , 9.1339746],
[ 10.5 , 10. ],
[ 10.75 , 9.5669873],
[ 10.25 , 9.5669873],
[ 2. , 2. ],
[ 3. , 2. ],
[ 2.5 , 1.1339746],
[ 2.5 , 2. ],
[ 2.75 , 1.5669873],
[ 2.25 , 1.5669873]]
exclude = [[2., 2.], [3., 2.], [2.5, 2.]]
print [p for p in points if p not in exclude]
Your answer entirely misses the problem. At least one of the objects involved is a numpy array. in does bizarre things that actually do produce the OP's problem for arrays.
@user2357112 None of the objects involved in the question is a numpy array. You're complaining that I haven't formulated a different question, and answered that.
The demonstrated behavior matches numpy's bizarre array __contains__ semantics, and the OP explicitly mentions arrays and used a numpy tag. Why would you assume there are no arrays involved?
| common-pile/stackexchange_filtered |
Digitize graphs. Pull out data from points in a scatter plot
I have a project where I would like to extract data from a series of scatter plots that are image files (Jpeg or png). The plots are similar but the axes scales are not always exactly the same. I have looked online and there are some web app solutions that are very “manual” and require a lot of clicking. This would get tiresome if there are a lot of graphs.
Does anyone know of any solutions that are partly or wholly automated? A python API solution would be ideal but other languages are also ok.
Digitizing Graph. I know a tool that can semi-automatically extract data, (scattered points and curves) from graph images within a few clicks. But it is not in Python.
It is PlotDigitizer.com. I have used the tool and based on my experience it is better than some others I have used earlier.
Here is a simple guide to use it:
Upload the image graph
Calibrate both axes graph - you have to do it manually
Pick the color of points or curves you want to extract
Select auto-extracting algorithms, e.g. points, curves
Done. You have extracted the data points.
You can find a more detailed guide on its website: plotdigitizer.com.
| common-pile/stackexchange_filtered |
orange screen with vertical stripes
After having installed Ubuntu without Internet conecction, I restart the PC, go to BIOS, change boot priority to internal harddrive, save and exit. Then the PC starts again but the screen gets orange with vertical stripes and stays like that. I tried both versions 64 bit and 32 bit 12.04. I can't use internet before having Ubuntu, because y have to make a network configuration(this is not allowed in the trial version before the installed version).
I have just installed the 32 bits version but happened exactly like you, start Ubuntu in recovery mode from grub, there start session in low graphics mode (I don't remember the name exactly) and install the graphic drivers, that solved my problem for now, if not, do it from a terminal, from recovery mode too, it may work to you.
Curiously, I think Ubuntu works behind that screen, because I wrote the sudo shutdown now in a terminal, without being able to see it, and worked. Of course, I knew all the steps from memory, if not, it would have been impossible.
Are you sure that it is Ubuntu that is making this problem? Do you see the screen getting orange before (meaning you can't see BIOS/Grub/Ubuntu Boot), during, or after BIOS/Grub/Ubuntu boot?
Try running the PC with an external monitor and see if you get the same result displaying on the monitor. It could very well be that coincidentally your computer's screen messed up around the same time of your Ubuntu installation?
I don't know that much, just giving you some ideas.
nope, i can still reboot from USB and run the trial version and it looks perfect, its just when I run the installed version when it boots to orange screen.
Well then we can rule out hardware failure. One less thing to worry about, I suppose. Beyond that, I can't really help you. Hope you find someone more knowledgable than me to help you.
I istalled Ubuntu 12.10 now and got the same result.
I think Ubuntu is not compatible with the hard drive, thats why the trial version that runs on the USB or CD works fine but the installed version that runs on the hard drive doesn't.
My hard drive is --Hitachi Deskstar 7K1000.C 500GB SATA II 16MB--
Can someone confirm this is the reason?
I start Ubuntu in recovery mode, and update every thing the system suggested! Restart the computer, it fixed!
And what were those things? Why did it solve the problem?
| common-pile/stackexchange_filtered |
ASP.NET site memory use up to 99% if IIS app pool NOT enable 32-bit applications
We have a ASP.NET MVC3 site on a 8GB memory Windows server 2019, when I enable 32bit applications on of IIS app pool, the site running well, cpu and memory use(about 300MB) of w3wp.exe also low and noraml. But if I disable 32bit applications of IIS app pool, the memory use will rise to 99%. Anyone knows why?
I dumped memory when up to 99% by perfview, it says System.Globalization object consumed the memory, but I'm not sure. Please help, Thanks in advance!
you may want to search existing Q&A's and potential pose this question to the sister site ServerFault community as it pertains to administering and monitoring. please review the ServerFault on-topic. Having said, I wouldn't necessarily construe this as off-topic to SO.
as a wild guess, does your MVC application use a bundler that is registered as an IIS or ASP.NET Module?
@BrettCaswell no bundler
Could you please paste a screenshot with all columns included (Inc %, Inc etc.)? And please clear the GroupPats text box so Perfview will do no grouping. Also, which type of memory are you checking? Is it private bytes?
dumped w3wp.exe process memory, see 2nd image.
| common-pile/stackexchange_filtered |
How Should we use comma with present participles acting as adverbial clauses?
Compare:
This portrait was drawn using watercolors. (PP at the end, Passive Voice)
Using watercolors, the artist drew this portrait. (PP at the front, Active Voice)
The artist drew this portrait, using watercolors. (PP at the end, Active Voice)
I get confused when I want to use present participles acting as adverbs. How we should use comma in these cases? In terms adverbial clauses with subordinate conjunctions, I know how I should use comma. It is very straight forward.
Similar things on the Internet:
In this link, and on this page, there is a bunch of discussions in the case of the present participles coming at the end of a sentence.
Maybe "This picture was painted in watercolor/with watercolors?" or "This painting was done ...?" The first sentence sounds somewhat unnatural in a passive voice. Don't you think so?
@Rompey They are just examples concocted by me! The key idea is about the comma. Perhaps, I constructed bad examples.
Yes, but you're using passive voice together with PP which is supposed to modify the subject .
The first example seems fine. It sounds like some is explaining the details of a painting, like at an art gallery. Although, maybe use "watercolors". I'm not sure though. They both seem fine to me.
@probablyme To me, it sounds like the picture, when it was being painted, was using watercolors by itself.
The first sentence is awkward since drawings are drawn, and paintings are painted. "The painting was done using/in watercolours" sounds more natural, but it's only an example for syntax.
@Rompey I don't know. I mean I don't read it that way. Maybe because logically, a painting cannot draw itself, so my mind discards that possibility. So maybe you are right that you can read it that way. To Cardinal, example two also seems fine, though maybe "colors". In the third one, you are missing something between drew and _painting, like a/the/this. Also, I think it is wrong to use a comma, but I have no proof. Also, Peter brings up another good point. I didn't occur to me because the issue is the comma, not the verbs.
@Cardinal I think that "The artist painted this picture using watercolors" as a non-ambiguous example sentence wouldn't arise any debate at all. My opinion, only.
@Rompey peter, and probablyme: Thank you guys, I have edited the post.
I am really passionate to know why this question deserves a "Down-Vote". It is interesting.
As far as commas are concerned, the first two sound correct to me, while the last one does not. Someone with more knowledge of formal grammar rules might be able to give a more exact answer, but to me
The artist painted this portrait using watercolors
Sounds correct.
I would guess this has less to do with the difference between restrictive and non-restrictive clauses, like in the link you posted, and more to do with the word "using". In your example, I believe "using" is acting as a preposition and not a participle. For example, the sentence is equally valid as:
The artist painted this portrait with watercolors.
Similarly:
The policeman stopped the criminal using a stun gun.
could be understood as
The policeman stopped the criminal with a stun gun.
Importantly, there is ambiguity in the first one about whether "using" is a participle, which would mean the criminal is using a stun gun, or a preposition, so in this case using "with" would be preferable. Because a portrait can clearly not "use" watercolors, 'using' only makes sense as a preposition.
Good question, though.
Thanks, such ambiguities in using reduced clauses or present participles really bother me and make me hesitate about correctness of the sentence. Consequently, I forced to use Ngram and google search. Is there any chance that previous contents overwhelm such ambiguities? It was my question for a while.
"Is there any chance that previous contents overwhelm such ambiguities?" I'm not quite sure what you mean by this. Can you rephrase it?
with a gun or using a gun, if placed after "criminal", could be taken to mean that the criminal had the gun. If you want to make clear that it was the policeman who used the gun to stop the criminal, you would relocate the phrase: "Using a gun, the policeman stopped the criminal" or "The policeman, using a gun, stopped the criminal" or better yet, "The policeman used a gun to stop the criminal".
Oh sorry, I mean the can such ambiguity be overwhelmed by the previous sentences in the context?
I think so, yes. For example, if a news story said: "Police arrested a bank robber without incident today. A policeman stopped the robber using his stun gun". Technically that ambiguity still exists, but it would almost certainly be understood just fine.
| common-pile/stackexchange_filtered |
How do i rerender my screen after deletion from list?
Hello I've been looking through several threads on stackoverflow but I haven't been able to solve my problem. I have an app where you can save movies to a watchlist. On this specific screen I want to display a users watchlist and give them the ability to delete it from the list. Currently the function is indeed deleting the movie from the list and removing it from firebase but i can't get my screen to rerender to visually represent the deletion.
This is the code right now:
export default function MovieWatchlistTab(props: any) {
let { movies } = props;
let movieWatchlist: any[] = [];
const [watchlistSnapshot, setWatchlistSnapshot] = useState();
const user: firebase.User = auth().currentUser;
const { email } = user;
const watchlistRef = firestore().collection("Watchlist");
useEffect(() => {
getWatchlistSnapshot();
}, []);
const getWatchlistSnapshot = async () => {
setWatchlistSnapshot(await watchlistRef.where("userId", "==", email).get());
};
const convertDataToArray = () => {
const convertedMovieList = [];
for (let movie in movies) {
let newMovie = {
backdrop: movies[movie].backdrop,
overview: movies[movie].overview,
release: movies[movie].release,
title: movies[movie].title,
};
convertedMovieList.push(newMovie);
}
movieWatchlist = convertedMovieList;
};
const renderMovieList = () => {
convertDataToArray();
return movieWatchlist.map((m) => {
const handleOnPressDelete = () => {
const documentRef = watchlistRef.doc(watchlistSnapshot.docs[0].id);
const FieldValue = firestore.FieldValue;
documentRef.set(
{
movies: {
[m.title]: FieldValue.delete(),
},
},
{ merge: true }
);
movieWatchlist.splice(
movieWatchlist.indexOf(m),
movieWatchlist.indexOf(m) + 1
);
console.log("movieWatchlist", movieWatchlist);
};
const swipeButtons = [
{
text: "Delete",
color: "white",
backgroundColor: "#b9042c",
onPress: handleOnPressDelete,
},
];
return (
<Swipeout right={swipeButtons} backgroundColor={"#18181b"}>
<View key={m.title} style={{ marginTop: 10, flexDirection: "row" }}>
<Image
style={{ height: 113, width: 150 }}
source={{
uri: m.backdrop,
}}
/>
<View>
<Text
style={{
flex: 1,
color: "white",
marginLeft: 10,
fontSize: 17,
}}
>
{m.title}
</Text>
<Text style={{ flex: 1, color: "white", marginLeft: 10 }}>
Release: {m.release}
</Text>
</View>
</View>
</Swipeout>
);
});
};
return (
<View
style={{
flex: 1,
justifyContent: "center",
alignItems: "center",
backgroundColor: "#18181b",
}}
>
<ScrollView
style={{ flex: 1 }}
contentContainerStyle={{
width: Dimensions.get("window").width,
}}
>
{renderMovieList()}
</ScrollView>
</View>
);
}
I've been trying to play around with useStates and I think the answer is in that direction but I just can't seem to get it to work anyway. Any help would be appreciated!
before removing from firebase, filter the item from the array and show the filtered array
There are a few lines in your code that show a misunderstand of React state. You have a value let movieWatchlist: any[] = []; that you reassign in convertDataToArray() and mutate in handleOnPressDelete. That's just not how we do things in React and it's not going to trigger updates properly. movieWatchlist either needs to be a stateful variable created with useState.
Do the movies passed in through props change? If they do, then you don't need to store them in state here. You could just return an array from convertDataToArray() rather than setting a variable and returning void.
To be honest it's really not clear what convertDataToArray is even doing as it seems like newMovie is either the same or a subset of the original movie object. If the point is to remove the other properties aside from these four, that's not actually needed. If the prop movies is already an array, just delete this whole function and use movies directly. If it's a keyed object, use Object.values(movies) to get it as an array.
I'm thoroughly confused as to what we are getting from props and what we are getting from firebase. It seems like we would want to update the snapshot state after deletion, but you only run your useEffect once on mount.
You may still have errors, but this code should be an improvement:
interface Movie {
backdrop: string;
overview: string;
release: string;
title: string;
}
const MovieThumbnail = (props: Movie) => (
<View key={props.title} style={{ marginTop: 10, flexDirection: "row" }}>
<Image
style={{ height: 113, width: 150 }}
source={{
uri: props.backdrop
}}
/>
<View>
<Text
style={{
flex: 1,
color: "white",
marginLeft: 10,
fontSize: 17
}}
>
{props.title}
</Text>
<Text style={{ flex: 1, color: "white", marginLeft: 10 }}>
Release: {props.release}
</Text>
</View>
</View>
);
export default function MovieWatchlistTab() {
const [watchlistSnapshot, setWatchlistSnapshot] = useState<DocumentSnapshot>();
const user: firebase.User = auth().currentUser;
const { email } = user;
const watchlistRef = firestore().collection("Watchlist");
const getWatchlistSnapshot = async () => {
const results = await watchlistRef.where("userId", "==", email).get();
setWatchlistSnapshot(results.docs[0]);
};
useEffect(() => {
getWatchlistSnapshot();
}, []);
const deleteMovie = async (title: string) => {
if ( ! watchlistSnapshot ) return;
const documentRef = watchlistRef.doc(watchlistSnapshot.id);
const FieldValue = firestore.FieldValue;
await documentRef.set(
{
movies: {
[title]: FieldValue.delete()
}
},
{ merge: true }
);
// reload watch list
getWatchlistSnapshot();
};
// is this right? I'm just guessing
const movies = ( watchlistSnapshot ? watchlistSnapshot.data().movies : []) as Movie[];
return (
<View
style={{
flex: 1,
justifyContent: "center",
alignItems: "center",
backgroundColor: "#18181b"
}}
>
<ScrollView
style={{ flex: 1 }}
contentContainerStyle={{
width: Dimensions.get("window").width
}}
>
{movies.map((m) => (
<Swipeout
right={[
{
text: "Delete",
color: "white",
backgroundColor: "#b9042c",
// need to pass the title to the delete handler
onPress: () => deleteMovie(m.title)
}
]}
backgroundColor={"#18181b"}
>
<MovieThumbnail {...m} />
</Swipeout>
))}
</ScrollView>
</View>
);
}
| common-pile/stackexchange_filtered |
SharePoint Online New Team Site for groups
When publishing news in a Group site it will showcase it under News (like this) what is the logic behind this??
In here it will publish only the news. Even if I create a page. It won't show there.
I need a code clarification.
If you download both pages and inspect the contents, you will find that in the news page , you have PromotedState value as 2.
The contents in the news page are as below:
<mso:PromotedState msdt:dt="string">2.00000000000000</mso:PromotedState>
Where as in case of site page, its is as below:
<mso:PromotedState msdt:dt="string">0</mso:PromotedState>
Right now, you can only provision news page via the news list/news headlines webpart (not sure programmatically). If you create it via Site Contents > Site Pages, it will create a simple site page only which wont be visible inside news webpart since the PromotedState value is 0 in its case.
Reference - What makes a page to be a news page on SharePoint Online
Modern pages and news pages
| common-pile/stackexchange_filtered |
XeTex on ubuntu
I got an error
(/usr/share/texmf/tex/latex/beamer/base/themes/o/beamerouterthemedefault.st
y))) (/usr/share/texmf-texlive/tex/xelatex/fontspec/fontspec.sty
!
********************************************
* XeTeX is required to compile this document.
* Sorry!
********************************************.
\RequireXeTeX ...********************************}
\endgroup \fi
l.18 \RequireXeTeX
But in the command line I can call xelatex and xetex.
It looks like the problem is with lexlive, but I have installed it.
What can be the problem?
Are you starting the TeX processor out of an editor? Normally those use latex or pdftex as default engines, you need to change it to xelatex or xetex in the editor. If you are using TeX Works coming with TeX Live, there is a dropdown menu next to the "compile button". It is no problem with TeX Live, as it is working on the command line, your editor is just using the wrong engine by default for this file.
to compile I use lualatex, do you know how can I set it up?
Could you please post a full example of your code? It looks like it is an fontspec error, but fontspec should work with lualatex…
We need more detail of what is actually being done: you don't get this error 'by accident'.
| common-pile/stackexchange_filtered |
Using javascript or python regular expressions I want to determine if the number is 1-99 how to make it?
var category = prompt("where do you go? (1~99)", "");
hello
Using regular expressions I want to determine if the category is 1-99.
How can I solve it?
Thank you if you let me know.
Why not simply use a if statement ? i.e. if( x >= 1 && x <= 99)
You can use character classes to match digits, like this [0-9]. If you put two of them together you'll match 00 - 99. If you put a ? after one of them, then it's optional, so you'll match 0 - 99. To enforce 1-99, make the non-optional one like this [1-9]. Finally, you need to make sure there's nothing before or after the one or two digits using ^, which matches the beginning of the string, and $ which matches the end.
if (category.match(/^[1-9][0-9]?$/)){
console.log("ok")
} else {
console.log("not ok")
}
Note that match requires a regex, not a string. It will be implicitly converted, but it's better practice to use a regex /^[0-9][1-9]?$/.
Very elegant regex BTW, +1.
Thanks for letting me know my question. It works well. thank you~!
In JavaScript you can use test() method with RE for 1-99 as shown below:
var one_to_ninetynine = /^[1-9][0-9]?$/i;
if(one_to_ninetynine.test(category)) {
console.log("The number is between 1-99");
} else {
console.log("The number is NOT between 1-99");
}
thank you for good answer I'll try it too It seems intuitive too
| common-pile/stackexchange_filtered |
Visual Level Filter Equal To Column Value
I have a measure in my date table that calculates the week number of the year for last week. LastWeekNum = WEEKNUM(TODAY(), 1) -1
I want to apply this to a visual, so logically: WeekOfYear = LastWeekNum.
Is there some way to set this up?
This is possible to do with some DAX and model editing, but I'd first try using Relative date filtering in the visual level filter settings to see if you can do it the easy way.
If that doesn't work, I'd suggest creating a Boolean column on your date table and using that column to filter your visual instead. Visual level filter, Basic filtering, IsLastWeekNum is 1
IsLastWeekNum = IF(WEEKNUM(DateTable[Date]) = [LastWeekNum], 1, 0)
Thanks for the tip, but unfortunately thats not what I need in my case.
The end result I'm building will have sales totals for 2016,2017,2018 for a specific week of the year, so I cannot use relative date filtering.
| common-pile/stackexchange_filtered |
save text file more then 8kb (80 lines) fwrite php
I am trying rewrite result.txt file after delete first row, which have total 7000 rows (700 kb) size.
Simply open the file, delete the first line then save again.
But fwrite function only save 80 rows (8kb) for me. And remaining rows auto deleted.
$outfile= "result.txt";
$o = fopen($outfile,"w");
while (!feof($handle)) {
$buffer = fgets($handle,2048);
fwrite($o,$buffer);
}
fclose($handle);
fclose($o);
rename($outfile, 'result.txt');
How to write big file with fwrite?
Possible duplicate of PHP fwrite() for writting a large string to file
@RussJ I try already but its not solved my problem
Buffer size should be set on fwrite invokation as well, however, PHP actually should return a properly terminated string.
Might there be some limitation on your file system?
have you tried to replace "w" to "w+" or "wa+" on your fopen()?
@Roshan the strange thing here is that oviously the loop must run 4 times (4*2048 = 8k)
@Roshan w+ also not working
@Quasimodo'sclone system is ok
Have you tried to to write a large file with some fixed string in a loop? You do not show how you open $handle. Is it an upload file?
Try also filesize('source-file.txt'), output a counter within the loop an var_dump(feof($handle)) immediately after the loop. Add the outputs to your question.
@Quasimodo'sclone This is the whole code, Where I get first line URL and work with this, And after that I delete the first row URL and rewrite file.
Problem solved by replacing w to c+
Now code looks like this
$outfile= "result.txt";
$o = fopen($outfile,"c+");
while (!feof($handle)) {
$buffer = fgets($handle,2048);
fwrite($o,$buffer);
}
fclose($handle);
fclose($o);
rename($outfile,$file);
| common-pile/stackexchange_filtered |
Error using bees with machineguns
I have tried to use beeswithmachineguns for loadtesting a site but with little success. There is a similar post but the error I receive is slightly different.
I use the following to start the bees.
bees up -s 1 -k Bees-West -g SWARM
The error I am getting is related to the groupId being empty but I am passing that value in with -g SWARM... or so I thought. My setup uses the us-west-2 region, which I have in my .boto file as noted in the linked post.
Connecting to the hive.
Attempting to call up 1 bees.
Traceback (most recent call last):
File "/usr/local/bin/bees", line 5, in <module>
main.main()
File "/Library/Python/2.7/site-packages/beeswithmachineguns/main.py", line 127, in main parse_options()
File "/Library/Python/2.7/site-packages/beeswithmachineguns/main.py", line 111, in parse_options
bees.up(options.servers, options.group, options.zone, options.instance, options.login, options.key)
File "/Library/Python/2.7/site-packages/beeswithmachineguns/bees.py", line 104, in up
placement=zone)
File "/Library/Python/2.7/site-packages/boto/ec2/connection.py", line 618, in run_instances
return self.get_object('RunInstances', params, Reservation, verb='POST')
File "/Library/Python/2.7/site-packages/boto/connection.py", line 699, in get_object
raise self.ResponseError(response.status, response.reason, body)
boto.exception.EC2ResponseError: EC2ResponseError: 400 Bad Request
<?xml version="1.0" encoding="UTF-8"?>
<Response><Errors><Error><Code>InvalidParameterValue</Code><Message>Value () for parameter groupId is invalid. The value cannot be empty</Message></Error></Errors><RequestID>bd1c6bcb-875f-4294-af64-84853b5b258a</RequestID></Response>
You need to be sure to use the correct combination of subnet (-v) and security group (-g).
Are the instances bees utilizes ones I have setup or are they created dynamically?
I have an instance in us-west-2a, but the error I get when attempting to run bees is "Invalid availability zone: [us-west-2a]"
I don't see subnet(-v) listed as a parameter in the bees help, only zone (-z) is that the parameter you are referring to?
The up command has a -v (or --subnet) option for "The vpc subnet id in which the instances should be launched. (default: None)." The bees are dynamically launched. Not sure why you see issues with us-west-2a.
I suspect I am running in to the outdated installation instructions noted here: https://github.com/newsapps/beeswithmachineguns/issues/106. I will try uninstalling from pip and start fresh from source.
@MarcusF if that solves it, please post here. If it doesn't, please post your most recent error.
Can you please make sure that the group "SWARM" is present in AWS by Clicking on Ec2->Security groups at the left side->check the group name.
| common-pile/stackexchange_filtered |
How to set check box checked by comparing the database value in struts1.3?
In my Struts1.3 application, I have a Jsp page where i have a hyperlink on clicking of that hyperlink i am showing all the records that is coming from database in checkbooks mycode is to showing all the values are as follows
<table width="932" border="1" align="left" cellpadding="0" cellspacing="0" bordercolor="#E9E9E9">
<%for (int i = 0; i < landlordList.size(); i++) {%>
<tr>
<td width="50" align="left">
<table width="30" border="0" align="center" cellpadding="0" cellspacing="0">
<tr>
<td width="20"><label>
<html:checkbox value="<%=landlordList.get(i).getLandlordId()%>" property="landlordId"
name="ExporterForm" styleId="landlordId"/>
</label>
</td>
</tr>
</table>
</td>
<td class="landlord_name"><%=landlordList.get(i).getLandlordname()%></td>
</tr>
<%}%>
</table>
and property="landlordId" is defined as a String[] in my action class. And i have one more list2 which contain few records my query is. How can i compare the list2 with landlordList if landlordList conatains any record of list2 then mark corresponding to these records as checked. please help me.
I got the solution by using multibox
<logic:iterate name="<%=Constant.LANDLORDLIST%>" id="customer">
<tr>
<td width="50" align="left"><table width="30" border="0" align="center" cellpadding="0" cellspacing="0">
<tr>
<td width="20">
<label>
<html:multibox property="landlordid" name="ExporterForm">
<bean:write name="customer" property="value" />
</html:multibox>
</label>
/td>
</tr>
</table></td>
<td class="landlord_name"><bean:write name="customer" property="label" /></td>
</tr>
</logic:iterate>
By using multibox and logic Iterate, I am able to solve this.
| common-pile/stackexchange_filtered |
Exported arrow to PDF or EPS messes up its tip
The code
g = Graphics[{AbsoluteThickness[20], Arrowheads[0.5], Arrow[{{0, 0}, {1, 1}}]}, PlotRange -> {{0, 1.1}, {0, 1.1}}]
Export["Arrow.eps", g];
produces an image with a corrupted arrow tip. I'm using version 12.1 on MacOS 11.1.
Here's the result I get
How can I work around this problem?
Edit: Tried in Windows 10: Same problem.
I have reported a very similar bug before. Please do report it to Wolfram. If you open it with an editor like Illustrator, you will see that the arrowhead has an outline (stroke) of non-zero width which is responsible for this problem. It should not.
You can export a rasterized version. Or you can open the output in a graphics editor and remove the strong from the arrowheads. These are of course not ideal. I would try creating my own custom arrowhead so I can control the stroke. Let us know if that approach worked.
Does this answer your question? How to stop Mathematica 12.1 from chopping off the axes arrows?
It would be nice to see this problem solved without so many workarounds, though. I think the answer there from HD might work for you?
Who/What is HD?
I think they’re referring to the username for the person that gave an answer in the link which is HD2006.
| common-pile/stackexchange_filtered |
generate unique id for array value groups
I have an asosiative array which contains data about teams and players.
Example:
$arr = array(
array('teamID'=> '','teamName' => 'USA', 'playerName'='John'),
array('teamID'=> '','teamName' => 'USA', 'playerName'='Peter'),
array('teamID'=> '12','teamName' => 'Norway', 'playerName'='Zigmund'),
array('teamID'=> '','teamName' => 'USA', 'playerName'='Parker'),
array('teamID'=> '','teamName' => 'Norway', 'playerName'='Jan'),
array('teamID'=> '','teamName' => 'USA', 'playerName'='Hector'),
array('teamID'=> '','teamName' => 'Germany', 'playerName'='Alexander'),
array('teamID'=> '','teamName' => 'Slovakia', 'playerName'='Ivan')
);
I want to generate unique ID for each team if it is not present, if the id is present for some team use it on same team names if they dont exist there, and do not use id's which already exists.
What I have did is simple check if not exists ad index of the foreach loop, bet then it is per player not per team.
Expected outcome:
$arr = array(
array('teamID'=> '1','teamName' => 'USA', 'playerName='John'),
array('teamID'=> '1','teamName' => 'USA', 'playerName'='Peter'),
array('teamID'=> '12','teamName' => 'Norway', 'playerName'='Zigmund'),
array('teamID'=> '1','teamName' => 'USA', 'playerName'='Parker'),
array('teamID'=> '12','teamName' => 'Norway', 'playerName'='Jan'),
array('teamID'=> '1','teamName' => 'USA', 'playerName'='Hector'),
array('teamID'=> '2','teamName' => 'Germany', 'playerName'='Alexander'),
array('teamID'=> '3','teamName' => 'Slovakia', 'playerName'='Ivan')
);
Any ideas on how to solve this?
Your question is bit unclear. can you add your expected outcome in your question
Put the team names you encounter while looping over this data into an array, so that you can check whether the current team name already exists in that array …?
@CBroe yup that does, but it wont check the teams which have ID from before
From where 1,2,3 are coming for teamID?
@AlivetoDie they should be auto generated, but grouped
@Giedrius " it wont check the teams which have ID from before" . To me that sounds exactly like what CBroe's suggestion would do. You check against the previous teams you encountered while looping.
“but it wont check the teams which have ID from before” - yeah well then put the existing id into the array if there is one, else create a new one …?
@CBroe, good idea, thanks!
@Giedrius Have you realized that your accepted answer is not correct?
This would solve your problem (as one of many possible solutions).
Here we have an array holding each team name as a key, and an incremented numerical ID for every occurence of a new team name. Then we check if the key exists, if it does, we reuse the ID that is assigned to it. If it doesn't exist, we create it and add an ID, and then increment the integer.
$teams_with_ids = [];
$teamids = [];
$i=0;
foreach( $arr AS $team ){
if( array_key_exists($team['teamName'], $teamids) ){
$team['teamID'] = $teamids[$team['teamName']];
} else {
$teamids[$team['teamName']] = $i;
$team['teamID'] = $i;
$i++;
}
array_push($teams_with_ids, $team);
}
EDIT:
As pointed out in the comment, the above solution did not account for existing ID's on some teams. This does:
$teams_with_ids = [];
$teamids = [];
$existing_ids = array_filter((array_map(function($team){ if( !empty( $team['teamID'] ) ) return intval($team['teamID']); },$arr)));
$i=0;
foreach( $arr AS $team ){
if( array_key_exists($team['teamName'], $teamids) ){
$team['teamID'] = $teamids[$team['teamName']];
} else {
if( in_array( $i, $existing_ids ) ) $i++; // Adding +1 to $i since the ID is already taken
$teamids[$team['teamName']] = (!empty($team['teamID']) && in_array($team['teamID'], $existing_ids)) ? $team['teamID'] : $i;
$team['teamID'] = (empty($team['teamID'])) ? $i : $team['teamID'];
if( empty($team['teamID'] ) ) $i++;
}
array_push($teams_with_ids, $team);
}
This one does not preserve the already existing ID for Norway.
Good spot @MarcusKreusch I actually didn't see that he had one with an already assigned ID.
This manipulates the original array directly and adds the “missing” IDs:
$teams = [];
$id_counter = 1;
$teamids = [];
foreach($arr as $entry) {
$teamids[] = $entry['teamID'];
}
array_unique($teamids);
foreach($arr as &$entry) {
if(!isset($teams[$entry['teamName']])) {
if($entry['teamID'] == '') {
while(in_array($id_counter, $teamids)) {
$id_counter++;
}
$teamids[] = $id_counter;
array_unique($teamids);
$teams[$entry['teamName']] = $id_counter;
}
else {
$teams[$entry['teamName']] = $entry['teamID'];
$teamids[] = $entry['teamID'];
array_unique($teamids);
}
}
$entry['teamID'] = $teams[$entry['teamName']];
}
unset($entry);
This one does not check for the already existing keys - if Norway would have had ID=2 you produce a duplicate ID
@MarcusKreusch you’re right, I modified the solution to take that into account as well now.
Your method doesn't accommodate a possible set of data: http://sandbox.onlinephpfunctions.com/code/2ceac66c51954817205e526eee42b7c4089ad24b
I think the correct solution will be this one - none of the others I tried worked as expected.
$arr = array(
array('teamID'=> '', 'teamName' => 'USA', 'playerName'=>'John'),
array('teamID'=> '', 'teamName' => 'USA', 'playerName'=>'Peter'),
array('teamID'=> '12', 'teamName' => 'Norway', 'playerName'=>'Zigmund'),
array('teamID'=> '', 'teamName' => 'USA', 'playerName'=>'Parker'),
array('teamID'=> '', 'teamName' => 'Norway', 'playerName'=>'Jan'),
array('teamID'=> '', 'teamName' => 'USA', 'playerName'=>'Hector'),
array('teamID'=> '', 'teamName' => 'Germany', 'playerName'=>'Alexander'),
array('teamID'=> '', 'teamName' => 'Slovakia', 'playerName'=>'Ivan'),
);
function getTeamIdFromName($arr, $teamName){
foreach($arr as $element){
if($element["teamName"] == $teamName && !empty($element["teamID"])){
return $element["teamID"];
}
}
return false;
}
function getNewTeamId($arr){
$existingIds = array_unique(array_column($arr, 'teamID'));
$id = 1;
while(in_array($id, $existingIds)) $id++;
return $id;
}
foreach($arr as $k=>$element){
if(empty($element['teamId'])){
if(!($id = getTeamIdFromName($arr, $element["teamName"]))){
$id = getNewTeamId($arr);
}
$arr[$k]['teamID'] = $id;
}
}
Please note that you should use quotes for your array keys and the ">" for the player names where missing.
To avoid performing iterated lookups for teamID values while traversing your input array, it is best practice to generate a lookup array first / separately.
Creating the lookup array is certainly more tedious than applying it. I've commented the temporary array values to help you to understand what is generated at each step. Using relevant variable names and array functions (which improve code comprehension), I think it shouldn't be too hard to follow.
For those that are unable to compare code performance, MarcusKreusch's answer is currently the only other answer that provides the correct results. However, it is doing two scans (within the custom function calls) of the input array on each iteration of the input array. My solution is more direct and efficient because it uses fewer iterated function calls / loops / conditions.
Code: (Demo)
$lookup=array_column($arr,'teamID','teamName'); // var_export($lookup); // ['USA'=>'','Norway'=>'','Germany'=>'','Slovakia'=>'']
$positive_ids=array_filter(array_flip(array_column($arr,'teamName','teamID'))); // var_export($positive_ids); // ['Norway'=>12]
$i=0;
foreach($lookup as $name=>&$id){
if(isset($positive_ids[$name])){
$id=$positive_ids[$name];
}else{
while(in_array(++$i,$positive_ids)); // avoid collisions between existing and new ids
$id=$i;
}
} // var_export($lookup); // ['USA'=>1,'Norway'=>12,'Germany'=>2,'Slovakia'=>3]
foreach($arr as &$row){
$row['teamID']=$lookup[$row['teamName']]; // make id assignments
}
Result: (modified $arr now contains...)
array(
array('teamID'=> 1,'teamName' => 'USA', 'playerName'=>'John'),
array('teamID'=> 1,'teamName' => 'USA', 'playerName'=>'Peter'),
array('teamID'=> 12,'teamName' => 'Norway', 'playerName'=>'Zigmund'),
array('teamID'=> 1,'teamName' => 'USA', 'playerName'=>'Parker'),
array('teamID'=> 12,'teamName' => 'Norway', 'playerName'=>'Jan'),
array('teamID'=> 1,'teamName' => 'USA', 'playerName'=>'Hector'),
array('teamID'=> 2,'teamName' => 'Germany', 'playerName'=>'Alexander'),
array('teamID'=> 3,'teamName' => 'Slovakia', 'playerName'=>'Ivan')
)
I want to clarify that my solution appropriately handles two possible and troublesome input arrays:
Issue: Gaps in incremented ids
$arr = array(
array('teamID'=> '','teamName' => 'USA', 'playerName'=>'John'),
array('teamID'=> '','teamName' => 'USA', 'playerName'=>'Peter'),
array('teamID'=> '','teamName' => 'Norway', 'playerName'=>'Zigmund'),
array('teamID'=> '','teamName' => 'Slovakia', 'playerName'=>'Ivan'),
array('teamID'=> '','teamName' => 'USA', 'playerName'=>'Parker'),
array('teamID'=> '12','teamName' => 'Norway', 'playerName'=>'Jan'),
array('teamID'=> '','teamName' => 'USA', 'playerName'=>'Hector'),
array('teamID'=> '','teamName' => 'Germany', 'playerName'=>'Alexander')
);
Upon close inspection, you will see that the first occurrence of Norway is without an id. Any method that is looping the array to assign new keys will deem Norway in need of an incremented id. Since Norway comes after USA (which claims 1), Norway's id is given 2. Then Slovakia is given 3. Then the id for Norway is overwritten as 12. Finally, Germany is given 4. This leaves gaps in the incrementation.
Issue: Collision between existing and new ids
$arr = array(
array('teamID'=> '','teamName' => 'USA', 'playerName'=>'John'),
array('teamID'=> '','teamName' => 'USA', 'playerName'=>'Peter'),
array('teamID'=> '2','teamName' => 'Norway', 'playerName'=>'Zigmund'),
array('teamID'=> '','teamName' => 'USA', 'playerName'=>'Parker'),
array('teamID'=> '','teamName' => 'Norway', 'playerName'=>'Jan'),
array('teamID'=> '','teamName' => 'USA', 'playerName'=>'Hector'),
array('teamID'=> '','teamName' => 'Germany', 'playerName'=>'Alexander'),
array('teamID'=> '','teamName' => 'Slovakia', 'playerName'=>'Ivan')
);
Without a check for id collisions, the above array will generate two teams with 2 as the id.
Not the best way, but works :
$arr = array(
array('teamID' => '', 'teamName' => 'USA', 'playerName' => 'John'),
array('teamID' => '', 'teamName' => 'USA', 'playerName' => 'Peter'),
array('teamID' => '12', 'teamName' => 'Norway', 'playerName' => 'Zigmund'),
array('teamID' => '', 'teamName' => 'USA', 'playerName' => 'Parker'),
array('teamID' => '4', 'teamName' => 'Norway', 'playerName' => 'Jan'),
array('teamID' => '', 'teamName' => 'USA', 'playerName' => 'Hector'),
array('teamID' => '', 'teamName' => 'Germany', 'playerName' => 'Alexander'),
array('teamID' => '', 'teamName' => 'Slovakia', 'playerName' => 'Ivan'),
);
// build array with existing ids
$ids = array();
foreach ($arr as $row) {
if ($row['teamID'] !== '') {
$ids []= $row['teamID'];
}
}
// start from
$id = 1;
foreach ($arr as $i => $row) {
if ($row['teamID'] === '') {
while(in_array($id, $ids)) {
$id++;
}
// put id in $arr
$arr[$i]['teamID'] = $id;
$id++;
}
}
var_dump($arr);
This actually does not produced the expected outcome as stated in the question.
| common-pile/stackexchange_filtered |
Access one IP from outside of the subnet on the same network
I have a network which has for example the subnet <IP_ADDRESS>/24. On this, I have one embedded device that tries to access http://<IP_ADDRESS>, which is hardcoded. Without making physical changes to the network I would like that device to get a response.
What I tried was to add a static route on that device
$ route add -host <IP_ADDRESS> dev eth0
$ route
Kernel IP routing table
Destination Gateway Genmask Flags Metric Ref Use Iface
<IP_ADDRESS> * <IP_ADDRESS> U 0 0 0 eth0
<IP_ADDRESS> * <IP_ADDRESS> UH 0 0 0 eth0
and then configure one PC on the network to additionally have that static IP address. Now, from my device I could ping it, but unfortunately only if I specify the interface explicitly:
ping <IP_ADDRESS> # doesnt work
ping -I eth0 <IP_ADDRESS> # works
I could also still not access a webserver on that PC. How can I additionally make that IP address accessible without for example introducing a router?
What exactly generates the IP addresses in your LAN ?
What system does your device use? Or what interfaces does it have? (something like the output of ifconfig. If it works with the -I eth0 you have almost solved it, you probably only need to set an additional route.
This seems to be an https://en.wikipedia.org/wiki/XY_problem I would search how to configure/reconfigure the equipment I have to use the network configuration I have, instead of complicating it.
You need to add an IP alias <IP_ADDRESS>/24 to the same interface where <IP_ADDRESS>/24 is configured. The kernel will add a directly connected route automatically. Then you need to configure a web server like nginx, apache, lightttpd to be able to server web pages on http://<IP_ADDRESS>
| common-pile/stackexchange_filtered |
How can I get the caller function name in my class?
Possible Duplicate:
How to get name of calling function/method in PHP?
So in PHP I'm trying to build a custom error handler class. It's pretty simple:
I give it a code number and it gives back a formatted error message which I can use to send to mobile devices.
So it looks like this now:
class Errorhandler {
private $errors = array(
//here be error codes and messages
100 => 'Missing input or parameter!'
);
public function __construct($code = 100){
//return formatted output
}
}
So I'd use the class above like:
public function someFunction(){
//some conditions met, then throw an error
$handler = new ErrorHandler();
$this->response = $handler;
}
So I'd need the parent function's name (someFunction) in my Errorhandler class so I can return it with the formatted error message.
On a side note: I don't want to use parameters for this, too much writing there.
Have you looked at the optional arguments for http://www.php.net/manual/en/function.set-error-handler.php
__FUNCTION__
It's magic. See more here: http://www.php.net/manual/en/language.constants.predefined.php
As for actually getting it in your ErrorHandler class, you'd have to pass it to the constructor. PHP isn't that magical.
No, he meant the function that actually calls __FUNCTION__...
Wouldn't it be returning my custom errorhandler class's __construct() name?
$handler = new ErrorHandler(__FUNCTION__); ? Isn't that what you want? Then grab the function name in the ErrorHandler constructor and assign it to a class variable. Either that or use debug_backtrace() within the class itself.
Sadly no, because with this, I have to type in __FUNCTION__ every time which is boring if you know what I mean.
so declare a global reference to it called F or something and pass that. Or just use debug_backtrace() which would be less messy.
You can use debug_backtrace() to get the function that have called your function.
debug_backtrace() returns the current call stack.
You can parse debug_backtrace() function output. It returns associative array of all calls to the very top (http://php.net/manual/en/function.debug-backtrace.php)
Also notice that fatal error will interrupt normal script excution and to catch it you need to call error handler from callback registered with register_shutdown_function() (http://php.net/manual/ru/function.register-shutdown-function.php)
| common-pile/stackexchange_filtered |
Firebase cloud messaging authentication issue
I have an app, available on Google Play that is an end-to-end encrypted messenger. One of its unique advantages is that users never have to identify themselves – they just make up a name. No Google account email address, no phone number, nothing.
The app currently uses Google Cloud Messaging (GCM) and I have to convert it to Firebase Cloud Messaging (FCM) as Google is dropping support for GCM.
When we link our app to our Firebase Project we are told “This is required because the default security rules for the Android Firebase allows only authenticated users to read and write.”
When we choose an authentication process I find there is “anonymous” and the docs say “this is for temporary anonymous accounts. The word “temporary” worries me. Does this mean, somehow, that anonymous accounts will not be allowed to persist indefinitely?
Is one of the purposes behind FCM to prevent anonymous messaging? I’m aware of the Patriot act and all of it’s provisions and powers to force companies like Google to take actions the goverenment sees fit.
So the question here is: Will FCM allow anonymous accounts to be permanent?
Hi Dean. Sorry, but I'm totally confused by this post. FCM is a separate service from Firebase Auth. One can be used without the other. Importing your project to the Firebase Console for the sole purpose of migrating to FCM from GCM shouldn't require you to enable Firebase Auth or any of the other services.
It doesn't state that your app will be deleted if you don't implement Google/Facebook/Email log-in in your FCM chat app, so I think you can use Anonymous login for as long as you want. Also related, Anonymous Login by David East
| common-pile/stackexchange_filtered |
Laravel 5.3 Observer not triggering on updating event
I have created Observer for my model to update related data:
<?php
namespace App\Observers;
use App\User;
class UserObserver
{
public function updating(User $user)
{
$data = $user->getAttributes();
dd($data);
$user->worker->fill($data['worker']);
$user->push();
}
}
And add it to my model:
protected static function boot()
{
parent::boot();
parent::observe(UserObserver::class);
}
But updating method not firing when updating model:
$user = User::byUsername($username)->first();
$user->update($request->all());
I have worker.rate_per_hour value in request instance and added it to fillable of User model.
So my question is, What I am doing wrong?
It looks like $object->update($data) does not fire the updating or updated events. Try with $object->prop = 'val'; $object->save()
Your decision firing saving event, not updating.
Register the observer in the AppServiceProvider:
dir: /app/Providers/AppServiceProvider.php
class AppServiceProvider extends ServiceProvider
{
/**
* Bootstrap any application services.
*
* @return void
*/
public function boot()
{
User::observe(UserObserver::class); // here
}
}
Note: Make sure you cleared cache & fired composer dumpauto command
| common-pile/stackexchange_filtered |
Selecting Metrics in Keras CNN
I am trying to use CNN for trying to classify cats/dogs and noticed something strange.
When i define the model compile statement as below -
cat_dog_model.compile(optimizer =optimizers.Adam(),
metrics= [metrics.Accuracy()], loss=losses.binary_crossentropy)
my accuracy is very bad - something like 0.15% after 25 epochs.
When i define the same as
cnn.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])
my accuracy shoots upto 55% in the first epoch and almost 80% by epoch 25.
When I read the Keras doc - https://keras.io/api/optimizers/ they mention explicitly that
You can either instantiate an optimizer before passing it to model.compile(), as in the above example, or you can pass it by its string identifier. In the latter case, the default parameters for the optimizer will be used.
Also the metrics parameter are also as per the API - Keras Metrics API
So as per my understanding i am using default parameters on both. Also when i change the metrics parameter to hardcode I get the same accuracy. So somehow the accuracy metrics is causing this issue. But I cant figure out why - Any help is appreciated.
My qn is why is hard coding metrics better than defining it as parameter?
Some more details : I am trying to use 8k images for training and about 2k images for validation.
sample code (you can change the line number 32 to get different results) :
from keras import models, layers, losses, metrics, optimizers
import numpy as np
import pandas as pd
from keras.preprocessing.image import ImageDataGenerator, load_img,img_to_array
train_datagen = ImageDataGenerator(rescale = 1./255,shear_range = 0.2,zoom_range = 0.2,horizontal_flip = True)
train_set = train_datagen.flow_from_directory('/content/drive/MyDrive/....../training_set/',
target_size = (64, 64),batch_size = 32,class_mode = 'binary')
test_datagen = ImageDataGenerator(rescale = 1./255)
test_set = test_datagen.flow_from_directory(
'/content/drive/MyDrive/........./test_set/',
target_size = (64, 64),batch_size = 32,class_mode = 'binary')
cat_dog_model = models.Sequential()
cat_dog_model.add(layers.Conv2D(filters=32, kernel_size=3, activation='relu', input_shape=[64, 64, 3]))
cat_dog_model.add(layers.MaxPool2D(pool_size=2, strides=2))
cat_dog_model.add(layers.Conv2D(filters=32, kernel_size=3, activation='relu'))
cat_dog_model.add(layers.MaxPool2D(pool_size=2, strides=2) )
cat_dog_model.add(layers.Flatten())
cat_dog_model.add(layers.Dense(units=128, activation='relu'))
cat_dog_model.add(layers.Dense(units=1, activation='sigmoid'))
cat_dog_model.compile(optimizer =optimizers.Adam(), metrics= [metrics.Accuracy()], loss=losses.binary_crossentropy)
cat_dog_model.summary()
cat_dog_model.fit(x=train_set,validation_data=test_set, epochs=25)
They should be both the same. Under the hood, keras map the string to Optimizer class and initiate it with the default parameters as you said. In order to make sure, you need to fix your random seed and try with both methods. Then you will see that you the matched results. To fix the seed, check this link
You haven't asked a question. What is your question?
@Coderji arguably, a difference in accuracy between 15% and 80% cannot be attributed to the random seed
@desertnaut it still can happen, specially the image model is very small and have only 8K of data to train with no transfer learning. Additionally there are alot of randomness happening in the image generator that can give different results. At the end optimizer string are mapped to the correspondent object using Factory design pattern not more or less.
don't think it is the optimizer. Think it is the way you defined the metric.. Try defining the metric in the same way for both cases of optimizer.
@GerryP - thank you. It was indeed the metrics.. but why is the metrics causing so much of an issue. The api page (https://keras.io/api/metrics/accuracy_metrics/) says we can use it this way so why is it happening.. I will also modify the qn accordingly
| common-pile/stackexchange_filtered |
In Ruby, how do I assign a value to a class instance
I want to create a class instance that has a value such that I can do something like puts a = Example.new(1) where a's value is specified in initialize.
I expect that this is a simple problem since all predefined Ruby classes allow this, but I'm unable to figure out how to do it for my classes.
puts a = Example.new(1) doesn't make sense. You're printing the result of an assignment which is always nil. Think of it as puts (a = Example.new(1)) Do you mean puts Example.new(1)
@nita No, the return value of puts is nil, but puts(a = 1); a # => 1 and is perfectly valid.
@nlta the result of an assignment is the assigned value.
You could define a getter via attr_reader :a, assign the corresponding instance variable @a in initialize and print it via puts Example.new(1).a
@ArkanasStacker Could you explain your question a bit better? Are you looking to define what is printed for an instance during puts? You have to implement def to_s in your class for this.
You need to correct your terminology. You can only assign values to variables, but a class instance is not a variable. I expect you want to know how can you assign a value to an instance variable for a given class instance. As you refer to a = Example.new(1) I assume you have something like class Example; def initialize(a); @a = a; end; end. Then inst = Example.new(1) #= #<Example:0x00007f89341179b0 @a=1>. Notice that inst holds the instance and the current value of the instance variable @a is seen to be 1. You can change that with inst.instance_variable_set('@a', 2) #=> 2...
...To confirm @a's current value we can write inst.instance_variable_get('@a') #=> 2. See Object#instance_variable_set and Object#instance_variable_get. These methods are not needed if, as @Stefan mentions, setter and getter methods have been defined using Module#attr_accessor.
Please clarify your specific problem or provide additional details to highlight exactly what you need. As it's currently written, it's hard to tell exactly what you're asking.
Class#new and Return Values
Your example doesn't quite work because Ruby treats Class#new as a special case, and is expected to invoke the #initialize method and return an object. If it didn't, calling #new on a class would surprise a lot of people by returning the last evaluation of the initializer from your class, or from Object#new if it's otherwise undefined for your class. In either case, this would violate the principle of least surprise.
However, you can do what you want pretty easily by simply creating an accessor method and then chaining off of Example#new. For example, in Ruby 3.1.0:
class Example
attr_reader :int
def initialize(int) = (@int = int)
end
# prints `1` to STDOUT and assigns the value to *a*,
# but returns nil because you're using Kernel#puts
# which always returns nil
puts a = Example.new(1).int
# shows that the local variable *a* is set to the value
# returned by the Example#int accessor for the class'
# @int instance variable
a
#=> 1
To avoid the confusion of having a nil return value (even though this is expected with Kernel#puts, just change your puts statement to use Kernel#p instead:
p a = Example.new(2).int
#=> 2
Refactoring the Example Class for Older Rubies
If you're using an older Ruby than 3.0, you can't use an endless method or the improved handling for them in Ruby 3.1. The only difference is that rather than an inline method, you need to specify it with the standard def...end syntax, e.g.:
class Example
attr_reader :int
def initialize(int)
@int = int
end
end
Otherwise, the points above are valid as far back as any currently-supported Ruby version.
new is special by convention, you're free to write your own new method that returns whatever you want. That would be a terrible thing to do but you're free to do it.
A popular example is Struct which overrides ::new to return a new custom anonymous class each time.
"Your example doesn't quite work because Ruby treats Class#new as a special case" – There is nothing special about Class#new. It is basically just class Class; def new(...) obj = allocate; obj.__send__(:initialize, ...); obj end end
| common-pile/stackexchange_filtered |
What is the difference between "under ..." and "stipulated by ..." in legal expression?
I have a question about whether I should use “under” or “stipulated by” in the sentence below.
Responsible for navigation, cargo handling, fire and life-saving equipment, and other duties related to safe operation of ships under international conventions.
Responsible for navigation, cargo handling, fire and life-saving equipment, and other duties related to safe operation of ships stipulated by international conventions.
Because duties of mariners are mandatary and written into international conventions, I am wondering whether the use of these two words may cause different meanings and understanding to readers. Which word is best to reflect the meaning of "written into"?
In addition, can I use “stipulated under” to replace stipulated by? Is it correct?
| common-pile/stackexchange_filtered |
Calculate number of days from date time column to a specific date - pandas
I have a df as shown below.
df:
ID open_date limit
1 2020-06-03 100
1 2020-06-23 500
1 2019-06-29 300
1 2018-06-29 400
From the above I would like to calculate a column named age_in_days.
age_in_days is the number of days from open_date to 2020-06-30.
Expected output
ID open_date limit age_in_days
1 2020-06-03 100 27
1 2020-06-23 500 7
1 2019-06-29 300 367
1 2018-06-29 400 732
Make sure open_date in datetime dtype and subtract it from 2020-06-30
df['open_date'] = pd.to_datetime(df.open_date)
df['age_in_days'] = (pd.Timestamp('2020-06-30') - df.open_date).dt.days
Out[209]:
ID open_date limit age_in_days
0 1 2020-06-03 100 27
1 1 2020-06-23 500 7
2 1 2019-06-29 300 367
3 1 2018-06-29 400 732
| common-pile/stackexchange_filtered |
Module 'random' has no attribute 'randomint'
I have read other post while trying to solve this problem. All of them seem to say that the file is the name random.py but my file name is test.py
def getAnswer(answerNumber):
if answerNumber == 1:
return 'It is certain'
elif answerNumber == 2:
return 'It is decidedly so'
elif answerNumber == 3:
return 'Yes'
elif answerNumber == 4:
return 'Reply hazy try again'
elif answerNumber == 5:
return 'Ask again Later'
r = random.randomint(1,5)
fortune = getAnswer(r)
pint(fortune) ```
It's randint not randomint.
@yazou This is a simple typographical error, it hardly deserves “validating”. It deserves closing.
@donkopotamus : Okay. I removed my comments
The function is randint not randomint. PLease change your code in this way :
import random
def getAnswer(answerNumber):
if answerNumber == 1:
return 'It is certain'
elif answerNumber == 2:
return 'It is decidedly so'
elif answerNumber == 3:
return 'Yes'
elif answerNumber == 4:
return 'Reply hazy try again'
elif answerNumber == 5:
return 'Ask again Later'
r = random.randint(1,5)
fortune = getAnswer(r)
pint(fortune)
| common-pile/stackexchange_filtered |
Filter rows in SLT
How to filter rows in SLT (SAP Landscape Transformation)
The filter should be based on one column of the table. And only if a cell in this row has a specific content then the row should be replicated.
Filters can be defined in the transaction LTRS
Example
Filter rows if in column /BIC/R_GES is any value other than L
if i_/BIC/R_GES_1 <> 'L'. SKIP_RECORD. endif.
Note the prefix "i_" and the appendix "_1"
| common-pile/stackexchange_filtered |
How do I configure the Kendo Upload widget to hide the files list
I'm developing with telerik's kendo framework using their mvvm bindings. I'm having trouble with the configuration of the uploader widget, configured as the following:
<input name="files"
id="fileUploader"
type="file"
data-role="upload"
showFileList="false"
data-async="{ saveUrl: '/api/', removeUrl: 'remove', autoUpload: true}"
data-bind="enabled: isEnabled, events: { select: eventAddFile }">
How do I configure the showFileList Configuration option to be false?
You can use:
data-show-file-list="false"
The temp solution is this but I would ultimately handle this in the html.
uploader = $('#fileUploader').data("kendoUpload");
uploader.options.showFileList = false;
| common-pile/stackexchange_filtered |
Replacing a Freeradius library
I have a freeradius 2.0.4 server i want to update.it has been running without interruption for 2 years.
My update consists only in updating the chap module without creating a new one.
To avoid discontinuity of service and be sure that all configs remain the same, i am trying to find a smooth way to do that on a testing machine.
I thought it would be possible to simply replace the rlm_chap-2.0.4.so library with my modified version of it. but this produces a segfault (most probably caused by a library conflict).
Other information:
- Freeradius starts and segfault is produced when the chap module is call at the first request.
- My library works fine when it is used along with the compiled version from sources of freeradius
- I am using 2 versions of Freeradius: 2.0.4 installed through an apt-get install and the compiled version from apt-get sources
- The library conflict is caused by the modified library and by nothing else. If i restore the original version of rlm_chap-2.0.4.so (and do nothing else) it works fine again.
- I am still a noob in compilation/library manipulations/freeradius, so i'm probably doing a lot of things wrong.
Any ideas?
Thanks!
I'm not sure what you mean by "library conflict".
But anyway, you shouldn't do this. If you're modifying rlm_chap it must be built against the same version of the source as libfreeradius and radiusd.
If you're attempting to use a packaged version of rlm_chap from a more recent release of FreeRADIUS (say 2.2.0) it likely will not work because the internal APIs of FreeRADIUS may of changed.
With FreeRADIUS 3.0.0 this is explicitly forbidden. The server will refuse to start if it loads a module linked against a different version of the library.
It will also refuse to start if it was built with a different version of OpenSSL to the one it's linking to at runtime.
If you're set on updating rlm_chap, you will need to download the official release of 2.0.4, swap out the rlm_chap code in src/modules/rlm_chap, attempt to build the module, fix any build errors, and then use that version in place of the current .so.
I solved my problem myself but forgot to post it here, I was actually misunderstanding the difference between compiling and building from the source code. A "./configure make make install" uses different compilation options and settings than a "fakeroot dpkg-buildpackage -b -uc" as described in freeradius wiki. When building from sources, the libraries were perfectly interchangeable.
Thanks anyways
I will still mark your answer as the right one for you last paragraph.
| common-pile/stackexchange_filtered |
Pumping lemma for context-sensitive language?
i have googled on pumping lemma for context sensitive, and it seems to only produce results for context-free language.
Pumping lemma only allows to prove a language is context free only? and not context sensitive?
Any idea how?
There are two Pumping Lemmas. Pumping Lemma for regular languages allows to prove that a language is not regular. Pumping Lemma for context-free languages allows to prove that a language is not context-free and hence not regular.
There are no other Pumping Lemmas. To prove that language is context sensitive you could first using Pumping Lemma prove that it is not context-free. Then you must supply a context sensitive grammar that actually generates given language.
-1 I wouldn't say that there are no other pumping lemmas. There are similar "iteration lemmas" for LL(k) languages, and even for CFLs there are other lemmas you can use (Ogden's lemma, for example). Moreover, to prove that something is a CSL, you do not need to disprove that it is context-free; any CFL is also a CSL. On top of that, you can't always use the pumping lemma for CFLs to prove that a language is not CFL. Finally, you can also prove that something is a CSL by providing an LBA, not just a CSG.
@templatetypedef "Pumping Lemma" was meant literally. "Similar" approaches are everywhere. Normally you would like to prove that the language is exactly CSL. And of course there are many other possibilities, and exactly because of that I've written "you could".
Pumping lemmas exist for regular, context-free, tree-adjoining, and multiple-context-free languages. There is a good survey in Johan Behrenfeld's master's thesis:
http://www.flov.gu.se/digitalAssets/1302/1302983_behrenfeldt-johan-alinguists.pdf
There is no pumping lemma for context-sensitive languages. Indeed, this class has considerably more generative power, and include languages without any kind of "pumping" property, e.g. {a^p | p prime}.
Each pumping lemma states a property that is true of a language in that class. It can be used to prove that a language is not in that class, as a proof by contradiction. It cannot be used to prove that a language is in that class.
The "pumping lemma-like" approach for tree-adjoining languages actually is named the "pumping lemma for tree-adjoining languages" everywhere in the literature. It makes it possible to prove that a language is not tree-adjoining, and therefore not mildly context-sensitive. Maybe this is the one you had in mind?
It was defined by Vijay-Shanker in his PhD thesis, which is unfortunately not available online. It is nonetheless easy to find how it works by searching the web. Many courses, for instance this one from the University of Tübingen, give a good account.
| common-pile/stackexchange_filtered |
How can I find out where the cursor is?
I want to find out where the cursor is in the terminal. For example, I want to know if the cursor is at the beginning of a new line or which position in a line it is.
I would prefer if an escape code existed for this but it's acceptable if only the windows console API has the way to accomplish this.
There is no cursor as far as C is concerned. You need an OS-specific library that adds that capability.
@tadman, This question has nothing to do with C apart from the fact that it is the language I'm using to utilize escape codes and the windows console api. Does the windows console api contain a function that can detect where the cursor is? Or even better, does an escape code exist for this purpose?
GetConsoleScreenBufferInfo returns a CONSOLE_SCREEN_BUFFER_INFO structure which contains the cursor position. You can look up this stuff yourself – see Console Functions
@WeatherVane My apologies, I spent most of my time searching for an escape code equivalent since escape codes are supported on Linux, Windows and macOS while the windows console api is only supported on windows. Thanks.
I learnt recently that Windows support virtual terminals, but have never used them.
@WeatherVane, most console applications on Windows already use the console API which is older and more established. Additionally, some windows console API functions do not have an equivalent virtual escape sequence.
... look at Console Virtual Terminal Sequences and search for "Report Cursor Position" about 40% down the page.
This sounds like an XY problem to me. What are you planning on using this for, maybe there's a better solution. E.g. a library like readline.
@WeatherVane I'm sorry I missed this in the docs. Thanks.
@Barmar I'm building a text editor and I have successfully detected when the backspace key is being pressed and I printed an escape code to delete a character. However, the way I implemented this restricted me in deleting a '\n' char from the beginning of a new line. Perhaps I will change my implementation.
Use the ncurses library to do full-screen terminal control.
| common-pile/stackexchange_filtered |
How to create a request in python insecure by header
I want to request to show my pods in kubsernets
and this command work for me
curl -X GET https://<IP_ADDRESS>:6443/api --header "Authorization: Bearer $TOKEN" --insecure
{
"kind": "APIVersions",
"versions": [
"v1"
],
"serverAddressByClientCIDRs": [
{
"clientCIDR": "<IP_ADDRESS>/0",
"serverAddress": "<IP_ADDRESS>:6443"
}
]
I want to write it in python on windows so I write this code
import requests
auth_token='eyJhbGciOiJSUzI1NiIsImtpZCI6IiJ9.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJkZWZhdWx0Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZWNyZXQubmFtZSI6ImRlZmF1bHQtdG9rZW4tN2Y2NGYiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC5uYW1lIjoiZGVmYXVsdCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50LnVpZCI6IjQ5OThhYzJkLTcwZGMtMTFlOS1hNzQ2LWZhMTYzZTBmNmI5MSIsInN1YiI6InN5c3RlbTpzZXJ2aWNlYWNjb3VudDpkZWZhdWx0OmRlZmF1bHQifQ.nhvHvzSkM_cy84mxra5i12jUNNrsWOf3XSoZtKvMXkIW07Ftto-ce8tr_gceAExbTYdVY5lmhxptHIosevfVCAafceLwE8wN3gXsaguaU8nZjXSR_fX5lFSK5J1s19hfh2vl2lKkb-A2_Yu2j3RdFn70LPL6dRKg9GmJIyIREICe3jq1ZATQj6V9rRjXg1wHc9qdnESmlb5qc9V9_ZJuiT_WbSXwzpgUmwm1YuwajxmV7rbFSFd-TKXsotGIwijoCztxbRRgy_8m_xoinC9UnUtLV-TrRNrSBhuuZe0Wl6ZoItjOSOfMj0NkE5EHPGqqvPjgRcSwMWvUc-pZ6UjoNw'
hed = {'Authorization': 'Bearer ' + auth_token}
url = 'https://<IP_ADDRESS>:6443/api'
requests.packages.urllib3.disable_warnings()
response = requests.get(url, headers=hed,verify=False)
print(response)
print(response.json())
After I run it in pycharm I get this error
requests.exceptions.ConnectionError: HTTPSConnectionPool(host='<IP_ADDRESS>', port=6443): Max retries exceeded with url: /api (Caused by NewConnectionError('<urllib3.connection.VerifiedHTTPSConnection object at 0x06152BB0>: Failed to establish a new connection: [WinError 10060] A connection attempt failed because the connected party did not properly respond after a period of time, or established connection failed because connected host has failed to respond'))
And if I run it in ubuntu I get the correct respond
{u'serverAddressByClientCIDRs': [{u'clientCIDR': u'<IP_ADDRESS>/0', u'serverAddress': u'<IP_ADDRESS>:6443'}], u'kind': u'APIVersions', u'versions': [u'v1']}
What should I do?
can you try requests.get instead of requests.post?
it does not any changes
You say you are running it "on Ubuntu" and "in PyCharm" -- are these on the same machine and/or the same network?
they are in different machine in same network the pycharm is on windows
Did the initial curl command work on Windows?
in windows it works with postman
| common-pile/stackexchange_filtered |
How to make page scroll to a specific element when clicking a button? Scroll animation, if possible
As the title says I want to make the page scroll to a specific element on the page when the user clicks a text/image. If it's possible, I don't want it to just skip to that element, but have a scrolling animation to it.
I know it's possible to do in JS, but I forgot how you do it.
possible duplicate of jQuery animate scroll
I found another post with similar question http://stackoverflow.com/questions/15664799/how-to-scroll-to-element-cross-browser-using-jquery-animate
Depending on your specific needs you can organise the code in such way, so that you can pass the id of the element that needs to be scrolled to the click handler.
<a href="#scrollHere">Click me</a>
<div id="scrollHere"></div>
$('a[href^="#"]').on('click', function(event) {
var target = $(this.href);
if( target.length ) {
event.preventDefault();
$('html, body').animate({
scrollTop: target.offset().top
}, 1000);
}
});
This will scroll the page to #scrollHere element over a period of one second (1,000 milliseconds = 1 second).
Or in a more generic way:
$('.someElement').on('click', function(event) {
var target = $('#someElementToScrollTo');
$('html, body').animate({
scrollTop: target.offset().top
}, 1000);
});
| common-pile/stackexchange_filtered |
Save and load big array (> 100 gb) python, and/or reduce size
I need to save a really big array (is a matrix of doubles, with size of 5e5 x 3e4.
The context is: I have a 1D simulation of viscous disc, each row is a snapshot of the simulation ( the surface density).
all the data is relevant (more or less), so in principle i cannot reduce the size of the matrix.
I tried using np.save and h5py. with that, a matrix of 5e4x1.5e3 has a size of 6 gb in the disc. h5py is faster than np.save writing it, but I dont know if that will be the case for the full simulation (which should be something like 110 GB.
is there a way to store the data in less space? or, is there another way to save and load the data that can be faster than the other two methods?
Thanks!
Is your data dense or are zeros a possibility?
there are some zeros, but they are easily less tan 20% of the data
Well I would say sparse matrices will not help you in that case. You've said no reduction so SVD, NMF, PCA, etc go right out the window. So I really don't know how else you are going to save space. One obvious and somewhat unrelated concern would be that trying to operate on something so large your system should probably have at least that much memory.
Is float really not sufficient for your results? Is the data compressible? Do you wan't to save the snapshots directly when they are callculated, or all the data at the end of the simulation? And the most important question, how do you wan't to read your data afterwards (only subsets of a given shape?)
Floats are enough, I don't know what you mean with compressible. I can save them directly now . I would like to read some rows individually, depending of what part of the simulation is relevant. I don't need all the snapshots, but I cannot truly know a priori which one to save and which no. Thanks
| common-pile/stackexchange_filtered |
How can I debug the name of any button pressed with the Unity New Input System?
When using the new input system, some build targets such as andoid can shuffle the names of controller inputs, messing up the bindings.
How can I listen to all inputs and print the name of the button pressed at runtime?
Solution adapted from this form answer (which no longer works).
You want to create an input binding at runtime wich matches all button inputs, using an InputControlPath, which provides a "regular-expression-like" syntax for creating input bindings.
var debugAction = new InputAction(binding: "/*/<button>");
This particular InputControlPath will match any controler type with * and then any button with <button>.
We then take this and assign it's performed event to a labda which can read the button name from the InputControl variable provided by the callback context.
debugAction.performed += (ctx) => Debug.Log($"Button {ctx.control.name} pressed!");
debugAction.Enable();
| common-pile/stackexchange_filtered |
getting 504-Bad Gateway error while saving Instagram images
I am trying to save Instagram images on my local machine through API and using the file_put_contents php method
I have tried sleep() method
ini_set('max_execution_time', 400);
$access_token = "";
$photo_count = 8;
$json_link = "https://api.instagram.com/v1/users/self/media/recent/?";
$json_link .="access_token={$access_token}&count={$photo_count}";
$json = file_get_contents($json_link);
$obj = json_decode(preg_replace('/("\w+"):(\d+)/', '\\1:"\\2"', $json), true);
$cnt = 1;
foreach ($obj['data'] as $post){
$pic_text = $post['caption']['text'];
$pic_link = $post['link'];
$pic_like_count = $post['likes']['count'];
$pic_comment_count=$post['comments']['count'];
$pic_src=str_replace("http://", "https://", $post['images']['standard_resolution']['url']);
$pic_created_time=date("F j, Y", $post['caption']['created_time']);
$pic_created_time=date("F j, Y", strtotime($pic_created_time . " +1 days"));
$img = $_SERVER['DOCUMENT_ROOT']."/assets/images/insta-images/".$cnt.".jpg";
file_put_contents($img, file_get_contents($pic_src));
$cnt++;
}
With the above code, there are only 4 images are successfully saved on my local machine but I have set photo count 8, the script is running for the only 1 minute.
I want a minimum of 8 images should save
| common-pile/stackexchange_filtered |
why does Inquire pos returns 0 in Fortran 90
I'm reading a binary file, direct access, in Fortran 90, and i'm trying to find out the pointer position. For that i use:
inquire(unitvector, pos=cur_pos)
But when i print the cur_pos it's always 0, even after i've read a substantial amount of the file. Why is it 0?
To compile i use:
$ ifort -warn all -traceback -free util.F fold2Bloch.f90 -o fold2Bloch
pos= isn't Fortran 90 - which is part of the problem.
The variable used for the POS= specifier is defined only for a file connected with stream access, rather than the chosen direct access. 0 is a possible result in this case.
With direct access the position perhaps doesn't really have much general value: the point of such a connection is random access.
From the draft standard, section <IP_ADDRESS> (my emphasis):
The scalar-int-variable in the POS= specifier is assigned the number of the file storage unit immediately following the current position of a file connected for stream access. [..] If [..] the file is not connected for stream access [..] the variable becomes undefined
That sure explains it. Do you know of another way to see how far along the way you are?
If you're doing direct access then keep count of how many records you've read and find how many you will read in total?
Hmm, that's an idea. However, when i inquire for record length, using recl, i get 540. I've done a bit of reading and in direct access all record lengths must be the same. Now what does that 540 mean? Bytes, bits?
It is probably bytes, but it will be 540*FILE_STORAGE_SIZE bits. See your compiler's documentation.
@AntonB For direct access, there is no concept of the current position in the file. You read or write a record as specified by the REC= specifier in the READ/WRITE statement. In practice, all implementations running on Unix/Windows type systems place record #N starting at (N-1)RECLFILE_STORAGE_SIZE bits.
And once again i'm stuck, the math just isn't adding up. Can you guys make a suggestion as to how i should approach my problem. I'm trying to make a progress bar based on percentage of the file read (maybe another way?). These are 100MB and higher binary files.
| common-pile/stackexchange_filtered |
Adding a search bar to the top of a UITableView
I have a UITableView and am currently working on adding a custom search bar at the top consisting of a UIView with a UITextField inside of it. As is standard for iOS apps, the search bar should only be visible when the table view is scrolled to the top — when scrolling down it should disappear off the screen along with the other cells.
However, I cannot figure out a way to achieve this effect. If I place the search bar at the top of the table view, it will overlay the cell beneath it. If I place it 50 pixels above the table view, it is not possible for the user to select it since it will automatically disappear when the user releases their finger from the screen.
Can someone please enlighten me as to how to achieve this effect?
Usually you just use UISearchBar as a tableViewHeader for your table view. If you want it hidden when the user enters the screen (like it's done in most native apps) you can just set contentOffset for the tableView in viewWillAppear.
And I am pretty sure that's in fact how they do it. And if you think about it it's what tableHeaderView is meant for.
Try something like this:
- (void)viewDidLoad
{
[super viewDidLoad];
UISearchBar *searchBar = [[UISearchBar alloc] initWithFrame:CGRectMake(0., 0., 320., 44.)];
self.tableView.tableHeaderView = searchBar;
}
- (void)viewWillAppear:(BOOL)animated
{
[super viewWillAppear:animated];
CGPoint contentOffset = self.tableView.contentOffset;
contentOffset.y += CGRectGetHeight(self.tableView.tableHeaderView.frame);
self.tableView.contentOffset = contentOffset;
}
Note that in iOS 7 you should not just set the contentOffset of your tableView to CGPointMake(0., CGRectGetHeight(self.tableView.tableHeaderView.frame)) if your viewController has automaticallyAdjustsScrollViewInsets set to YES, since it probably will not be CGPointZero in viewWillAppear:
Nice, but how would you trigger the hiding once there is nothing in the search bar @dariaa ?
@dariaa's answer updated for Swift 3:
override func viewDidLoad() {
super.viewDidLoad()
let searchBar = UISearchBar(frame: CGRect(x: 0, y: 0, width: self.view.frame.size.width, height: 44))
self.tableView.tableHeaderView = searchBar
}
override func viewWillAppear(_ animated: Bool) {
super.viewWillAppear(animated)
var contentOffset: CGPoint = self.tableView.contentOffset
contentOffset.y += (self.tableView.tableHeaderView?.frame)!.height
self.tableView.contentOffset = contentOffset
}
Though you'll probably need to set the searchBar as a property if you want to use it.
If you only want your search to be visible when the UITableView is scrolled all the way to the top, make a UITableViewCell subclass that houses your UISearchBar. Then, in tableView:cellForRowAtIndexPath, check if the indexPath is (0,0). This is the table view telling you it is creating the cell at the very top, so then just create your search bar cell instead of your default cell.
Code would look something like this:
- (void)viewDidLoad
{
[self.tableView registerClass:[SearchBarCell class] forCellReuseIdentifier:@"SearchBarCell"];
}
- (UITableViewCell *)tableView:(UITableView *)tableView cellForRowAtIndexPath:(NSIndexPath *)indexPath
{
if (indexPath.row == 0 && indexPath.section == 0) {
//this is the cell that displays the UISearchBar
SearchBarCell *cell = [tableView dequeueReusableCellWithIdentifier:@"SearchBarCell"];
return cell;
}
else {
//create your usual table view cell normally
}
}
There may be a cleaner way to determine the row and section of the indexPath, but I'm writing this code off the top of my head and don't recall a better way.
The searchBar should not be displayed in a cell, but in the tableView header.
| common-pile/stackexchange_filtered |
Turtle Graphics with Recursion
I need to draw a shape using recursion and turtle graphics.
I'm more of a looper and rarely ever use recursion, so some help here with this would be nice. Not even sure where to start.
The shape just needs to be cool looking.
Welcome to SE, what have you tried so far?
I'm not sure where to start, so a beginning would be awesome if anyone could help me and the rest I can attempt.
Have a look to http://www.kidscoderepo.com/python.html, it is a good point to start and meets your cool looking requirement
Without any parametrization, here a beginning:
import time
from turtle import *
def recurse(n):
if n>0:
left(10)
forward(5)
recurse(n-1)
recurse(20)
time.sleep(5)
When you create recursive function, you need to have a stop criterion that effectively guaranties your program will exit at some point.
Thanks everyone for your answers. I think I'm on the right track now.
Oh man! What a fun problem :)
As you are a self-proclaimed 'looper', I would think about recursion just as you would looping.
In a for loop, you execute the body of the loop until the for loop condition is satisfied. Now, recursion is very similar. You keep calling the function until the parameters of the function no longer hit a recursive case. They hit a base case instead which then returns a value that the recursion can then build up on.
So thinking about recursion this way, let's think about how to draw a square. You need to first identify what parts of the code get repeated (i.e. what would be in the body of a for loop trying to do the same thing). Then, identify when you want this repetition to stop (i.e. how do I know when a for loop exits).
While drawing a square, I can think of two major things that get repeated at least 4 times. The turtle goes forward a certain number of steps and the turtle turns 90 degrees (or 270 degrees depending on orientation). So this would be something we detail in our recursive case.
Now, let's think about the base case. Well, we know that a square has 4 sides, so after the turtle draws four sides, we want it to stop.
Lastly, let's think about the function declaration and how these two pieces, the recursive case and the base case, play into it. A function declaration may take the following form (in Python):
def draw_square_recursive(turn_deg=90, side_len, sides=4):
"""
Function draws a square with turtle graphics recursively
:param turn_deg: an int, the number of degrees a turtle should turn
:param side_len: an int, the length of a side of the square
:param sides: an int, the number of sides in our square
"""
turn_deg and side_len will be important for our recursive case as they define how a turtle should turn and how far it should 'walk'. sides is an interesting parameter and one we can use to dictate whether to continue recurring or stop. If we subtract 1 from sides every time we draw a side, we will know that we need to stop recurring when sides == 0, a base case!
Thus, whenever we call our function to recur again, we will call it as, draw_square_recursive(side_len, sides-1):
Overall, the structure of the function would look like:
def draw_square_recursive(turn_deg=90, side_len, sides=4):
"""
Function draws a square with turtle graphics recursively
:param turn_deg: an int, the number of degrees a turtle should turn
:param side_len: an int, the length of a side of the square
:param sides: an int, the number of sides in our square
"""
if sides == 0:
# base case!
else:
# recursive case!
Note that this function named draw_square_recursive but it can be much more generalized to other shapes. Do you see how?
Sorry if this was a long winded answer! Hope it helps ;p
More editorial than answer, but recursion like this:
def recurse(n):
if n>0:
left(10)
forward(5)
recurse(n-1)
which is better written as iteration:
for n in range(2):
left(10)
forward(5)
is similar to folks who ask, "How can I count the number of elements in a list using recursion?" Ditto drawing a square using recursion.
I understand the goal is to learn about recursion, but what seems to get lost is that there are times where recursion makes wonderful things happen and times where it just slows down your program. Fractals are an opportunity to so something of wonder with recursion:
import sys
from turtle import Turtle, Screen
def hilbert_curve(n, turtle, angle=90):
if n <= 0:
return
turtle.left(angle)
hilbert_curve(n - 1, turtle, -angle)
turtle.forward(1)
turtle.right(angle)
hilbert_curve(n - 1, turtle, angle)
turtle.forward(1)
hilbert_curve(n - 1, turtle, angle)
turtle.right(angle)
turtle.forward(1)
hilbert_curve(n - 1, turtle, -angle)
turtle.left(angle)
depth = int(sys.argv[1])
size = 2 ** depth
screen = Screen()
screen.setworldcoordinates(0, 0, size, size)
yertle = Turtle('turtle')
yertle.speed('fastest')
yertle.penup()
yertle.goto(0.5, 0.5)
yertle.pendown()
hilbert_curve(depth, yertle)
yertle.hideturtle()
screen.exitonclick()
USAGE
% python3 hilbert.py 5
(PARTIAL) OUTPUT
I'm not picking on the other answers, I'm suggesting you think big (or at least beyond "just needs to be cool looking".)
| common-pile/stackexchange_filtered |
Bar-chart gradient background not showing - Highcharts
I am using Highcharts.js-library to create a bar-chart at my website and I would like to have a gradient as background-color (fill) of the bars. I have done it here in JSFiddle and everything is working fine. But when I use the same code at my website, it is not working and I can't figure out why.
My js-code is like this:
$(function () {
Highcharts.chart('container', {
title: {
text: ''
},
xAxis: {
categories: ['Driftsinntekter forrige år', 'Resultat før skatt forrige år', 'Egenkapital', 'Driftsresultat', 'Årsresultat']
},
defs: {
gradient0: {
tagName: 'linearGradient',
id: 'gradient-0',
x1: 0,
y1: 0,
x2: 0,
y2: 1,
children: [{
tagName: 'stop',
offset: 0
}, {
tagName: 'stop',
offset: 1
}]
}, glow: {
tagName: 'filter',
id: 'glow',
opacity: 0.5,
children: [{
tagName: 'feGaussianBlur',
result: 'coloredBlur',
stdDeviation: 1.5
}, {
tagName: 'feMerge',
children: [{
tagName: 'feMergeNode',
in: 'coloredBlur'
}, {
tagName: 'feMergeNode',
in: 'SourceGraphic'
}]
}]
}
},
series: [{
type: 'bar',
keys: ['y', 'selected'],
data: [
[29.9, false],
[71.5, false],
[106.4, false],
[129.2, false],
[144.0, false]
]
}]
});
});
And my CSS is like this (its in a separate file):
@import 'https://code.highcharts.com/css/highcharts.css';
#container {
height: 400px;
max-width: 800px;
min-width: 320px;
margin: 0 auto;
}
/* Define the stop colors */
#gradient-0 stop {
stop-color: #DBFDFC;
}
#gradient-0 stop[offset="0"] {
stop-opacity: 1;
}
#gradient-0 stop[offset="1"] {
stop-color: #2E3D50;
}
/* Apply the gradients */
.highcharts-point-select, .highcharts-color-0 {
filter: url('#glow');
stroke: transparent;
fill-opacity: 1;
fill: url('#gradient-0');
}
Here is the live-page - if you scroll down to "Regnskap"-section you will see the chart-grid but no bars. If you use the web-dev-tool you can see that there is a bar at "Driftsinntekter forrige år" but it is just not showing.
I am including the library and the css-file in head-tag as following:
<script src="http://code.highcharts.com/highcharts.js"></script>
<link rel="stylesheet" href="/resources/css/charts.css">
Hope somebody can help. Thanks in advance!
There seem to be a problem in your url to your fill and filter:
```fill: url(#gradient-0);```
```filter: url(#glow);```
Now its working. My problem was that I was including the wrong Highcharts-mode in head-tag.
I had to include this one (styled mode):
<script src="https://code.highcharts.com/js/highcharts.js"></script>
| common-pile/stackexchange_filtered |
How to add a field from body to a condition
I use the @StreamListener annotation to listen to the topic. How can I add a field from body to the condition? An example of an entity that is sent to the topic:
public class Event {
private String eventId = UUID.randomUUID().toString();
private LocalDateTime eventTime = LocalDateTime.now();
private Entity entity;
}
public class Entity {
private String id = UUID.randomUUID().toString();
private String name;
}
I need to filter by the name field from the Entity class
As you can see from version 3.0, you should avoid using filtering based on the message payload. Notice these lines from the documentation:
The preceding code is perfectly valid. It compiles and deploys without any issues, yet it never produces the result you expect.
That is because you are testing something that does not yet exist in a state you expect. That is because the payload of the message is not yet converted from the wire format (byte[]) to the desired type
So, unless you use a SPeL expression that evaluates raw data (for example, the value of the first byte in the byte array), use message header-based expressions (such as condition = "headers['type']=='dog'").
So, for your case, you could add one more header into your messages and filter by using conditions on your header, e.g:
Message<Event> message = MessageBuilder.withPayload(event)
.setHeader(KafkaHeaders.MESSAGE_KEY, event.getRequestId().toString().getBytes(StandardCharsets.UTF_8))
.setHeader("entityName", event.getEnity().getName().getBytes(StandardCharsets.UTF_8))
.build();
this.streamBridge.send("binding", message);
And now your condition would be condition = "headers['entityName']=='ABCName'"
Notice: Annotation-based programming model. Basically the @EnableBinding, @StreamListener and all related annotations are now deprecated from version 3.x
| common-pile/stackexchange_filtered |
Datagridview - refresh after DB Update on another form
I'm setting Datagridview DataSource in Form1. In Form 2 I update those records (works fine in DB), and after updating them I want to refresh Datagridview in Form1 too.
Here is my code in Form1_Button_Click:
OracleConn() 'connection to my DB
Using cmd As New OracleCommand()
Dim SQL As String = "Select * FROM MyTable"
If Chk1.Checked = True Then
cmd.Connection = OracleConn()
cmd.CommandText = SQL
cmd.CommandType = CommandType.Text
Dim dt1 As New DataTable
Using dad As New OracleDataAdapter(SQL, OracleConn)
dad.Fill(dt1)
End Using
End if
DataGridView1.DataSource = dt1
End Using
How can I do this most easily ?
Write down a method to fill your data grid in Form1
public sub FillData()
OracleConn() 'connection to my DB
Using cmd As New OracleCommand()
Dim SQL As String = "Select * FROM MyTable"
If Chk1.Checked = True Then
cmd.Connection = OracleConn()
cmd.CommandText = SQL
cmd.CommandType = CommandType.Text
Dim dt1 As New DataTable
Using dad As New OracleDataAdapter(SQL, OracleConn)
dad.Fill(dt1)
End Using
End if
DataGridView1.DataSource = dt1
End Using
end sub
Call the method into your button click ( Form1_Button_Click)
Private Sub Button1_Click(ByVal sender As System.Object, ByVal e As System.EventArgs) Handles Button1.Click
FillData()
End sub
Now you can call the method of Form1 from Form2 directly after update,
Form1.FillData()
Records will reflect in Form 1
NB: Method should be public in access.
Actually a bad one, even tho it is working. Possible User selections will be lost, Rows arent actually updated, instead just completly new loaded. You might want to take a look into these 2 posts; http://stackoverflow.com/a/33702351/5686960 http://stackoverflow.com/a/40639530/5686960
Instead of set
DataGridView1.DataSource = dt1;
Use a global BindingSource object, and bind it to both DataGridView, then all the changes made in one DataGrid will be reflected in the other.
BindingSource bsData = new BindingSource();
bsData.DataSource = dt1;
DataGridView1.DataSource = bsData;
DataGridView2.DataSource = bsData;
In your 2nd Form, bind the controls, TextBox, ComboBox, etc to this BindingSource and again, you don't need to refresh de DataGridView.
thanks for answer, but I used vipin solution since I dont have 2nd datagridview. I update records from Databinded textboxes.
It doesn't matter, you have a 2nd form with controls, bind this controls to de BindingSource and you don't need to refresh nothing at end
If I am allowed to make a guess; I think LuckyLuke82 has a second form for editing an entry in the DataGridView. All changes made are saved into the database and not on the client. That's why a global datasource wouldn't help in this case. Please correct me if I am wrong :-)
By default, using ADO.Net you're working on a disconnected dataset, until you call DataAdapter.Update() or DataAdapterManager.UpdateAll() methods. You can Add/Update/Delete record in your DataGrid, and then Commit or Reject all changes.
@Luke, you're right, I make changes directly to Oracle and afterwards I refresh Datagridview which is visible too, but located in different form (which is opened in Tab control).
@mcNets, I tried your suggestion too, but It doesn't work....Maybe I just fixed It wrong or some obstacle in code which I don't see. Thanks for reply !
| common-pile/stackexchange_filtered |
z-score versus log standardisation of stock prices for calculating correlation; which to use (in ML clustering, distance measure)?
I need to compare (get correlation between) different financial instruments (stocks).
The problem is that different stocks will have different price scales.
I was thinking of using z-score standardization on my price time series vectors $\boldsymbol{x_{j}}$:
$$\boldsymbol{x_{j}'} = \frac{\boldsymbol{x_{j}} - \bar{\boldsymbol{x_{j}}}}{\sigma} $$
Now a paper I read uses natural log standardization to achieve the same goal:
$$\boldsymbol{x_{j}''} = ln(\boldsymbol{x_{j}})$$
Is one approach correct and the other incorrect; are both usable, if so which one is preferred and what are the nuances?
Additional info based on answers and comments:
Let me add some context where this is coming from (more of a statistics / machine learning perspective). I want to do classification of different equity markets. Standardisation is a "standard" part of data pre-processing for forecasting or clustering (this is a clustering problem). And I am guessing if I were to use things like expected return and volatility AND Euclidean distance as my measure, it would make sense. However, I have chosen to use correlation as my distance measure. And this is where the question arises. I do not understand why, statistically I should use returns. I can kind of see how z-score is already incorporated into correlation (rather than covariance), although not 100%, not quite sure about log transformation. Since I am doing correlation I am measuring by default the linear relationship; I thought there would be no difference in the linear relationship between X and Y or ln(X) and ln(Y), it just makes sure the scales are the same. But then again the scales do not matter here since we are "standardizing" in the denominator of the correlation equation. Here is the link to the paper that used ln(price).
First let me say that correlation between two stocks is almost always taken in return space. First you would transform your price series to a return series and take the correlation.
Now let me address your question about correlations in general. Note the formula for correlation:
$$
\rho_{XY} = \frac{E[(X-\mu_X)(Y-\mu_Y)]}{\sigma_X\sigma_Y}
$$
From this, you can see that correlation is a normalized value that is invariant to scaling/shifting of the inputs. So using your "z-score standardization" method, will actually give you the exact same correlations!
The log standardization will measure the linear relationship between the log transformed variables. You only want to do this if you think there should be a linear relationship between $\textrm{ln}(X)$ and $\textrm{ln}(Y)$.
Very important point: "the correlation between two stocks is almost always taken in return space. First you would transform your price series to a return series and [then] take the correlation."
@noob2 My comment was too long so added it to the opening post.
You are right, standardization does not help. I can better see it in this version of he correlation equation $$\rho(X_{1},X_{2}) = E\Big[ \big( \frac{x_{1} - E(X_{1})}{\sqrt{V(X_{1})}} \big) \big(\frac{x_{2} - E(X_{2})}{\sqrt{V(X_{2})}} \big) \Big]$$. Most papers use log() transformation. I think the reason being making data non-stationary (can't really appreciate why this is important at this moment in time), rather than having data "on same scale" or that "investors work with returns". From my trial and error also noticed that transforming real rather than nominal price makes a difference.
| common-pile/stackexchange_filtered |
Path Finding - Autonomous Rover
I am working on a navigation task for an autonomous rover. Right now, the rover can calculate the shortest path between the current position and a final destination given certain obstacles. I am using dijkstra's algorithm to find the shortest path and it's working well.
The rover has a fixed range with which it can identify that there is an obstacle infront of it or not. The problem I am facing is that the rover gets stuck in an infinite loop of same path (from point A to B, then point B to A) when an the final destination happens to be on a region that cannot be reached or seen by the vision of the rover.
My question is how should I detect that I am stuck in this loop and I can't reach the destination now that I should get a new final destination or just quit.
The problem with the infinite loop is not clear (to me). Would you mind explaining it a bit more (preferable with an example).
It depends on your application, if the destination cannot be reached you can detect this checking that all the reachable places in your map are visited and the destination is not one of them. Otherwise if you just missed some possible path, once you detect that you are stuck you can try to change your rover behavior. For example trying to go close to walls with sensors (if you have one) pointing to it.
| common-pile/stackexchange_filtered |
Is there any chance to retrieve files after deleting from HDFS trash
In our project we have four environments such has Production, Development, UAT and QA. I am using the UAT environment.
We have a cluster with 43 Data Nodes. My role is HDFS clean up. Unfortunately I deleted some jobs from Hive database and also from trash. Is there any chance to retrieve those files and tables?
I am using fallowing commands:
hadoop fs -du -h / | grep ' T'
hadoop fs -rm -r source path
hadoop fs -rm -r .Trash/some path
Normally deleting file from trash is permanent. You can not recover it. But the only way to recover is to use some forensic tools. Like disk recovery tools. The recovery tools will only recover if the memory is not overwritten.
Trash should be emptied using -expunge command. Probably files are not deleted by using -rm -r command.
| common-pile/stackexchange_filtered |
Not getting @OneToMany Annotation in spring boot
I am not getting @OneTomany or @manytoone(relational kind of annotations) annotataions in spring boot ,so how do I get that ? Is there i required to change the POM file?
elaborate more. When and where u getting error?
You do have javax.persistence jar in your project build path.right?
I am creating maven project using spring and trying to create some entity classes i need to define the realtionship between some terms bt it is not showing the intellesence in eclipse...What should i do?
Add javax.persistence jar dependency in your POM file
<dependency>
<groupId>javax.persistence</groupId>
<artifactId>persistence-api</artifactId>
<version>1.0</version>
</dependency>
After it, update(Maven update) your project.It should work.
Done . dependancy was missed by me. Thanx :)
you may select my answer as helpful :)
is thereany other dependancy specific to the spring ?? for relationship annotations??
I believe it should cover most of annotations.
yes . but iwas trying to get all the references from one roof , actually i m using spring boot. so thats why...so i got the solution. we can refer it from
org.springframework.boot
spring-boot-starter-data-jpa
</dependency>
| common-pile/stackexchange_filtered |
Remove Null values in JSON and Update JSON
I have JSON Array as a string by serializing a list using Newtonsoft as below
[{"ID":"1","Name":"somename","Class":"12","Section":null},{"ID":null,"Name":"somename","Class":"13","Section":null},{"ID":2,"Name":"somename","Class":null,"Section":"A"}]
I need to convert this JSON by removing the NULL values to another JSONString like below
[{"ID":"1","Name":"somename","Class":"12",},{"Name":"somename","Class":"13",},{"ID":2,"Name":"somename","Section":"A"}]
Is there a way I can use Newtonsoft for this or how do i do this.
You can use JsonSerializerSettings with NullValueHandling:
var result = JsonConvert.SerializeObject(obj,
new JsonSerializerSettings()
{
NullValueHandling = NullValueHandling.Ignore
});
| common-pile/stackexchange_filtered |
Where is content of <?php echo $this->getChildHtml('left') ?>?
I have a question about the file 2-columns-left.phtml of my blog of my theme Legenda. I want edit the title of my block-blog-categories but without success.
I see that the content of block is in the
<div class="col-left sidebar col-sm-4 <?php if(Mage::app()->getFrontController()->getRequest()->getModuleName() != 'blog'): ?>col-md-3<?php endif; ?>">
<?php echo $this->getChildHtml('left') ?>
</div>
Now, where is the file that is recalled from getChildHtml('left')? How should I do for edit the title of "Blog Categories"?
The block left is a very generic block in Magento.
It is defined in the file app/design/frontend/base/default/layout/page.xml and is simply a block of type core/text_list.
<block type="core/text_list" name="left" as="left" translate="label">
<label>Left Column</label>
</block>
The basic idea is that you can simply use this block and fill it with the content you need.
You can simply reference it and add items. A perfect example of this is the customer navigation in file app/design/frontend/base/default/layout/customer.xml.
<reference name="left">
<block type="customer/account_navigation" name="customer_account_navigation" before="-" template="customer/account/navigation.phtml">
<action method="addLink" translate="label" module="customer"><name>account</name><path>customer/account/</path><label>Account Dashboard</label></action>
<action method="addLink" translate="label" module="customer"><name>account_edit</name><path>customer/account/edit/</path><label>Account Information</label></action>
<action method="addLink" translate="label" module="customer"><name>address_book</name><path>customer/address/</path><label>Address Book</label></action>
</block>
<block type="checkout/cart_sidebar" name="cart_sidebar" template="checkout/cart/sidebar.phtml">
<action method="addItemRender"><type>simple</type><block>checkout/cart_item_renderer</block><template>checkout/cart/sidebar/default.phtml</template></action>
<action method="addItemRender"><type>grouped</type><block>checkout/cart_item_renderer_grouped</block><template>checkout/cart/sidebar/default.phtml</template></action>
<action method="addItemRender"><type>configurable</type><block>checkout/cart_item_renderer_configurable</block><template>checkout/cart/sidebar/default.phtml</template></action>
</block>
<block type="catalog/product_compare_sidebar" name="catalog.compare.sidebar" template="catalog/product/compare/sidebar.phtml"/>
<remove name="tags_popular"/>
</reference>
Here Magento simply adds in child blocks into the left block. You can see exactly where content is added by searching for the following in the layout files:
<reference name="left">
You can also see when the left column is removed by search for the following in layout files:
<remove name="left"/>
For more information on this block I would recommend reading the Structural block blog post from Inchoo
| common-pile/stackexchange_filtered |
"EXC_BAD_ACCESS" in Qt
Basically, below is my main.cpp and when I try to run it with Qt's debugger, I get the "EXC_BAD_ACCESS" error ("Could not access memory") along with an arrow next to the first line of main (where it says Puzzle puzzle;). I thought it might be a problem with my Puzzle class, but when I moved that line elsewhere, I still got the bad access error with the debugger leaving the same yellow arrow by the first line of main. What's causing this error? My program ran fine half an hour ago, and then started throwing this error and I haven't even modified the code since it last worked. Also, this is one of my first projects in C/C++, so I'm not totally familiar with garbage collection. Could it be something to do with memory leaks or bad memory allocation?
#include <iostream>
#include <fstream>
#include <sstream>
#include <string>
#include "piece.h"
#include "puzzle.h"
#include "state.h"
using namespace std;
//function prototypes
Puzzle initPuzzle(string*, int);
int countWords(string);
//count the number of words (separated by white space) in a string
int countWords(string s){
int words = 0;
char * temp = new char[s.size() + 1];
copy(s.begin(), s.end(), temp);
temp[s.size()] = '\0';
temp = strtok (temp, " ");
while (temp != NULL){
words++;
temp = strtok (NULL, " ");
}
delete(temp);
return words;
}
//first checks validity of input
//if error(s), display appropriate message & exit program
//otherwise, returninstance of puzzle class from input file
//params: lines = array of strings, each of which is a line from input... size = # of elems in 'lines'
Puzzle initPuzzle(string * lines, int size){
//create instance of puzzle
//if bad piece found, throw it out
//if first piece (Z) is invalid, the next piece becomes goal piece
//if there are 0 valid pieces, display error to user and exit program
Puzzle ret;
int rows, cols;
if(size < 2){
//not enough lines for valid input
cout << "Error: Input too short" << endl << "Exiting program..." << endl;
exit(0);
}
istringstream iss(lines[0]);
if((iss >> rows >> cols) && countWords(lines[0])==2){
ret.rows=rows;
ret.cols=cols;
} else {
cout << "Error: Invalid first line" << endl << "Exiting program..." << endl;
exit(0);
}
if(rows < 1 || cols < 1){
cout << "Error: Invalid dimensions" << endl << "Exiting program..." << endl;
exit(0);
}
//now check the rest of the lines (ie the pieces)
for(int i=1; i<size; i++){
Piece newPiece;
int startRow, startCol, width, height;
char direction;
istringstream iss(lines[i]);
if(countWords(lines[i])==5 && (iss >> startRow >> startCol >> width >> height >> direction)){
//row is formatted correctly, create instance of Piece
newPiece = Piece(startRow, startCol, width, height, direction); //validate this piece later... if valid, add to pieces
} else {
//invalid row... entire input is invalid
cout << "Error: Invalid row(s)" << endl << "Exiting program..." << endl;
exit(0);
}
//now validate temporary piece...
//first make sure piece doesn't fall outside of grid
if(newPiece.startRow < 1 || newPiece.startCol < 1 || newPiece.startRow-1 > (rows - newPiece.height) ||
newPiece.startCol-1 > (cols - newPiece.width)){
//newPiece goes over the edge of the puzzle grid
cout << "Piece goes beyond grid... Throwing it out" << endl;
continue;
}
if(newPiece.direction != 'b' && newPiece.direction != 'h' && newPiece.direction != 'v' && newPiece.direction !='n'){
//newPiece has invalid direction
cout << "Piece has invalid direction... Throwing it out" << endl;
continue;
}
if(ret.pieceCount!=0 && ret.pieceOverlap(newPiece)){
//current piece overlaps existing one
cout << "Piece overlaps another piece... Throwing it out" << endl;
continue;
}
//if loop iteration reaches this point, piece is valid and can be added to puzzle
cout << "Piece is good!" << endl;
ret.addPiece(newPiece);
}
if(ret.pieceCount == 0){
//all pieces were invalid
cout << "Error: Puzzle has no pieces" << endl << "Exiting program..." << endl;
exit(0);
}
//now assign id's to the pieces...
for(int i=0; i<ret.pieceCount; i++){
if(i==0){
ret.pieces[i].id = 'Z';
} else {
ret.pieces[i].id = i;
}
}
return ret;
}
int main()
{
Puzzle puzzle; //single instance of puzzle class... initialized later after input & piece verification
string inputFile; //name of input file... provided by user
string line; //single line from input file
string * inputLines = new string[9000]; //array of lines from the input file
ifstream infile;
int size = -1; //size of inputLines array, initialized to -1
cout << "Enter name of input file: ";
cin >> inputFile;
infile.open(inputFile.c_str());
if(infile){
while(infile){
size++;
getline(infile,line);
inputLines[size] = line;
}
infile.close();
} else {
cout << "Error: Input file could not be opened" << endl << "Exiting program" << endl;
exit(0);
}
puzzle = initPuzzle(inputLines, size); //now check the input for validity, and if valid, initialize puzzle
return 0;
}
One mistake (two really) is within the function countWords():
temp is created using new[] but is deallocated use delete, it must be delete[] (new -> delete and new[] -> delete[] and avoid explicit dynamic memory management whenever possible)
the value of temp is not the value it was originally assigned which it must be when delete[]ing
Explicit dynamic memory allocation can be avoided completely by using a std::istringstream instead to count the words:
std::istringstream in(s);
std::string ignored;
while (in >> ignored) words++;
Other points:
prefer std::vector to explicit dynamic memory allocation management:
std::vector<std::string> inputLines; // and use 'push_back()'.
always check result of input operations immediately to ensure success:
if (cin >> inputFile)
{
ifstream infile(inputFile);
if (infile)
{
std::string line;
while (std::getline(infile, line)) lines.push_back(line);
}
}
Thank you so much! Your original answer solved my problem, but the other points are great to know, too. Thanks for the help
Also, I've decided to go with istringstream to count words instead of my unnecessarily messy & unwieldy countWords function. Thanks, again!
| common-pile/stackexchange_filtered |
How to change the color of text in an email template?
I don't have much knowledge of JS but I am just creating a simple template for email. In this template, I am trying to change the color of a text when the button is clicked. Although it is working fine on the localhost when I send an email, it does not work there and the console gives me no message.
Unchecked runtime.lastError: The message port closed before a response was received. This message appears even before clicking on the button.
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
</head>
<body>
<center><h1>We're glad you're here, {{.GetAccountName}}</h1></center>
<center onclick="func()" id="text">We just want to confirm it's you.<br><br></center>
<center>
<button onclick="func()">
Click to confirm your email address
</button>
<center>
<center><br>If you didn't create a proctl account, just delete this email.</center>
<script>
function func(){
document.getElementById('text').style.color='blue';
}
</script>
</body>
</html>
The email client is likely sanitizing the content and preventing scripts from executing to prevent injection attacks.
You may confirm by inspecting the source for the message on delivery.
Then what to do?
If it is coming from the email client, there is nothing you can do about it since the capability is just not supported.
Is there any other way? If not then how a user can confirm an email address even if this small functionality is not working?
| common-pile/stackexchange_filtered |
Kafka connect could not find JdbcSinkConnector even if it's installed
I have installed JDBC connector running confluent-hub install --no-prompt confluentinc/kafka-connect-jdbc:10.2.5 inside my kafka connect connector, but when I try to implement a new sink using I have the following error : Failed to find any class that implements Connector and which name matches io.confluent.connect.jdbc.JdbcSinkConnector
Sink I'm trying to use
{
"name": "jdbc-sink-connector",
"config": {
"connector.class": "io.confluent.connect.jdbc.JdbcSinkConnector",
"tasks.max": "1",
"topics": "my_topic",
"connection.url": "jdbc:postgresql://ip:port/postgres",
"connection.user": "postgres",
"connection.password": "PASSWORD",
"auto.create": "true"
}
}
I'm using confluentinc/cp-kafka-connect:6.1.0 image
If I build an image with confluent-hub install --no-prompt confluentinc/kafka-connect-jdbc:10.2.5 and use this image it works.
So looks like we need to restart kafka connect after install ?
we need to restart kafka connect after install ?
Yes, the JVM doesn't pick up new plugins until (re)started
| common-pile/stackexchange_filtered |
Using SQLite in-memory with EF Core - context.Database.EnsureCreated keeps throwing 'AUTOINCREMENT is only allowed on an INTEGER PRIMARY KEY'
I am currently trying to write unit tests for a project in C# that relies heavily on Entity Framework Core. In order to do that I have decided to use a SQLite in-memory database, and taking inspiration from the example that can be found here, I have written the following class :
public class SqliteEFCoreContextInitializer<T> : IDisposable where T : DbContext
{
private SQLiteConnection connection;
private DbContextOptions<T> contextOptions;
private Func<DbContextOptions<T>, T> contextInstantationFunction;
// The parameter _contextInstantationFunction is there solely to get around the impossibility to directly call the context constructor from the generic type T
public SqliteEFCoreContextInitializer(Func<DbContextOptions<T>,T> _contextInstantationFunction)
{
contextInstantationFunction = _contextInstantationFunction;
connection = new SQLiteConnection("Data Source=:memory:");
connection.Open();
contextOptions = new DbContextOptionsBuilder<T>().UseSqlite(connection).Options;
using T context = CreateContext();
context.Database.EnsureCreated();
}
public void Dispose()
{
connection.Dispose();
}
public T CreateContext()
{
return contextInstantationFunction(contextOptions);
}
}
This class is meant to be instantiated and then used like this :
// Instantiation
SqliteEFCoreContextInitializer<MyContext> sqliteContextInitializer = new(options => new MyContext(options));
//Getting the SQLite in-memory DbContext
MyContext context = sqliteContextInitializer.CreateContext();
The problem I have here is that, regardless of my database schema, I seem to always get the error
SQL logic error: AUTOINCREMENT is only allowed on an INTEGER PRIMARY KEY
when executing context.Database.EnsureCreated();. After some experiments I have found out that this error is thrown even when my schema only includes a single table with no columns besides the integer primary key! What exactly am I doing wrong here?
Also, I don't know whether or not this is relevant but on this project I am forced to work with the now outdated EF Core 5.0.
forced to work why? That's not outdated, it's completely out of support and not required unless you also target the out-of-support EF Core 5. If you're on a supported .NET Core version nothing prevents you from using a supported version
What is the dbcontext? You likely have an unsupported db generated value like a secondary key.
@user1937198 I indeed have a few generated secondary keys in the dbcontext of this project, but even when I run the same code on an almost completely empty dbcontext with no such thing I get the same error.
Yesterday embedded SQLite in-memory in oour test project. Well, I did it more elagant. Which DbProvidfer do you use for your DbContext in real project. Have you found which table cause this issue?
After some more experiment I figured out that the error was caused by my primary key data types being set to NUMBER(10) when Sqlite requires them to be INTEGER. Changing the data types solved the problem. I must admit it didn't occur to me at first that the two data types would be treated in different enough ways to create this error (they're both meant to represent integers after all) but I now realize that I was being very naive.
To be fair to SQLite, the error message tried to tell you this, even if it was unclearthat it meant "the exact datatype named INTEGER" not "any integral datatype"
| common-pile/stackexchange_filtered |
What's the difference between an APRS "item" and an APRS "object"?
What's the difference between an APRS "item" and an APRS "object"?
A kenwood manual says Objects have timestamps, items do not. What does that mean, and how should it be used?
Also in general: What is a good resource for learning about APRS? aprs.org is not one, since it only has angry rants about implementations doing it wrong, and no explanations about what's right.
Per the spec, which unsurprisingly is found at aprs.org, objects are intended for moving or animate objects of the same nature as a station beacon (people, vehicles, storms, etc.) while items are meant for permanent points of interest (they may come and go, but they're not expected to move) such as hospitals. However, there's no real difference between them, other than the fact that, as Kenwood says, object reports contain timestamps. In practice, clients seem to display them and age them out identically.
Both of them are almost completely equivalent to ordinary position beacons (which can have a timestamp or not), except that an object or item has a name which is different from the callsign of the originating station.
Yeah I read the spec. Sorry, should have added that to my question. I think you confirmed my vague understanding though: It's a layer violation in the protocol standard, and really shouldn't have been distinct types at the protocol layer. (though this is my opinion and you don't have to agree).
I was thinking they had to be actually different, but no, not really.
@Thomas I do agree, it seems unnecessary to have reserved different object types for them. APRS is kind of simultaneously overengineered and underengineered :)
| common-pile/stackexchange_filtered |
React indiana scroll - initial scroll doesn't work
I would like to initialize my component to an initial scroll with react-indiana-drag-scroll but it doesn't work and I don't know why.
You will find the code on this sandbox.
I would like to initialize the scroll on the red line (current time), but the scrollTo function doesn't work in this code.
ScrollTo function didn't work because the component isn't fully constructed when we call the function.
Instead of constructing it in componentDidMount(), I did it in componentWillMount() and called scrollTo in componentDidMount().
See the code : here
| common-pile/stackexchange_filtered |
Recursive function, high performance critical
def uniop_nested(func,o_list):
def inner(i_list):
if isinstance(i_list[0],np.ndarray):
return map(func, i_list)
else:
return map(inner, i_list)
return inner(o_list)
def binop_nested(func, o1, o2):
if not isinstance(o1,np.ndarray):
return [binop_nested(func, i1, i2) for (i1,i2) in zip(o1,o2)]
else:
return func(o1,o2)
def add_nested(s1,s2):
return binop_nested(np.add,s1,s2)
My code need to work with lists of ndarrays and list of lists of ndarrays. Profiling shows this is some of the most performance critical code in my project.
How can I optimise it?
Can I rewrite the recursion as a loop?
Is there nice way to rewrite, them as Cython or a C extention? (I have no experience with this)
My Stack Overflow question here indicates that changing datatypes is probably not going to the solution.
More info:
Operands (o1 o2, s1, s2) are short lists. Profiling has shown me that using it.izip is slower.
Function return results are unlikely to be repeated. As the ndarrays are full of floats being tweaked with mathematical operations based of floats. (We are talking a large segment of Rn possible values)
Functions being applied are simple, the add_nested is the most common op by far, but there are a few onthers like uniop_nested(np.zeros_like, o_list).
ndarrays are of different sizes/shapes. (so a multidimentional ndarray won't work)
Context:
This is being used for training Restricted Boltzmann Machines (RBMs) and Neural network.
I have a generic "Trainer" class,
that takes a Trainee class as a parameter.
the Trainee class exposes a few methods like:
Get_update_gradient - a function that returns (for a RBM [Restricted Boltzmann Machine]) a list containing a ndarray of weight changes and 2 ndarrays of bias changes, or (for a multilayer neural net) a list containing a list of weight matrix changes and a list of bias changes
knowledge: a property exposing either a list containing (for a RBM) a 2 bias arrays and a weight matrix or (for a neural net) a list of weight matrixes and bias arrays
It may seem that the Trainer class is simple, and unnesc, however it is moderately complex, and its use is common between the RBM and neural net classes. (Both benefit from the use of momentum and minibatchs)
A typical use is:
trainee.knowledge = binop_nested(lambda current_value,update: learning_rate*update+current_value, trainee.knowledge, updates)
If you're mostly interested in lists of ndarrays, why not replace recursion with a simple loop without all the 'isinstance' stuff?
arager: Because it need to dig down the list (of list) til it gets down to ndArrays
Another idea - if you know in advance at which depth the list turns into ndarray, pass that depth as an extra argument. I feel that isinstance is what causing the performance hit. Getting rid of branching would be better though.
Hmm, could be done (for certian parts at least), it means tracking a state variable though. conviently in the palce this can be done, there is very few statevariables, so it isn't too bad.
Does the profiling tell you how much time is spent in this code vs. in the func called from here?
still not clear what you are trying to do, can you just give a simple example of what you are trying to do, and what the expected result is? With real functions and a chunk of your real data, since right now it still sounds a bit too fuzzy/abstract/...
I agree with @usethedeathstar. You need to give us more context so we can understand what you are trying to achieve. Iterating over a list of ndarray is usually an anti-pattern (better to use a multi-dimensional ndarray if possible).
Ok, I hopefully I have added as much context as I can.
(I'm out of ideas for what other useful information i can give)
@Oxinabox: What is "tringing"? (Typo for "training"?) What is an RBM? (A Restricted Boltzmann machine?) What is an RMB? (Typo for "RBM"?)
@GarethRees: Fixed. Your guesses were correct. Thanks.
For recursion, you can try adding an @memoize decorator to it, to speed it up.
import functools
def memoize(f):
cache= {}
@functools.wraps(f)
def memf(*x):
if x not in cache:
cache[x] = f(*x)
return cache[x]
return memf
Not sure if in your case it will speed up a lot, but if you try with fibonacci, you will see that it vastly speeds up the recursions, since it caches previous results.
This is something that speeds up recursion things in general. For your specific case, we need a bit more info, on what your functions are, and what you want to achieve to get a more specific answer.
Won't work as this is not a pure function. It has state. Actually it might be pure. But in anycase the same result is almpst never returned twice.
Added more info to question
@Oxinabox, not sure what's that about "not a pure function," but the function decorated with this memoize does always return the same output for the same input arguments (which have a limitation that they must be hashable or immutable).
Memorisation can only be done on Pure Functions:
http://en.wikipedia.org/wiki/Pure_function
In my case the functions may be pure (maybe),
but it doesn't matter because I never call the function with the same inputs twice.
Memoration is a great technique, for recursive functions, under the right circumstances.
Unfortunatly for me, this is not them.
(Not saying antyhing is wrong with your answer though, I +1 it)
Cython function calls are much faster.
(I suspect cython type checking is too).
I recompiled your code with cython,
a simple matter of changing they file name to .pyx,
adding: cimport cython to the top,
and compiling it
Here is a simple test:
a = np.arange(788)
b = np.asarray([0.01]*200)
values = [[a,0.1*a,0.01*a],[a,0.1*a,0.01*a],b,b]
%timeit add_nested(values,values) #Define in this IPython notebook
10000 loops, best of 3: 32 µs per loop
%timeit c_add_nested(values,values) #Define in this IPython notebook with cythonmagic
10000 loops, best of 3: 32 µs per loop
%timeit numpyutil.add_nested(values,values) #In a seperate file called numpyutil.pyx/so
10000 loops, best of 3: 32 µs per loop
That is about a 25% speed increase.
| common-pile/stackexchange_filtered |
SVProgressHUD likely HUD in landscape or Horizontal mode
I'm writing a view in "landscape mode" implement by changing the view's transform. And I need a Hud 4 showing infos in this "landscape mode"(I've disable the landscape left and right mode). I've searched the API of SVProgressHUD and seem there's no API of changing it
Any Suggestion?
PS: I'm not in landscape mode But trying implement a landscape likely HUD in the Portrait
You can use https://github.com/jdg/MBProgressHUD
Are wah Dhanesh, You are here :)
if u used the view transform ,viewcontroller does not rotate, the viewcontroller view only rotate , in here you can't get the landscape
| common-pile/stackexchange_filtered |
Chat App for Android using a XMPP Server and Google Cloud Messaging (or the newer Firebase Cloud Messaging) for Push Notifications
I'm developing a Chat App for Android. I have been reading for weeks about XMPP and Google Cloud Messaging (and Firebase Cloud Messaging) and I am still very confused.
Currently, I already setup an XMPP server locally (Ejabberd) and successfully connected my Android App to it using the Smack library.
To my understanding, I need to use GCM or the newer FCM for Push Notifications, so I already created a project in Google Cloud Platform. My Android App can connect to it using the Smack library too (instead of connecting to my XMPP server directly). In my server I have an small Java app that connects to GCM using the Smack library too.
Everything is fine until here. My big confusion is: How can I use my XMPP server with GCM for Push Notifications? Every documentation, examples, guides, tutorials I found for server-side implementations just tell me how to connect to GCM but none tell me how to use my XMPP server in conjunction with GCM. What I'm missing? My Java app just connects to GCM, receive and send messages from and to GCM, but my XMPP server is just sitting there doing nothing. Actually my Android App and server Java App use GCM exclusively, not my XMPP server.
I hope someone can help me to understand this as a whole, I am obviously missing some key elements to achieve this implementation.
Hey SF, can you help me out with the connecting XMPP server to android ?
can you get your answer?
can you please help me i'm facing same problem here http://stackoverflow.com/questions/41630978/how-to-use-smack-4-1-for-how-to-send-info-query-packet-to-xmpp-server
http://stackoverflow.com/questions/41734043/chat-app-for-android-using-a-xmpp-server-and-firebase-cloud-messaging-for-push-n
You need to mix both Ejabberd and FCM together, that's how all the big chat apps do it out there. For the very basics, there are 3 components: App Server connected via XMPP to FCM, Ejabberd and your client app.
When the app is in the foreground, you use Smack library to connect directly to your Ejabberd server, send messages, change user's presence, etc. The connection to your Ejabberd is kept during that time. You don't send upstream messages during this time!
Once the user navigates away from your app, you close the connection. The user is now considered "Away" or "Offline".
From this point and on, your App Server communicates with FCM to send downstream messages to the device using Smack library as well.
On the client device: You handle the incoming message and show a notification. With Android N, users can reply directly from the notification. I assume that in this case, you would use FCM to send an upstream message to your app server since during that time, there's no active connection to your Ejabberd server.
Once the user taps on the notification, the app comes back to foreground and you re-connect to Ejabberd and back to step 1.
This is the very basic description of the architecture you should have to achieve what you want.
In step 1 when you say that "You don't send upstream messages during this time", do you mean upstream messages to FCM right? In other words, the client device always send messages directly to Ejabberd when in foreground. The client device receive message from FCM and Ejabberd or just from FCM?
Yes, I referred to FCM. The client device receives messages from Ejabberd. While the app is in background, you use FCM to notify the device of new messages. Even though you can send the up to 4KB payload with FCM, I suggest that you only send a so call "push-to-sync" message using FCM and get the actual messages from Ejabberd. So Ejabberd is the main data store for all messages and your App Server with FCM downstream messages is dedicated to one thing only - making the client sync with Ejabberd. This also means that Ejabberd and your App Server need to communicate of course.
Now I need to know how to connect Ejabberd and the App Server. By the way, I already have the Client App up and running using only FCM for downstream and upstream with no problems, I need to use Ejabberd too for scalability matters right? What are the limits of a FCM-only approach?
Check out this post http://stackoverflow.com/questions/16189612/ejabberd-and-push-notification for forwarding Ejabberd offline messages to your app server.
It's not only for scalability, but the fact that Ejabberd is designed to be a chat server so no need to re-invent the wheel :) I'm unsure of other limitations of FCM aside the 4KB per message. Please accept this answer if it helped you, thanks! :)
This is a sample java project to showcase the Firebase Cloud Messaging (FCM) XMPP Connection Server. This project is a very simple standalone server that I developed as a base of a larger project. It is an application server that we must implement in our environment. This server sends data to a client app via the FCM CCS Server using the XMPP protocol.
https://github.com/carlosCharz/fcmxmppserver
And also I've created a video in youtube where I explain what it does.
https://www.youtube.com/watch?v=PA91bVq5sHw
Hope you find it useful.
@CarlosBecerraRodríguez i have tried to implement your server at google cloud module(endpoints) but i get an error and downstream messages are not sent.
@CarlosBecerraRodríguez error is The following addresses failed: 'fcm-xmpp.googleapis.com:5236' failed because java.net.SocketException: Permission denied: connection to (10, [2607:f8b0:4001:c06::bc]:5236,6) denied due to policy i have even opened an issue @ Github
@JiTHiN sorry to answer too late. I have not received notifications from stackoverflow. I indeed receive notifications from the GitHub project. Did you put your error there?
You should set up a Java server which connects to FCM (formerly GCM). Then you can from your device send an upstream message to FCM who then sends that upstream message to your java server and then within that Java server you can handle that upstream message to send a downstream message to the targeted device(s). Then on the device side you can handle those downstream messages being received to give a push notification.
A few useful links:
How to send an upstream message:
https://firebase.google.com/docs/cloud-messaging/upstream#sample-send
How to receive and handle downstream messages:
https://firebase.google.com/docs/cloud-messaging/downstream#sample-receive
How I set up an example Java server:
https://stackoverflow.com/a/38170310/4433653
Really thanks for your answer but I'm still confused. I already have a Java server which connects to FCM (very similar to the code of that link), but it works without my XMPP server, it just use FCM for everything. What I am missing?
FCM has two supported protocols HTTP and XMPP. HTTP is used to ONLY send messages from your server, through FCM, to your device. XMPP allows both server to device and device to server messages, both through FCM.
@ArthurThompson Right, so... How can I use my XMPP server with FCM for Push Notifications?, Every implementation, example, guide I found (including the one in the link of this answer) just use and explain the FCM part, what about XMPP? That Java server works without my XMPP server running at all.
@Santiago Fermin You would make an XMPP connection between your app server and FCM (CCS to be exact), you can make that connection using something like the Smack Java XMPP Library: https://www.igniterealtime.org/projects/smack. Your app server will be the XMPP server.
@ArthurThompson And what is the role of my XMPP server (Ejabberd)? That is my confusion, with FCM, Ejabberd does nothing, anything connects to it.
You don't need an XMPP server, CCS (part of FCM) is the XMPP server. Your app server would be an XMPP client to CCS and communicate with it.
I am also building chat application in Android using Smack v.4.1.8 which latest version of Smack, and our server is using ejabberd. I am not use any third party like GCM or Firebase to push downstream message from user to user message. If i am not wrong you going to use GCM or Firebase to push user to user message, if yes just dont do that.
Why?
TCP protocol is keep listening a invoke listener which you registered in App when the connecting establish and still connected. Smack has a listener which called "addAsyncStanzaListener()"
addAsyncStanzaListener is used to listen a receive packet in App. They have
public void processPacket(Stanza packet)
You can invoke that, and you will listen the packet over the time.
I do a research about keeping stable connection of Smack. Most of Android Smartphone have each configuration, and restriction. We cannot test all of devices.
Here my tips to keep connection stable:
Use ForegroundService instead BackgroundService, i propose this approach because most of Android Device have restriction to an app which run in background. They will kill app when the app swipe from task manager. (e.g Asus zenphone have power management)
ForegroundService will prevent app from being idle.
Register ReconnectingManager
Register Pingfailed
Register ServerPingWithAlarmManager
And that's it. Any enquiries, just comment below :)
Best Regard
R Aditya Gumay
Your suggestion is valid for a connection with tcp/ip never drop. which is not the case in Mobile 3G/4G network. So in real 3G/4G network, the mobile client/APP might be in zombie state(disconnected).
That is why you need Firebase/FCM to come to play.
| common-pile/stackexchange_filtered |
Sync SQLite Database to App Engine Datastore?
The android app stores the data in SQLite database during the offline mode. When online I want the app to sync (in both direction) with the datastore(database) in cloud server (App Engine). How do I implement this functionality, so that I can show the data captured on phone on a web application. Also please suggest any simple alternative way if any..
This is an overly broad question that doesn't really suit SO. You are just going to get down modded.
I just write out my data as a String (using a format I can reconstruct my data with), pass that to AE, parse it and store/display it.
You could use json too.
or try http://mylifewithandroid.blogspot.jp/2010/10/client-server-communication-with-json.html
for the sync part I use a timestamp. If the timestamped result isn't recorded on the server, I record it. I send back the recorded stamps to the client and delete them from the store. Of course the server can also send back new results if a user's records were updated from a different client.
In general, you should implement some complex algorithm, that will be doing synchronization depending on your needs, and then make it in code on both side (server and client). This not quite a simple task, in general. Useful keywords for googling: SOAP, REST, JSON, ...
| common-pile/stackexchange_filtered |
Close a batch launched in VBScript with intWindowStyle 0
I have a scheduled task that launches a VBScript with parameters which launches a hidden bat.
This bat executes a routine in a endless loop.
The only way to be sure the bat is still running is from the TaskManager
If I want to close the batch I have to look for it there and kill the process.
Is there any other way to check if that batch is running or set a name to that process so I can look for it.
My Script code:
Set WshShell = CreateObject("WScript.Shell")
WshShell.Run """Path to bat"" " & WScript.Arguments.Item(0) & " """ & WScript.Arguments.Item(1) & """", 0
Set WshShell = Nothing
look at taskkill /?, espcially WINDOWTITLE then in your batch file after @echo off add title Sometitle where sometitle will be the window title to search for.
How can I give a title to a hidden window that has been launched by a vbs script?
Here's an example. in the batch-fle do title Test then run the vbscript, open cmd and run taskkill /FI "WINDOWTITLE eq Test" obviously that will kill the process, but to simply view it, tasklist will return the result.
Perfect. That works like a charm. I can check if the bat is already running with WshShell.Run "taskkill /fi ""WINDOWTITLE eq BatTitle""", , True and kill it before running a new one. Thanks
Does this answer your question? Killing processes in Vbscript
| common-pile/stackexchange_filtered |
Wrap Multiprocess Pool Inside Loop (Shared Memory Between Processes)
I'm using the Python package "deap" to solve some multiobjective optimization problems with genetic algorithms. The functions can be quite expensive, and because of the evolutionary nature of GA, it gets compounded pretty quick. Now this package does have some support to allow the evolutionary computations to be parallelized with multiprocess.
However, I'd like to go one step farther and run the optimization multiple times, with different values on some of the optimization parameters. For instance, I might want to solve the optimization problem with different values of the weights.
This seems like a pretty natural case for loops, but the problem is that these parameters must be defined in the global scope of the program (i.e., above the "main" function) so that all the sub-processes know about the parameters. Here's some pseudo-code:
# define deap parameters - have to be in the global scope
toolbox = base.Toolbox()
history = tools.History()
weights = [1, 1, -1] # This is primarily what I want to vary
creator.create("Fitness",base.Fitness, weights=weights)
creator.create("Individual", np.ndarray, fitness=creator.Fitness)
def main():
# run GA to solve multiobjective optimization problem
return my_optimized_values
if __name__=='__main__':
## What I'd like to do but can't ##
## all_weights = list(itertools.product([1, -1],repeat=3))
## for combo in all_weights:
## weights = combo
##
pool = multiprocessing.Pool(processes=6)
# This can be down here, and it distributes the GA computations to a pool of workers
toolbox.register("map",pool.map)
my_values = main()
I've investigated various possibilities, like multiprocessing.Value, the pathos fork of multiprocessing, and others, but in the end there's always a problem with the child processes reading the Individual class.
I've posed this question on the deap users' group, but it's not nearly as big a community as SO. Plus, it seems to me that this is more of a general conceptual Python question than a specific issue with deap. My current solution to this problem is just to run the code multiple times and change some of the parameter definitions each time. At least this way the GA calculations are still parallelized, but it does require more manual intervention than I'd like.
Any advice or suggestions are greatly appreciated!
Use the initializer/initargs keyword arguments to Pool to pass different values for the global variables you need to change on each run. The initializer function will be called with initargs as its arguments for each worker process inside of your Pool, as soon as it starts up. You can set your global variables to the desired values there, and they'll be set properly inside each child for the lifetime of the pool.
You'll need to create a different Pool for each run, but that shouldn't be a problem:
toolbox = base.Toolbox()
history = tools.History()
weights = None # We'll set this in the children later.
def init(_weights):
# This will run in each child process.
global weights
weights = _weights
creator.create("Fitness",base.Fitness, weights=weights)
creator.create("Individual", np.ndarray, fitness=creator.Fitness)
if __name__=='__main__':
all_weights = list(itertools.product([1, -1],repeat=3))
for combo in all_weights:
weights = combo
pool = multiprocessing.Pool(processes=6, initializer=init, initargs=(weights,))
toolbox.register("map",pool.map)
my_values = main()
pool.close()
pool.join()
Thanks for the fast result. Here's my result: TypeError: Can't instantiate abstract <class 'deap.creator.Fitness'> with abstract attribute weights. The creator needs to have well-defined weights.
If I move all the creator stuff inside the "init" function (and add a corresponding global toolbox), then I get errors such as AttributeError: 'NoneType' object has no attribute 'decorator', etc. The toolbox class has several "register" and "decorate" methods that are also inside the "init" function at this point (left out for brevity) that have to come after the toolbox = Toolbox() declaration.
@user3325401 ok, I just downloaded deap and got the example working (see the edit above). Without seeing your actual code, I don't know if it will work for you, though. The idea is to only set the values that depend on weights inside init, and do everything else in the parent process.
Thanks again. On second though, I think the issue is that the toolbox class registers some functions that are used in the GA process. For instance, up in the global scope I have something like this defined: "toolbox.register("population",tools.initRepeat,list,toolbox.individual)". Then down in the main function where I do all the GA stuff, I actually draw from the population with the function "toolbox.population(n=numIndividuals)"
If I move the toolbox.register statements into the init function, then the error I get is "AttributeError: 'Toolbox' object has no attribute 'population'".
@user3325401 Can you just keep making the toolbox.register calls in the parent process? Or do those rely on weights being set?
In a roundabout way, yes, they do rely on weights being set. The weights are required for the "Fitness" to be created, which is required for the "Individual" to be created, which is something that must be registered in the toolbox: toolbox.register("individual",tools.initRepeat,creator.Individual,toolbox.indices,n=1) and toolbox.register("population",tools.initRepeat,list,toolbox.individual)
@user3325401 Can you just create the toolbox in the parent's global scope (same place you had it originally), and then make the register calls inside init, then? If the parent needs access to the stuff you register, make them both inside the for loop after you set weights = combo, and then do it again in init.
I have also been uncomfortable with DEAP's use of global scope, and I think I have an alternate solution for you.
It is possible to import a different version of each module per loop iteration, thereby avoiding any reliance on the global scope.
this_random = importlib.import_module("random")
this_creator = importlib.import_module("deap.creator")
this_algorithms = importlib.import_module("deap.algorithms")
this_base = importlib.import_module("deap.base")
this_tools = importlib.import_module("deap.tools")
As far as I can tell, this seems to play with multiprocessing.
As an example, here is a version of DEAP's onemax_mp.py that avoids putting any of the DEAP files in the global scope. I've included a loop in __main__ that changes the weights per iteration. (It maximizes the number of ones the first time, and minimizes it the second time.) Everything works fine with multiprocessing.
#!/usr/bin/env python2.7
# This file is part of DEAP.
#
# DEAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# DEAP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with DEAP. If not, see <http://www.gnu.org/licenses/>.
import array
import multiprocessing
import sys
if sys.version_info < (2, 7):
print("mpga_onemax example requires Python >= 2.7.")
exit(1)
import numpy
import importlib
def evalOneMax(individual):
return sum(individual),
def do_onemax_mp(weights, random_seed=None):
""" Run the onemax problem with the given weights and random seed. """
# create local copies of each module
this_random = importlib.import_module("random")
this_creator = importlib.import_module("deap.creator")
this_algorithms = importlib.import_module("deap.algorithms")
this_base = importlib.import_module("deap.base")
this_tools = importlib.import_module("deap.tools")
# hoisted from global scope
this_creator.create("FitnessMax", this_base.Fitness, weights=weights)
this_creator.create("Individual", array.array, typecode='b',
fitness=this_creator.FitnessMax)
this_toolbox = this_base.Toolbox()
this_toolbox.register("attr_bool", this_random.randint, 0, 1)
this_toolbox.register("individual", this_tools.initRepeat,
this_creator.Individual, this_toolbox.attr_bool, 100)
this_toolbox.register("population", this_tools.initRepeat, list,
this_toolbox.individual)
this_toolbox.register("evaluate", evalOneMax)
this_toolbox.register("mate", this_tools.cxTwoPoint)
this_toolbox.register("mutate", this_tools.mutFlipBit, indpb=0.05)
this_toolbox.register("select", this_tools.selTournament, tournsize=3)
# hoisted from __main__
this_random.seed(random_seed)
pool = multiprocessing.Pool(processes=4)
this_toolbox.register("map", pool.map)
pop = this_toolbox.population(n=300)
hof = this_tools.HallOfFame(1)
this_stats = this_tools.Statistics(lambda ind: ind.fitness.values)
this_stats.register("avg", numpy.mean)
this_stats.register("std", numpy.std)
this_stats.register("min", numpy.min)
this_stats.register("max", numpy.max)
this_algorithms.eaSimple(pop, this_toolbox, cxpb=0.5, mutpb=0.2, ngen=40,
stats=this_stats, halloffame=hof)
pool.close()
if __name__ == "__main__":
for tgt_weights in ((1.0,), (-1.0,)):
do_onemax_mp(tgt_weights)
| common-pile/stackexchange_filtered |
Android > How to get service state for both sim cards in dual sim device?
I need it for API 22 and above.
I saw that we have telephonyManager.getServiceState - but I don't know how to get it for sim1 and for sim2 exactly.
Also we have CellInfo.serviceState - but it's only from API 28.
How to get it? I don't need any listeners, I just want to get service state at the certain time
Please help!
After some researches, implemented this solution:
@SuppressLint("MissingPermission", "NewApi")
private fun getServiceState(simSlotNmb: Int): String {
try {
val serviceState: ServiceState?
if(Build.VERSION.SDK_INT >= Build.VERSION_CODES.O) {
serviceState = if (subscriptionManager != null && subscriptionManager!!.activeSubscriptionInfoCount > 1) {
val subsId =
subscriptionManager!!.getActiveSubscriptionInfoForSimSlotIndex(
simSlotNmb
).subscriptionId
val telephonyManager =
(context.getSystemService(Context.TELEPHONY_SERVICE) as TelephonyManager)
.createForSubscriptionId(subsId)
telephonyManager.serviceState
} else {
telephonyManager.serviceState
}
} else if(Build.VERSION.SDK_INT >= Build.VERSION_CODES.N && subscriptionManager != null
&& subscriptionManager!!.activeSubscriptionInfoCount > 1) {
val subsId = subscriptionManager!!.getActiveSubscriptionInfoForSimSlotIndex(simSlotNmb).subscriptionId
val telephonyManagerForSlot
= (context.getSystemService(Context.TELEPHONY_SERVICE) as TelephonyManager)
.createForSubscriptionId(subsId)
telephonyManagerForSlot.listen(phoneStateListener, PhoneStateListener.LISTEN_SERVICE_STATE)
telephonyManagerForSlot.listen(phoneStateListener, PhoneStateListener.LISTEN_NONE)
serviceState = latestServiceState
} else if(Build.VERSION.SDK_INT >= Build.VERSION_CODES.LOLLIPOP_MR1 && subscriptionManager != null
&& subscriptionManager!!.activeSubscriptionInfoCount > 1) {
val noConnectionDbm = -110
val dbm = getSignalDbm(simSlotNmb)
serviceState = ServiceState()
if(dbm < noConnectionDbm) {
serviceState.state = ServiceState.STATE_OUT_OF_SERVICE
} else {
serviceState.state = ServiceState.STATE_IN_SERVICE
}
} else {
telephonyManager.listen(phoneStateListener, PhoneStateListener.LISTEN_SERVICE_STATE)
telephonyManager.listen(phoneStateListener, PhoneStateListener.LISTEN_NONE)
serviceState = latestServiceState
}
return when (serviceState?.state) {
ServiceState.STATE_IN_SERVICE -> "in service"
ServiceState.STATE_EMERGENCY_ONLY -> "emergency only"
else -> "out of service"
}
} catch (exc: Exception) {
exc.printStackTrace()
return when(exc) {
is ArrayIndexOutOfBoundsException -> "out of service"
else -> Constants.error
}
}
}
| common-pile/stackexchange_filtered |
Exit "Events" on one site drastically differ from the corresponding "Acquisition" data on another site
I have a site (mysite) that refers users to another (othersite). Tracking how many referrers mysite sends to the othersite is crucial. I have access to analytics for both.
On mysite, when a user clicks a link to othersite I trigger a Google Analytics Event. This should track how many referrers mysite sends to othersite.
That link has UTM tags to identify our traffic and campaign. In the othersite analytics, I review “acquisition/campaign” data for these UTM IDs. This should be the referrers received from mysite.
However, On othersite I only see 1/6th of the traffic I expect when compared to mysite’s events. I don't expect an exact match but that is a big difference.
I have tried many things to debug this and everything seems to work as intended.
Initially, the links on mysite did not include UTM codes. At that
time I reviewed the othersite's traffic by referring 'source'.
Because of this discrepancy, we implemented the UTM tags to all links
on mysite. The same discrepancy has persisted.
I watch "real-time" analytics on both sites. I follow a link from mysite to othersite. I
see one event on my site and one new hit in "traffic sources" on
othersite. This is as expected and seems like everything is working
properly.
I use the JS console Analytics debugger too to watch the
events and pageviews being sent to google analytics. I see one event
on my site and a pageview that includes my UTM data on othersite. The
only thing interesting here is othersite has two different Google
Analytics codes and is sending this pageview hit to both. I don't
think that is an issue.
I can’t explain this discrepancy. Any suggestions on how to debug or possible explanations?
Is there a better way to go about achiving my goal of "Tracking how many referrers mysite sends to the othersite"?
If the sites are both in your control, can you parse the logs looking at the REFERER field to ascertain what you are after?
@davidgo I have access to the Google Analytics property for othersite. I do not have access to that server or anything else (are you asking about server logs?). I have requested to meet with the team that runs othersite.
Have you tried clicking on the link multiple times to see what happens? What if one person could generate many events on your site?
@StephenOstermiller Yes, a user could click the link multiple times. Each time would register an event in GA. GA reports "Total Events" and "Unique Events" which I think captures this. I am looking at "Unique Events" and still see this discrepency.
| common-pile/stackexchange_filtered |
iOS custom UITableView and UITableViewCell with columns (Twitter app)
I'm developing an iPhone/iPod application, and the designer used a lot of these "Twitter App Concepts" (screenshot). As you can see, the TableView has a shadow and the cells can be split in columns (those columns are clickable).
I just need to create those columns, any idea how I can accomplish that?
The prettykit library is probably a good place to start, as the library is a subclass of UITableViewCell and UINavigationController.
http://cocoacontrols.com/platforms/ios/controls/prettykit
Well a UITableViewCell is a UIView so in your tableView:cellForRowAtIndexPath: when you hit that row simply add 3 subviews to the UITableViewCell.
There is one downside to this approach though and that is if there are a lot of these "Column Cells" then it will hinder performance. You also tend to want to avoid more then 5 subviews in a UITableViewCell
In case you are wondering "Why can't i just add multiple cells to a Single Row?"
Good question and the reason is UITableView's dequeueReusableCellWithIdentifier: (Reference) this takes an Index Path which is a combination of the Section Number and Row Number the Cell is in.
As it only returns a single cel, it's impossible to return multiple cells (unless you write a custom implementation), but you can return a cell with multiple subviews that has a unique identifier ;)
UITableViewCell Class Reference
UIView Class Reference
Edit: The library that danielbeard linked looks to be a good implementation to use.
Use your own subclass of UITableViewCell instead of UItableviewcell. And Customise your cell as you want.
| common-pile/stackexchange_filtered |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.